diff --git a/openshift/Dockerfile.openshift b/openshift/Dockerfile.openshift
index 84e1b7f508..758dce557f 100644
--- a/openshift/Dockerfile.openshift
+++ b/openshift/Dockerfile.openshift
@@ -12,7 +12,7 @@ FROM registry.ci.openshift.org/ocp/4.22:base-rhel9
 
 LABEL description="Cluster API Provider IBM Cloud Controller Manager"
 
 COPY --from=builder /build/manager /manager
-COPY --from=builder /build/openshift/manifests /manifests
+COPY openshift/capi-operator-manifests /capi-operator-manifests
 
 ENTRYPOINT [ "/manager" ]
diff --git a/openshift/Makefile b/openshift/Makefile
index 9b3c840147..f5b3dee178 100644
--- a/openshift/Makefile
+++ b/openshift/Makefile
@@ -8,22 +8,27 @@ $(RELEASE_DIR):
 
 MANIFESTS_GEN := go run ./vendor/github.com/openshift/cluster-capi-operator/manifests-gen/
 
-$(KUSTOMIZE):
-	./tools/ensure-kustomize.sh
-
 .PHONY: verify
 verify: verify-ocp-manifests
 
 .PHONY: verify-%
-verify-%: ## Ensure no diff after running some other target
+verify-%:
 	$(MAKE) $*
 	./verify-diff.sh
 
 .PHONY: update-manifests-gen
update-manifests-gen:
-	cd tools && go get github.com/openshift/cluster-capi-operator/manifests-gen && go mod tidy && go mod vendor
+	cd tools && go get github.com/openshift/cluster-capi-operator/manifests-gen@main github.com/openshift/cluster-capi-operator@main && go mod tidy && go mod vendor
 
 .PHONY: ocp-manifests
-ocp-manifests: $(RELEASE_DIR) ## Builds openshift specific manifests
-	# Generate provider manifests.
-	cd tools && $(MANIFESTS_GEN) --provider-name "powervs" --provider-type "InfrastructureProvider" --provider-version "${PROVIDER_VERSION}" --base-path "../../" --manifests-path "../manifests"
+ocp-manifests: ## Builds openshift specific manifests
+	cd tools && $(MANIFESTS_GEN) \
+		--manifests-path "../capi-operator-manifests" \
+		--kustomize-dir "../../openshift" \
+		--name cluster-api-provider-ibmcloud \
+		--install-order 20 \
+		--attribute type=infrastructure \
+		--attribute version="${PROVIDER_VERSION}" \
+		--self-image-ref registry.ci.openshift.org/openshift:ibmcloud-cluster-api-controllers \
+		--platform PowerVS \
+		--protect-cluster-resource ibmpowervscluster
diff --git a/openshift/capi-operator-manifests/default/manifests.yaml b/openshift/capi-operator-manifests/default/manifests.yaml
new file mode 100644
index 0000000000..349c087795
--- /dev/null
+++ b/openshift/capi-operator-manifests/default/manifests.yaml
@@ -0,0 +1,8569 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+    service.beta.openshift.io/inject-cabundle: "true"
+  labels:
+    cluster.x-k8s.io/provider: infrastructure-ibmcloud
+    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+  name: ibmvpcclusters.infrastructure.cluster.x-k8s.io
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        caBundle: Cg==
+        service:
+          name: capi-ibmcloud-webhook-service
+          namespace: openshift-cluster-api
+          path: /convert
+      conversionReviewVersions:
+      - v1
+      - v1beta1
+  group: infrastructure.cluster.x-k8s.io
+  names:
+    categories:
+    - cluster-api
+    kind: IBMVPCCluster
+    listKind: IBMVPCClusterList
+    plural: ibmvpcclusters
+    singular: ibmvpccluster
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Cluster to which this IBMVPCCluster belongs
+      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+      name: Cluster
+      type: string
+    - description: Cluster infrastructure is ready for IBM VPC instances
+      jsonPath: .status.ready
+      name: Ready
+      type: string
+    name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: IBMVPCCluster is the Schema for the ibmvpcclusters API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMVPCClusterSpec defines the desired state of IBMVPCCluster.
+            properties:
+              controlPlaneEndpoint:
+                description: ControlPlaneEndpoint represents the endpoint used to
+                  communicate with the control plane.
+                properties:
+                  host:
+                    description: host is the hostname on which the API server is serving.
+                    maxLength: 512
+                    type: string
+                  port:
+                    description: port is the port on which the API server is serving.
+                    format: int32
+                    type: integer
+                required:
+                - host
+                - port
+                type: object
+              controlPlaneLoadBalancer:
+                description: ControlPlaneLoadBalancer is optional configuration for
+                  customizing control plane behavior.
+                properties:
+                  name:
+                    description: Name sets the name of the VPC load balancer.
+                    maxLength: 63
+                    pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                    type: string
+                type: object
+              region:
+                description: The IBM Cloud Region the cluster lives in.
+                type: string
+              resourceGroup:
+                description: The VPC resources should be created under the resource
+                  group.
+                type: string
+              vpc:
+                description: The Name of VPC.
+                type: string
+              zone:
+                description: The Name of availability zone.
+                type: string
+            required:
+            - region
+            - resourceGroup
+            type: object
+          status:
+            description: IBMVPCClusterStatus defines the observed state of IBMVPCCluster.
+            properties:
+              conditions:
+                description: Conditions defines current service state of the load
+                  balancer.
+                items:
+                  description: Condition defines an observation of a Cluster API resource
+                    operational state.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when
+                        the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This field may be empty.
+                      maxLength: 10240
+                      minLength: 1
+                      type: string
+                    reason:
+                      description: |-
+                        reason is the reason for the condition's last transition in CamelCase.
+                        The specific API may choose whether or not this field is considered a guaranteed API.
+                        This field may be empty.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                    severity:
+                      description: |-
+                        severity provides an explicit classification of Reason code, so the users or machines can immediately
+                        understand the current situation and act accordingly.
+                        The Severity field MUST be set only when Status=False.
+                      maxLength: 32
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      type: string
+                    type:
+                      description: |-
+                        type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                        can be useful (see .node.status.conditions), the ability to deconflict is important.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              controlPlaneLoadBalancerState:
+                description: ControlPlaneLoadBalancerState is the status of the load
+                  balancer.
+                type: string
+              ready:
+                description: Ready is true when the provider resource is ready.
+                type: boolean
+              subnet:
+                description: Subnet describes a subnet.
+                properties:
+                  cidr:
+                    type: string
+                  id:
+                    type: string
+                  name:
+                    type: string
+                  zone:
+                    type: string
+                required:
+                - cidr
+                - id
+                - name
+                - zone
+                type: object
+              vpc:
+                description: |-
+                  INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+                  Important: Run "make" to regenerate code after modifying this file
+                properties:
+                  id:
+                    type: string
+                  name:
+                    type: string
+                required:
+                - id
+                - name
+                type: object
+              vpcEndpoint:
+                description: VPCEndpoint describes a VPCEndpoint.
+                properties:
+                  address:
+                    type: string
+                  floatingIPID:
+                    description: 'Deprecated: This field has no function and is going
+                      to be removed in the next release.'
+                    type: string
+                  loadBalancerIPID:
+                    type: string
+                required:
+                - address
+                type: object
+            type: object
+        type: object
+    served: true
+    storage: false
+    subresources:
+      status: {}
+  - additionalPrinterColumns:
+    - description: Cluster to which this IBMVPCCluster belongs
+      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+      name: Cluster
+      type: string
+    - description: Cluster infrastructure is ready for IBM VPC instances
+      jsonPath: .status.ready
+      name: Ready
+      type: string
+    name: v1beta2
+    schema:
+      openAPIV3Schema:
+        description: IBMVPCCluster is the Schema for the ibmvpcclusters API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMVPCClusterSpec defines the desired state of IBMVPCCluster.
+            properties:
+              controlPlaneEndpoint:
+                description: ControlPlaneEndpoint represents the endpoint used to
+                  communicate with the control plane.
+                properties:
+                  host:
+                    description: host is the hostname on which the API server is serving.
+                    maxLength: 512
+                    type: string
+                  port:
+                    description: port is the port on which the API server is serving.
+                    format: int32
+                    type: integer
+                required:
+                - host
+                - port
+                type: object
+              controlPlaneLoadBalancer:
+                description: |-
+                  ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
+                  Use this for legacy support, use Network.LoadBalancers for the extended VPC support.
+                properties:
+                  additionalListeners:
+                    description: AdditionalListeners sets the additional listeners
+                      for the control plane load balancer.
+                    items:
+                      description: |-
+                        AdditionalListenerSpec defines the desired state of an
+                        additional listener on a VPC load balancer.
+                      properties:
+                        defaultPoolName:
+                          description: defaultPoolName defines the name of a VPC Load
+                            Balancer Backend Pool to use for the VPC Load Balancer
+                            Listener.
+                          maxLength: 63
+                          minLength: 1
+                          pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                          type: string
+                        port:
+                          description: Port sets the port for the additional listener.
+                          format: int64
+                          maximum: 65535
+                          minimum: 1
+                          type: integer
+                        protocol:
+                          description: |-
+                            protocol defines the protocol to use for the VPC Load Balancer Listener.
+                            Will default to TCP protocol if not specified.
+                          enum:
+                          - http
+                          - https
+                          - tcp
+                          - udp
+                          type: string
+                        selector:
+                          description: |-
+                            The selector is used to find IBMPowerVSMachines with matching labels.
+                            If the label matches, the machine is then added to the load balancer listener configuration.
+                          properties:
+                            matchExpressions:
+                              description: matchExpressions is a list of label selector
+                                requirements. The requirements are ANDed.
+                              items:
+                                description: |-
+                                  A label selector requirement is a selector that contains values, a key, and an operator that
+                                  relates the key and values.
+                                properties:
+                                  key:
+                                    description: key is the label key that the selector
+                                      applies to.
+                                    type: string
+                                  operator:
+                                    description: |-
+                                      operator represents a key's relationship to a set of values.
+                                      Valid operators are In, NotIn, Exists and DoesNotExist.
+                                    type: string
+                                  values:
+                                    description: |-
+                                      values is an array of string values. If the operator is In or NotIn,
+                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                      the values array must be empty. This array is replaced during a strategic
+                                      merge patch.
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                required:
+                                - key
+                                - operator
+                                type: object
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            matchLabels:
+                              additionalProperties:
+                                type: string
+                              description: |-
+                                matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                operator is "In", and the values array contains only "value". The requirements are ANDed.
+                              type: object
+                          type: object
+                          x-kubernetes-map-type: atomic
+                      required:
+                      - port
+                      type: object
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - port
+                    x-kubernetes-list-type: map
+                  backendPools:
+                    description: backendPools defines the load balancer's backend
+                      pools.
+                    items:
+                      description: VPCLoadBalancerBackendPoolSpec defines the desired
+                        configuration of a VPC Load Balancer Backend Pool.
+                      properties:
+                        algorithm:
+                          description: algorithm defines the load balancing algorithm
+                            to use.
+                          enum:
+                          - least_connections
+                          - round_robin
+                          - weighted_round_robin
+                          type: string
+                        healthMonitor:
+                          description: healthMonitor defines the backend pool's health
+                            monitor.
+                          properties:
+                            delay:
+                              description: delay defines the seconds to wait between
+                                health checks.
+                              format: int64
+                              maximum: 60
+                              minimum: 2
+                              type: integer
+                            port:
+                              description: port defines the port to perform health
+                                monitoring on.
+                              format: int64
+                              maximum: 65535
+                              minimum: 1
+                              type: integer
+                            retries:
+                              description: retries defines the max retries for health
+                                check.
+                              format: int64
+                              maximum: 10
+                              minimum: 1
+                              type: integer
+                            timeout:
+                              description: timeout defines the seconds to wait for
+                                a health check response.
+                              format: int64
+                              maximum: 59
+                              minimum: 1
+                              type: integer
+                            type:
+                              description: type defines the protocol used for health
+                                checks.
+                              enum:
+                              - http
+                              - https
+                              - tcp
+                              type: string
+                            urlPath:
+                              description: urlPath defines the URL to use for health
+                                monitoring.
+                              pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$
+                              type: string
+                          required:
+                          - delay
+                          - retries
+                          - timeout
+                          - type
+                          type: object
+                        name:
+                          description: name defines the name of the Backend Pool.
+                          maxLength: 63
+                          minLength: 1
+                          pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                          type: string
+                        protocol:
+                          description: protocol defines the protocol to use for the
+                            Backend Pool.
+                          enum:
+                          - http
+                          - https
+                          - tcp
+                          - udp
+                          type: string
+                      required:
+                      - algorithm
+                      - healthMonitor
+                      - protocol
+                      type: object
+                    type: array
+                  id:
+                    description: id of the loadbalancer
+                    maxLength: 64
+                    minLength: 1
+                    pattern: ^[-0-9a-z_]+$
+                    type: string
+                  name:
+                    description: Name sets the name of the VPC load balancer.
+                    maxLength: 63
+                    minLength: 1
+                    pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                    type: string
+                  public:
+                    default: true
+                    description: public indicates that load balancer is public or
+                      private
+                    type: boolean
+                  securityGroups:
+                    description: |-
+                      securityGroups defines the Security Groups to attach to the load balancer.
+                      Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                    items:
+                      description: VPCResource represents a VPC resource.
+                      properties:
+                        id:
+                          description: id of the resource.
+                          minLength: 1
+                          type: string
+                        name:
+                          description: name of the resource.
+                          minLength: 1
+                          type: string
+                      type: object
+                      x-kubernetes-validations:
+                      - message: an id or name must be provided
+                        rule: has(self.id) || has(self.name)
+                    type: array
+                  subnets:
+                    description: |-
+                      subnets defines the VPC Subnets to attach to the load balancer.
+                      Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                    items:
+                      description: VPCResource represents a VPC resource.
+                      properties:
+                        id:
+                          description: id of the resource.
+                          minLength: 1
+                          type: string
+                        name:
+                          description: name of the resource.
+                          minLength: 1
+                          type: string
+                      type: object
+                      x-kubernetes-validations:
+                      - message: an id or name must be provided
+                        rule: has(self.id) || has(self.name)
+                    type: array
+                type: object
+              image:
+                description: image represents the Image details used for the cluster.
+                properties:
+                  cosBucket:
+                    description: cosBucket is the name of the IBM Cloud COS Bucket
+                      containing the source of the image, if necessary.
+                    type: string
+                  cosBucketRegion:
+                    description: cosBucketRegion is the COS region the bucket is in.
+                    type: string
+                  cosInstance:
+                    description: cosInstance is the name of the IBM Cloud COS Instance
+                      containing the source of the image, if necessary.
+                    type: string
+                  cosObject:
+                    description: cosObject is the name of an IBM Cloud COS Object used
+                      as the source of the image, if necessary.
+                    type: string
+                  crn:
+                    description: crn is the IBM Cloud CRN of the existing VPC Custom
+                      Image.
+                    type: string
+                  name:
+                    description: name is the name of the desired VPC Custom Image.
+                    maxLength: 63
+                    minLength: 1
+                    pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                    type: string
+                  operatingSystem:
+                    description: operatingSystem is the Custom Image's Operating System
+                      name.
+                    type: string
+                  resourceGroup:
+                    description: resourceGroup is the Resource Group to create the
+                      Custom Image in.
+                    properties:
+                      id:
+                        description: id defines the IBM Cloud Resource ID.
+                        type: string
+                      name:
+                        description: name defines the IBM Cloud Resource Name.
+                        type: string
+                    required:
+                    - id
+                    type: object
+                type: object
+                x-kubernetes-validations:
+                - message: if any of cosInstance, cosBucket, or cosObject are specified,
+                    all must be specified
+                  rule: (!has(self.cosInstance) && !has(self.cosBucket) && !has(self.cosObject))
+                    || (has(self.cosInstance) && has(self.cosBucket) && has(self.cosObject))
+                - message: an existing image name or crn must be provided, or to create
+                    a new image the cos resources must be provided, with or without
+                    a name
+                  rule: has(self.name) || has(self.crn) || (has(self.cosInstance)
+                    && has(self.cosBucket) && has(self.cosObject))
+              network:
+                description: network represents the VPC network to use for the cluster.
+                properties:
+                  controlPlaneSubnets:
+                    description: controlPlaneSubnets is a set of Subnets which define
+                      the Control Plane subnets.
+                    items:
+                      description: Subnet describes a subnet.
+                      properties:
+                        cidr:
+                          type: string
+                        id:
+                          maxLength: 64
+                          minLength: 1
+                          pattern: ^[-0-9a-z_]+$
+                          type: string
+                        name:
+                          maxLength: 63
+                          minLength: 1
+                          pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                          type: string
+                        zone:
+                          type: string
+                      type: object
+                    type: array
+                  loadBalancers:
+                    description: loadBalancers is a set of VPC Load Balancer definitions
+                      to use for the cluster.
+                    items:
+                      description: VPCLoadBalancerSpec defines the desired state of
+                        a VPC load balancer.
+                      properties:
+                        additionalListeners:
+                          description: AdditionalListeners sets the additional listeners
+                            for the control plane load balancer.
+                          items:
+                            description: |-
+                              AdditionalListenerSpec defines the desired state of an
+                              additional listener on a VPC load balancer.
+                            properties:
+                              defaultPoolName:
+                                description: defaultPoolName defines the name of a
+                                  VPC Load Balancer Backend Pool to use for the VPC
+                                  Load Balancer Listener.
+                                maxLength: 63
+                                minLength: 1
+                                pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                                type: string
+                              port:
+                                description: Port sets the port for the additional
+                                  listener.
+                                format: int64
+                                maximum: 65535
+                                minimum: 1
+                                type: integer
+                              protocol:
+                                description: |-
+                                  protocol defines the protocol to use for the VPC Load Balancer Listener.
+                                  Will default to TCP protocol if not specified.
+                                enum:
+                                - http
+                                - https
+                                - tcp
+                                - udp
+                                type: string
+                              selector:
+                                description: |-
+                                  The selector is used to find IBMPowerVSMachines with matching labels.
+                                  If the label matches, the machine is then added to the load balancer listener configuration.
+                                properties:
+                                  matchExpressions:
+                                    description: matchExpressions is a list of label
+                                      selector requirements. The requirements are
+                                      ANDed.
+                                    items:
+                                      description: |-
+                                        A label selector requirement is a selector that contains values, a key, and an operator that
+                                        relates the key and values.
+                                      properties:
+                                        key:
+                                          description: key is the label key that the
+                                            selector applies to.
+                                          type: string
+                                        operator:
+                                          description: |-
+                                            operator represents a key's relationship to a set of values.
+                                            Valid operators are In, NotIn, Exists and DoesNotExist.
+                                          type: string
+                                        values:
+                                          description: |-
+                                            values is an array of string values. If the operator is In or NotIn,
+                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                            the values array must be empty. This array is replaced during a strategic
+                                            merge patch.
+                                          items:
+                                            type: string
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                      required:
+                                      - key
+                                      - operator
+                                      type: object
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  matchLabels:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                      operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                    type: object
+                                type: object
+                                x-kubernetes-map-type: atomic
+                            required:
+                            - port
+                            type: object
+                          type: array
+                          x-kubernetes-list-map-keys:
+                          - port
+                          x-kubernetes-list-type: map
+                        backendPools:
+                          description: backendPools defines the load balancer's backend
+                            pools.
+                          items:
+                            description: VPCLoadBalancerBackendPoolSpec defines the
+                              desired configuration of a VPC Load Balancer Backend
+                              Pool.
+                            properties:
+                              algorithm:
+                                description: algorithm defines the load balancing
+                                  algorithm to use.
+                                enum:
+                                - least_connections
+                                - round_robin
+                                - weighted_round_robin
+                                type: string
+                              healthMonitor:
+                                description: healthMonitor defines the backend pool's
+                                  health monitor.
+                                properties:
+                                  delay:
+                                    description: delay defines the seconds to wait
+                                      between health checks.
+                                    format: int64
+                                    maximum: 60
+                                    minimum: 2
+                                    type: integer
+                                  port:
+                                    description: port defines the port to perform
+                                      health monitoring on.
+                                    format: int64
+                                    maximum: 65535
+                                    minimum: 1
+                                    type: integer
+                                  retries:
+                                    description: retries defines the max retries for
+                                      health check.
+                                    format: int64
+                                    maximum: 10
+                                    minimum: 1
+                                    type: integer
+                                  timeout:
+                                    description: timeout defines the seconds to wait
+                                      for a health check response.
+                                    format: int64
+                                    maximum: 59
+                                    minimum: 1
+                                    type: integer
+                                  type:
+                                    description: type defines the protocol used for
+                                      health checks.
+                                    enum:
+                                    - http
+                                    - https
+                                    - tcp
+                                    type: string
+                                  urlPath:
+                                    description: urlPath defines the URL to use for
+                                      health monitoring.
+                                    pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$
+                                    type: string
+                                required:
+                                - delay
+                                - retries
+                                - timeout
+                                - type
+                                type: object
+                              name:
+                                description: name defines the name of the Backend
+                                  Pool.
+                                maxLength: 63
+                                minLength: 1
+                                pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                                type: string
+                              protocol:
+                                description: protocol defines the protocol to use
+                                  for the Backend Pool.
+                                enum:
+                                - http
+                                - https
+                                - tcp
+                                - udp
+                                type: string
+                            required:
+                            - algorithm
+                            - healthMonitor
+                            - protocol
+                            type: object
+                          type: array
+                        id:
+                          description: id of the loadbalancer
+                          maxLength: 64
+                          minLength: 1
+                          pattern: ^[-0-9a-z_]+$
+                          type: string
+                        name:
+                          description: Name sets the name of the VPC load balancer.
+                          maxLength: 63
+                          minLength: 1
+                          pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                          type: string
+                        public:
+                          default: true
+                          description: public indicates that load balancer is public
+                            or private
+                          type: boolean
+                        securityGroups:
+                          description: |-
+                            securityGroups defines the Security Groups to attach to the load balancer.
+                            Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                          items:
+                            description: VPCResource represents a VPC resource.
+                            properties:
+                              id:
+                                description: id of the resource.
+                                minLength: 1
+                                type: string
+                              name:
+                                description: name of the resource.
+                                minLength: 1
+                                type: string
+                            type: object
+                            x-kubernetes-validations:
+                            - message: an id or name must be provided
+                              rule: has(self.id) || has(self.name)
+                          type: array
+                        subnets:
+                          description: |-
+                            subnets defines the VPC Subnets to attach to the load balancer.
+                            Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                          items:
+                            description: VPCResource represents a VPC resource.
+                            properties:
+                              id:
+                                description: id of the resource.
+                                minLength: 1
+                                type: string
+                              name:
+                                description: name of the resource.
+                                minLength: 1
+                                type: string
+                            type: object
+                            x-kubernetes-validations:
+                            - message: an id or name must be provided
+                              rule: has(self.id) || has(self.name)
+                          type: array
+                      type: object
+                    type: array
+                  resourceGroup:
+                    description: |-
+                      resourceGroup is the Resource Group containing all of the network resources.
+                      This can be different than the Resource Group containing the remaining cluster resources.
+                    properties:
+                      id:
+                        description: id defines the IBM Cloud Resource ID.
+                        type: string
+                      name:
+                        description: name defines the IBM Cloud Resource Name.
+                        type: string
+                    required:
+                    - id
+                    type: object
+                  securityGroups:
+                    description: securityGroups is a set of VPCSecurityGroups which
+                      define the VPC Security Groups that manage traffic within and
+                      out of the VPC.
+                    items:
+                      description: VPCSecurityGroup defines a VPC Security Group that
+                        should exist or be created within the specified VPC, with
+                        the specified Security Group Rules.
+                      properties:
+                        id:
+                          description: id of the Security Group.
+                          type: string
+                        name:
+                          description: name of the Security Group.
+                          type: string
+                        rules:
+                          description: rules are the Security Group Rules for the
+                            Security Group.
+                          items:
+                            description: VPCSecurityGroupRule defines a VPC Security
+                              Group Rule for a specified Security Group.
+                            properties:
+                              action:
+                                description: action defines whether to allow or deny
+                                  traffic defined by the Security Group Rule.
+                                enum:
+                                - allow
+                                - deny
+                                type: string
+                              destination:
+                                description: |-
+                                  destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule.
+                                  Only used when direction is VPCSecurityGroupRuleDirectionOutbound.
+                                properties:
+                                  icmpCode:
+                                    description: |-
+                                      icmpCode is the ICMP code for the Rule.
+                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                    format: int64
+                                    type: integer
+                                  icmpType:
+                                    description: |-
+                                      icmpType is the ICMP type for the Rule.
+                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                    format: int64
+                                    type: integer
+                                  portRange:
+                                    description: portRange is a range of ports allowed
+                                      for the Rule's remote.
+                                    properties:
+                                      maximumPort:
+                                        description: maximumPort is the inclusive
+                                          upper range of ports.
+                                        format: int64
+                                        maximum: 65535
+                                        minimum: 1
+                                        type: integer
+                                      minimumPort:
+                                        description: minimumPort is the inclusive
+                                          lower range of ports.
+                                        format: int64
+                                        maximum: 65535
+                                        minimum: 1
+                                        type: integer
+                                    type: object
+                                    x-kubernetes-validations:
+                                    - message: maximum port must be greater than or
+                                        equal to minimum port
+                                      rule: self.maximumPort >= self.minimumPort
+                                  protocol:
+                                    description: protocol defines the traffic protocol
+                                      used for the Security Group Rule.
+                                    enum:
+                                    - all
+                                    - icmp
+                                    - tcp
+                                    - udp
+                                    type: string
+                                  remotes:
+                                    description: |-
+                                      remotes is a set of VPCSecurityGroupRuleRemotes that define the traffic allowed by the Rule's remote.
+                                      Specifying multiple VPCSecurityGroupRuleRemotes creates a unique Security Group Rule with the shared Protocol, PortRange, etc.
+                                      This allows for easier management of Security Group Rules for sets of CIDRs, IPs, etc.
+                                    items:
+                                      description: |-
+                                        VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details.
+                                        The type of remote defines the additional remote details which are used for defining the remote.
+                                      properties:
+                                        address:
+                                          description: |2-
+                                             address is the address to use for the remote's destination/source.
+                                             Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress.
+                                          type: string
+                                        cidrSubnetName:
+                                          description: |-
+                                            cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
+                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
+                                          type: string
+                                        remoteType:
+                                          description: remoteType defines the type
+                                            of filter to define for the remote's destination/source.
+                                          enum:
+                                          - any
+                                          - cidr
+                                          - address
+                                          - sg
+                                          type: string
+                                        securityGroupName:
+                                          description: |-
+                                            securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
+                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
+                                          type: string
+                                      required:
+                                      - remoteType
+                                      type: object
+                                      x-kubernetes-validations:
+                                      - message: cidrSubnetName, address, and securityGroupName
+                                          are not valid for VPCSecurityGroupRuleRemoteTypeAny
+                                          remoteType
+                                        rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName)
+                                          && !has(self.address) && !has(self.securityGroupName))
+                                          : true'
+                                      - message: only cidrSubnetName is valid for
+                                          VPCSecurityGroupRuleRemoteTypeCIDR remoteType
+                                        rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName)
+                                          && !has(self.address) && !has(self.securityGroupName))
+                                          : true'
+                                      - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP
+                                          remoteType
+                                        rule: 'self.remoteType == ''address'' ? (has(self.address)
+                                          && !has(self.cidrSubnetName) && !has(self.securityGroupName))
+                                          : true'
+                                      - message: only securityGroupName is valid for
+                                          VPCSecurityGroupRuleRemoteTypeSG remoteType
+                                        rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName)
+                                          && !has(self.cidrSubnetName) && !has(self.address))
+                                          : true'
+                                    type: array
+                                required:
+                                - protocol
+                                - remotes
+                                type: object
+                                x-kubernetes-validations:
+                                - message: icmpCode and icmpType are only supported
+                                    for VPCSecurityGroupRuleProtocolIcmp protocol
+                                  rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode)
+                                    && !has(self.icmpType)) : true'
+                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll
+                                    protocol
+                                  rule: 'self.protocol == ''all'' ? !has(self.portRange)
+                                    : true'
+                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp
+                                    protocol
+                                  rule: 'self.protocol == ''icmp'' ? !has(self.portRange)
+                                    : true'
+                              direction:
+                                description: direction defines whether the traffic
+                                  is inbound or outbound for the Security Group Rule.
+                                enum:
+                                - inbound
+                                - outbound
+                                type: string
+                              securityGroupID:
+                                description: securityGroupID is the ID of the Security
+                                  Group for the Security Group Rule.
+                                type: string
+                              source:
+                                description: |-
+                                  source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule.
+                                  Only used when direction is VPCSecurityGroupRuleDirectionInbound.
+                                properties:
+                                  icmpCode:
+                                    description: |-
+                                      icmpCode is the ICMP code for the Rule.
+                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                    format: int64
+                                    type: integer
+                                  icmpType:
+                                    description: |-
+                                      icmpType is the ICMP type for the Rule.
+                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                    format: int64
+                                    type: integer
+                                  portRange:
+                                    description: portRange is a range of ports allowed
+                                      for the Rule's remote.
+                                    properties:
+                                      maximumPort:
+                                        description: maximumPort is the inclusive
+                                          upper range of ports.
+                                        format: int64
+                                        maximum: 65535
+                                        minimum: 1
+                                        type: integer
+                                      minimumPort:
+                                        description: minimumPort is the inclusive
+                                          lower range of ports.
+                                        format: int64
+                                        maximum: 65535
+                                        minimum: 1
+                                        type: integer
+                                    type: object
+                                    x-kubernetes-validations:
+                                    - message: maximum port must be greater than or
+                                        equal to minimum port
+                                      rule: self.maximumPort >= self.minimumPort
+                                  protocol:
+                                    description: protocol defines the traffic protocol
+                                      used for the Security Group Rule.
+                                    enum:
+                                    - all
+                                    - icmp
+                                    - tcp
+                                    - udp
+                                    type: string
+                                  remotes:
+                                    description: |-
+                                      remotes is a set of VPCSecurityGroupRuleRemotes that define the traffic allowed by the Rule's remote.
+                                      Specifying multiple VPCSecurityGroupRuleRemotes creates a unique Security Group Rule with the shared Protocol, PortRange, etc.
+                                      This allows for easier management of Security Group Rules for sets of CIDRs, IPs, etc.
+                                    items:
+                                      description: |-
+                                        VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details.
+                                        The type of remote defines the additional remote details which are used for defining the remote.
+                                      properties:
+                                        address:
+                                          description: |2-
+                                             address is the address to use for the remote's destination/source.
+                                             Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress.
+                                          type: string
+                                        cidrSubnetName:
+                                          description: |-
+                                            cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
+                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
+                                          type: string
+                                        remoteType:
+                                          description: remoteType defines the type
+                                            of filter to define for the remote's destination/source.
+                                          enum:
+                                          - any
+                                          - cidr
+                                          - address
+                                          - sg
+                                          type: string
+                                        securityGroupName:
+                                          description: |-
+                                            securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
+                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
+                                          type: string
+                                      required:
+                                      - remoteType
+                                      type: object
+                                      x-kubernetes-validations:
+                                      - message: cidrSubnetName, address, and securityGroupName
+                                          are not valid for VPCSecurityGroupRuleRemoteTypeAny
+                                          remoteType
+                                        rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName)
+                                          && !has(self.address) && !has(self.securityGroupName))
+                                          : true'
+                                      - message: only cidrSubnetName is valid for
+                                          VPCSecurityGroupRuleRemoteTypeCIDR remoteType
+                                        rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName)
+                                          && !has(self.address) && !has(self.securityGroupName))
+                                          : true'
+                                      - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP
+                                          remoteType
+                                        rule: 'self.remoteType == ''address'' ? (has(self.address)
+                                          && !has(self.cidrSubnetName) && !has(self.securityGroupName))
+                                          : true'
+                                      - message: only securityGroupName is valid for
+                                          VPCSecurityGroupRuleRemoteTypeSG remoteType
+                                        rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName)
+                                          && !has(self.cidrSubnetName) && !has(self.address))
+                                          : true'
+                                    type: array
+                                required:
+                                - protocol
+                                - remotes
+                                type: object
+                                x-kubernetes-validations:
+                                - message: icmpCode and icmpType are only supported
+                                    for VPCSecurityGroupRuleProtocolIcmp protocol
+                                  rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode)
+                                    && !has(self.icmpType)) : true'
+                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll
+                                    protocol
+                                  rule: 'self.protocol == ''all'' ? !has(self.portRange)
+                                    : true'
+                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp
+                                    protocol
+                                  rule: 'self.protocol == ''icmp'' ? !has(self.portRange)
+                                    : true'
+                            required:
+                            - action
+                            - direction
+                            type: object
+                            x-kubernetes-validations:
+                            - message: both destination and source cannot be provided
+                              rule: (has(self.destination) && !has(self.source)) ||
+                                (!has(self.destination) && has(self.source))
+                            - message: source must be set for VPCSecurityGroupRuleDirectionInbound
+                                direction
+                              rule: 'self.direction == ''inbound'' ? has(self.source)
+                                : true'
+                            - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound
+                                direction
+                              rule: 'self.direction == ''inbound'' ? !has(self.destination)
+                                : true'
+                            - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound
+                                direction
+                              rule: 'self.direction == ''outbound'' ? has(self.destination)
+                                : true'
+                            - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound
+                                direction
+                              rule: 'self.direction == ''outbound'' ? !has(self.source)
+                                : true'
+                          type: array
+                        tags:
+                          description: tags are tags to add to the Security Group.
+                          items:
+                            type: string
+                          type: array
+                      type: object
+                      x-kubernetes-validations:
+                      - message: either an id or name must be specified
+                        rule: has(self.id) || has(self.name)
+                    type: array
+                  vpc:
+                    description: vpc defines the IBM Cloud VPC for extended VPC Infrastructure
+                      support.
+                    properties:
+                      id:
+                        description: id of the resource.
+                        minLength: 1
+                        type: string
+                      name:
+                        description: name of the resource.
+                        minLength: 1
+                        type: string
+                    type: object
+                    x-kubernetes-validations:
+                    - message: an id or name must be provided
+                      rule: has(self.id) || has(self.name)
+                  workerSubnets:
+                    description: workerSubnets is a set of Subnets which define the
+                      Worker subnets.
+                    items:
+                      description: Subnet describes a subnet.
+                      properties:
+                        cidr:
+                          type: string
+                        id:
+                          maxLength: 64
+                          minLength: 1
+                          pattern: ^[-0-9a-z_]+$
+                          type: string
+                        name:
+                          maxLength: 63
+                          minLength: 1
+                          pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                          type: string
+                        zone:
+                          type: string
+                      type: object
+                    type: array
+                type: object
+              region:
+                description: The IBM Cloud Region the cluster lives in.
+                type: string
+              resourceGroup:
+                description: The VPC resources should be created under the resource
+                  group.
+                type: string
+              vpc:
+                description: The Name of VPC.
+                type: string
+              zone:
+                description: The Name of availability zone.
+                type: string
+            required:
+            - region
+            - resourceGroup
+            type: object
+          status:
+            description: IBMVPCClusterStatus defines the observed state of IBMVPCCluster.
+            properties:
+              conditions:
+                description: Conditions defines current service state of the load
+                  balancer.
+                items:
+                  description: Condition defines an observation of a Cluster API resource
+                    operational state.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when
+                        the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This field may be empty.
+                      maxLength: 10240
+                      minLength: 1
+                      type: string
+                    reason:
+                      description: |-
+                        reason is the reason for the condition's last transition in CamelCase.
+                        The specific API may choose whether or not this field is considered a guaranteed API.
+                        This field may be empty.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                    severity:
+                      description: |-
+                        severity provides an explicit classification of Reason code, so the users or machines can immediately
+                        understand the current situation and act accordingly.
+                        The Severity field MUST be set only when Status=False.
+                      maxLength: 32
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      type: string
+                    type:
+                      description: |-
+                        type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                        can be useful (see .node.status.conditions), the ability to deconflict is important.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              controlPlaneLoadBalancerState:
+                description: ControlPlaneLoadBalancerState is the status of the load
+                  balancer.
+                type: string
+              image:
+                description: image is the status of the VPC Custom Image.
+                properties:
+                  id:
+                    description: id defines the Id of the IBM Cloud resource status.
+                    type: string
+                  name:
+                    description: name defines the name of the IBM Cloud resource status.
+                    type: string
+                  ready:
+                    description: ready defines whether the IBM Cloud resource is ready.
+                    type: boolean
+                required:
+                - id
+                - ready
+                type: object
+              network:
+                description: network is the status of the VPC network resources for
+                  extended VPC Infrastructure support.
+                properties:
+                  controlPlaneSubnets:
+                    additionalProperties:
+                      description: ResourceStatus identifies a resource by id (and
+                        name) and whether it is ready.
+                      properties:
+                        id:
+                          description: id defines the Id of the IBM Cloud resource
+                            status.
+                          type: string
+                        name:
+                          description: name defines the name of the IBM Cloud resource
+                            status.
+                          type: string
+                        ready:
+                          description: ready defines whether the IBM Cloud resource
+                            is ready.
+                          type: boolean
+                      required:
+                      - id
+                      - ready
+                      type: object
+                    description: |-
+                      controlPlaneSubnets references the VPC Subnets for the cluster's Control Plane.
+                      The map simplifies lookups.
+                    type: object
+                  loadBalancers:
+                    additionalProperties:
+                      description: VPCLoadBalancerStatus defines the status of the
+                        VPC load balancer.
+                      properties:
+                        controllerCreated:
+                          default: false
+                          description: controllerCreated indicates whether the resource
+                            is created by the controller.
+                          type: boolean
+                        hostname:
+                          description: hostname is the hostname of load balancer.
+                          type: string
+                        id:
+                          description: id of VPC load balancer.
+                          type: string
+                        state:
+                          description: State is the status of the load balancer.
+                          type: string
+                      type: object
+                    description: |-
+                      loadBalancers references the VPC Load Balancers for the cluster.
+                      The map simplifies lookups.
+                    type: object
+                  publicGateways:
+                    additionalProperties:
+                      description: ResourceStatus identifies a resource by id (and
+                        name) and whether it is ready.
+                      properties:
+                        id:
+                          description: id defines the Id of the IBM Cloud resource
+                            status.
+                          type: string
+                        name:
+                          description: name defines the name of the IBM Cloud resource
+                            status.
+                          type: string
+                        ready:
+                          description: ready defines whether the IBM Cloud resource
+                            is ready.
+                          type: boolean
+                      required:
+                      - id
+                      - ready
+                      type: object
+                    description: |-
+                      publicGateways references the VPC Public Gateways for the cluster.
+                      The map simplifies lookups.
+                    type: object
+                  resourceGroup:
+                    description: |-
+                      resourceGroup references the Resource Group for Network resources for the cluster.
+                      This can be the same or unique from the cluster's Resource Group.
+                    properties:
+                      id:
+                        description: id defines the Id of the IBM Cloud resource status.
+                        type: string
+                      name:
+                        description: name defines the name of the IBM Cloud resource
+                          status.
+                        type: string
+                      ready:
+                        description: ready defines whether the IBM Cloud resource
+                          is ready.
+                        type: boolean
+                    required:
+                    - id
+                    - ready
+                    type: object
+                  securityGroups:
+                    additionalProperties:
+                      description: ResourceStatus identifies a resource by id (and
+                        name) and whether it is ready.
+                      properties:
+                        id:
+                          description: id defines the Id of the IBM Cloud resource
+                            status.
+                          type: string
+                        name:
+                          description: name defines the name of the IBM Cloud resource
+                            status.
+                          type: string
+                        ready:
+                          description: ready defines whether the IBM Cloud resource
+                            is ready.
+                          type: boolean
+                      required:
+                      - id
+                      - ready
+                      type: object
+                    description: |-
+                      securityGroups references the VPC Security Groups for the cluster.
+                      The map simplifies lookups.
+                    type: object
+                  vpc:
+                    description: vpc references the status of the IBM Cloud VPC as
+                      part of the extended VPC Infrastructure support.
+                    properties:
+                      id:
+                        description: id defines the Id of the IBM Cloud resource status.
+                        type: string
+                      name:
+                        description: name defines the name of the IBM Cloud resource
+                          status.
+                        type: string
+                      ready:
+                        description: ready defines whether the IBM Cloud resource
+                          is ready.
+                        type: boolean
+                    required:
+                    - id
+                    - ready
+                    type: object
+                  workerSubnets:
+                    additionalProperties:
+                      description: ResourceStatus identifies a resource by id (and
+                        name) and whether it is ready.
+                      properties:
+                        id:
+                          description: id defines the Id of the IBM Cloud resource
+                            status.
+                          type: string
+                        name:
+                          description: name defines the name of the IBM Cloud resource
+                            status.
+                          type: string
+                        ready:
+                          description: ready defines whether the IBM Cloud resource
+                            is ready.
+                          type: boolean
+                      required:
+                      - id
+                      - ready
+                      type: object
+                    description: |-
+                      workerSubnets references the VPC Subnets for the cluster's Data Plane.
+                      The map simplifies lookups.
+                    type: object
+                type: object
+              ready:
+                default: false
+                description: Ready is true when the provider resource is ready.
+                type: boolean
+              resourceGroup:
+                description: resourceGroup is the status of the cluster's Resource
+                  Group for extended VPC Infrastructure support.
+                properties:
+                  id:
+                    description: id defines the Id of the IBM Cloud resource status.
+                    type: string
+                  name:
+                    description: name defines the name of the IBM Cloud resource status.
+                    type: string
+                  ready:
+                    description: ready defines whether the IBM Cloud resource is ready.
+                    type: boolean
+                required:
+                - id
+                - ready
+                type: object
+              subnet:
+                description: Subnet describes a subnet.
+                properties:
+                  cidr:
+                    type: string
+                  id:
+                    maxLength: 64
+                    minLength: 1
+                    pattern: ^[-0-9a-z_]+$
+                    type: string
+                  name:
+                    maxLength: 63
+                    minLength: 1
+                    pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                    type: string
+                  zone:
+                    type: string
+                type: object
+              v1beta2:
+                description: V1beta2 groups all the fields that will be added or modified
+                  in IBMVPCCluster's status with the V1Beta2 version.
+                properties:
+                  conditions:
+                    description: Conditions represents the observations of an IBMVPCCluster's
+                      current state.
+                    items:
+                      description: Condition contains details for one aspect of the
+                        current state of this API Resource.
+                      properties:
+                        lastTransitionTime:
+                          description: |-
+                            lastTransitionTime is the last time the condition transitioned from one status to another.
+                            This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                          format: date-time
+                          type: string
+                        message:
+                          description: |-
+                            message is a human readable message indicating details about the transition.
+                            This may be an empty string.
+                          maxLength: 32768
+                          type: string
+                        observedGeneration:
+                          description: |-
+                            observedGeneration represents the .metadata.generation that the condition was set based upon.
+                            For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                            with respect to the current state of the instance.
+                          format: int64
+                          minimum: 0
+                          type: integer
+                        reason:
+                          description: |-
+                            reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                            Producers of specific condition types may define expected values and meanings for this field,
+                            and whether the values are considered a guaranteed API.
+                            The value should be a CamelCase string.
+                            This field may not be empty.
+                          maxLength: 1024
+                          minLength: 1
+                          pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                          type: string
+                        status:
+                          description: status of the condition, one of True, False,
+                            Unknown.
+                          enum:
+                          - "True"
+                          - "False"
+                          - Unknown
+                          type: string
+                        type:
+                          description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                          maxLength: 316
+                          pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                          type: string
+                      required:
+                      - lastTransitionTime
+                      - message
+                      - reason
+                      - status
+                      - type
+                      type: object
+                    maxItems: 32
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - type
+                    x-kubernetes-list-type: map
+                type: object
+              vpc:
+                description: |-
+                  Important: Run "make" to regenerate code after modifying this file
+                  dep: rely on Network instead.
+                properties:
+                  id:
+                    type: string
+                  name:
+                    type: string
+                required:
+                - id
+                - name
+                type: object
+              vpcEndpoint:
+                description: VPCEndpoint describes a VPCEndpoint.
+                properties:
+                  address:
+                    type: string
+                  floatingIPID:
+                    description: 'Deprecated: This field has no function and is going
+                      to be removed in the next release.'
+                    type: string
+                  loadBalancerIPID:
+                    type: string
+                required:
+                - address
+                type: object
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+    service.beta.openshift.io/inject-cabundle: "true"
+  labels:
+    cluster.x-k8s.io/provider: infrastructure-ibmcloud
+    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+  name: ibmvpcmachines.infrastructure.cluster.x-k8s.io
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        caBundle: Cg==
+        service:
+          name: capi-ibmcloud-webhook-service
+          namespace: openshift-cluster-api
+          path: /convert
+      conversionReviewVersions:
+      - v1
+      - v1beta1
+  group: infrastructure.cluster.x-k8s.io
+  names:
+    categories:
+    - cluster-api
+    kind: IBMVPCMachine
+    listKind: IBMVPCMachineList
+    plural: ibmvpcmachines
+    singular: ibmvpcmachine
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Cluster infrastructure is ready for IBM VPC instances
+      jsonPath: .status.ready
+      name: Ready
+      type: string
+    name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: IBMVPCMachine is the Schema for the ibmvpcmachines API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMVPCMachineSpec defines the desired state of IBMVPCMachine.
+            properties:
+              bootVolume:
+                description: BootVolume contains machine's boot volume configurations
+                  like size, iops etc.
+                properties:
+                  deleteVolumeOnInstanceDelete:
+                    default: true
+                    description: |-
+                      DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
+                      Default is set as true
+                    type: boolean
+                  encryptionKeyCRN:
+                    description: |-
+                      EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
+                      and possible values are as follows.
+                      The CRN of the [Key Protect Root
+                      Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
+                      Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
+                      If unspecified, the `encryption` type for the volume will be `provider_managed`.
+                    type: string
+                  iops:
+                    description: |-
+                      Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
+                      family of `custom`.
+                    format: int64
+                    type: integer
+                  name:
+                    description: |-
+                      Name is the unique user-defined name for this volume.
+                      Default will be autogenerated
+                    type: string
+                  profile:
+                    default: general-purpose
+                    description: |-
+                      Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
+                      for more information.
+                      Default to general-purpose
+                    enum:
+                    - general-purpose
+                    - 5iops-tier
+                    - 10iops-tier
+                    - custom
+                    type: string
+                  sizeGiB:
+                    description: |-
+                      SizeGiB is the size of the virtual server's boot disk in GiB.
+                      Default to the size of the image's `minimum_provisioned_size`.
+                    format: int64
+                    type: integer
+                type: object
+              image:
+                description: |-
+                  Image is the id of OS image which would be installed on the instance.
+                  Example: r134-ed3f775f-ad7e-4e37-ae62-7199b4988b00
+                type: string
+              imageName:
+                description: ImageName is the name of OS image which would be installed
+                  on the instance.
+                type: string
+              name:
+                description: Name of the instance.
+                type: string
+              primaryNetworkInterface:
+                description: PrimaryNetworkInterface is required to specify subnet.
+                properties:
+                  subnet:
+                    description: Subnet ID of the network interface.
+                    type: string
+                type: object
+              profile:
+                description: "Profile indicates the flavor of instance. Example: bx2-8x32\tmeans
+                  8 vCPUs\t32 GB RAM\t16 Gbps"
+                type: string
+              providerID:
+                description: ProviderID is the unique identifier as specified by the
+                  cloud provider.
+                type: string
+              sshKeyNames:
+                description: SSHKeysNames is the SSH pub key names that will be used
+                  to access VM.
+                items:
+                  type: string
+                type: array
+              sshKeys:
+                description: SSHKeys is the SSH pub keys that will be used to access
+                  VM.
+                items:
+                  type: string
+                type: array
+              zone:
+                description: 'Zone is the place where the instance should be created.
+                  Example: us-south-3'
+                type: string
+            required:
+            - zone
+            type: object
+          status:
+            description: IBMVPCMachineStatus defines the observed state of IBMVPCMachine.
+            properties:
+              addresses:
+                description: Addresses contains the IBM Cloud instance associated addresses.
+                items:
+                  description: NodeAddress contains information for the node's address.
+                  properties:
+                    address:
+                      description: The node address.
+                      type: string
+                    type:
+                      description: Node address type, one of Hostname, ExternalIP
+                        or InternalIP.
+                      type: string
+                  required:
+                  - address
+                  - type
+                  type: object
+                type: array
+              instanceID:
+                type: string
+              instanceState:
+                description: InstanceStatus is the status of the IBM Cloud instance for
+                  this machine.
+                type: string
+              ready:
+                description: Ready is true when the provider resource is ready.
+                type: boolean
+            type: object
+        type: object
+    served: true
+    storage: false
+    subresources:
+      status: {}
+  - additionalPrinterColumns:
+    - description: Cluster infrastructure is ready for IBM VPC instances
+      jsonPath: .status.ready
+      name: Ready
+      type: string
+    name: v1beta2
+    schema:
+      openAPIV3Schema:
+        description: IBMVPCMachine is the Schema for the ibmvpcmachines API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMVPCMachineSpec defines the desired state of IBMVPCMachine.
+            properties:
+              bootVolume:
+                description: BootVolume contains machine's boot volume configurations
+                  like size, iops etc.
+                properties:
+                  deleteVolumeOnInstanceDelete:
+                    default: true
+                    description: |-
+                      DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
+                      Default is set as true
+                    type: boolean
+                  encryptionKeyCRN:
+                    description: |-
+                      EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
+                      and possible values are as follows.
+                      The CRN of the [Key Protect Root
+                      Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
+                      Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
+                      If unspecified, the `encryption` type for the volume will be `provider_managed`.
+                    type: string
+                  iops:
+                    description: |-
+                      Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
+                      family of `custom`.
+                    format: int64
+                    type: integer
+                  name:
+                    description: |-
+                      Name is the unique user-defined name for this volume.
+                      Default will be autogenerated
+                    type: string
+                  profile:
+                    default: general-purpose
+                    description: |-
+                      Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
+                      for more information.
+                      Default to general-purpose
+                    enum:
+                    - general-purpose
+                    - 5iops-tier
+                    - 10iops-tier
+                    - custom
+                    type: string
+                  sizeGiB:
+                    description: |-
+                      SizeGiB is the size of the virtual server's boot disk in GiB.
+                      Default to the size of the image's `minimum_provisioned_size`.
+                    format: int64
+                    type: integer
+                type: object
+              catalogOffering:
+                description: |-
+                  CatalogOffering is the Catalog Offering OS image which would be installed on the instance.
+                  An OfferingCRN or VersionCRN is required, the PlanCRN is optional.
+                properties:
+                  offeringCRN:
+                    description: |-
+                      OfferingCRN defines the IBM Cloud Catalog Offering CRN. Using the OfferingCRN expects that the latest version of the Offering will be used.
+                      If a specific version should be used instead, rely on VersionCRN.
+                    type: string
+                  planCRN:
+                    description: PlanCRN defines the IBM Cloud Catalog Offering Plan
+                      CRN to use for the Offering.
+                    type: string
+                  versionCRN:
+                    description: VersionCRN defines the IBM Cloud Catalog Offering
+                      Version CRN. A specific version of the Catalog Offering will
+                      be used, as defined by this CRN.
+                    type: string
+                type: object
+                x-kubernetes-validations:
+                - message: either offeringCRN or version CRN must be provided, not
+                    both
+                  rule: (has(self.offeringCRN) && !has(self.versionCRN)) || (!has(self.offeringCRN)
+                    && has(self.versionCRN))
+              image:
+                description: |-
+                  Image is the OS image which would be installed on the instance.
+                  ID will take higher precedence over Name if both specified.
+                properties:
+                  id:
+                    description: ID of resource
+                    minLength: 1
+                    type: string
+                  name:
+                    description: Name of resource
+                    minLength: 1
+                    type: string
+                type: object
+              loadBalancerPoolMembers:
+                description: LoadBalancerPoolMembers is the set of IBM Cloud VPC Load
+                  Balancer Backend Pools the machine should be added to as a member.
+                items:
+                  description: VPCLoadBalancerBackendPoolMember represents a VPC Load
+                    Balancer Backend Pool Member.
+                  properties:
+                    loadBalancer:
+                      description: LoadBalancer defines the Load Balancer the Pool
+                        Member is for.
+                      properties:
+                        id:
+                          description: id of the resource.
+                          minLength: 1
+                          type: string
+                        name:
+                          description: name of the resource.
+                          minLength: 1
+                          type: string
+                      type: object
+                      x-kubernetes-validations:
+                      - message: an id or name must be provided
+                        rule: has(self.id) || has(self.name)
+                    pool:
+                      description: Pool defines the Load Balancer Pool the Pool Member
+                        should be in.
+                      properties:
+                        id:
+                          description: id of the resource.
+                          minLength: 1
+                          type: string
+                        name:
+                          description: name of the resource.
+                          minLength: 1
+                          type: string
+                      type: object
+                      x-kubernetes-validations:
+                      - message: an id or name must be provided
+                        rule: has(self.id) || has(self.name)
+                    port:
+                      description: Port defines the Port the Load Balancer Pool Member
+                        listens for traffic.
+                      format: int64
+                      type: integer
+                    weight:
+                      description: Weight of the service member. Only applicable if
+                        the pool algorithm is "weighted_round_robin".
+                      format: int64
+                      type: integer
+                  required:
+                  - loadBalancer
+                  - pool
+                  - port
+                  type: object
+                type: array
+              name:
+                description: Name of the instance.
+                type: string
+              placementTarget:
+                description: PlacementTarget is the placement restrictions to use
+                  for the virtual server instance. No restrictions are used when this
+                  field is not defined.
+                properties:
+                  dedicatedHost:
+                    description: DedicatedHost defines the Dedicated Host to place
+                      a VPC Machine (Instance) on.
+                    properties:
+                      id:
+                        description: id of the resource.
+                        minLength: 1
+                        type: string
+                      name:
+                        description: name of the resource.
+                        minLength: 1
+                        type: string
+                    type: object
+                    x-kubernetes-validations:
+                    - message: an id or name must be provided
+                      rule: has(self.id) || has(self.name)
+                  dedicatedHostGroup:
+                    description: DedicatedHostGroup defines the Dedicated Host Group
+                      to use when placing a VPC Machine (Instance).
+                    properties:
+                      id:
+                        description: id of the resource.
+                        minLength: 1
+                        type: string
+                      name:
+                        description: name of the resource.
+                        minLength: 1
+                        type: string
+                    type: object
+                    x-kubernetes-validations:
+                    - message: an id or name must be provided
+                      rule: has(self.id) || has(self.name)
+                  placementGroup:
+                    description: PlacementGroup defines the Placement Group to use
+                      when placing a VPC Machine (Instance).
+                    properties:
+                      id:
+                        description: id of the resource.
+                        minLength: 1
+                        type: string
+                      name:
+                        description: name of the resource.
+                        minLength: 1
+                        type: string
+                    type: object
+                    x-kubernetes-validations:
+                    - message: an id or name must be provided
+                      rule: has(self.id) || has(self.name)
+                type: object
+                x-kubernetes-validations:
+                - message: only one of dedicatedHost, dedicatedHostGroup, or placementGroup
+                    must be defined for machine placement
+                  rule: (has(self.dedicatedHost) && !has(self.dedicatedHostGroup)
+                    && !has(self.placementGroup)) || (!has(self.dedicatedHost) &&
+                    has(self.dedicatedHostGroup) && !has(self.placementGroup)) ||
+                    (!has(self.dedicatedHost) && !has(self.dedicatedHostGroup) &&
+                    has(self.placementGroup))
+              primaryNetworkInterface:
+                description: PrimaryNetworkInterface is required to specify subnet.
+                properties:
+                  securityGroups:
+                    description: SecurityGroups defines a set of IBM Cloud VPC Security
+                      Groups to attach to the network interface.
+                    items:
+                      description: VPCResource represents a VPC resource.
+                      properties:
+                        id:
+                          description: id of the resource.
+                          minLength: 1
+                          type: string
+                        name:
+                          description: name of the resource.
+                          minLength: 1
+                          type: string
+                      type: object
+                      x-kubernetes-validations:
+                      - message: an id or name must be provided
+                        rule: has(self.id) || has(self.name)
+                    type: array
+                  subnet:
+                    description: Subnet ID of the network interface.
+                    type: string
+                type: object
+              profile:
+                description: "Profile indicates the flavor of instance. Example: bx2-8x32\tmeans 8 vCPUs\t32 GB RAM\t16 Gbps"
+                type: string
+              providerID:
+                description: ProviderID is the unique identifier as specified by the
+                  cloud provider.
+                type: string
+              sshKeys:
+                description: |-
+                  SSHKeys is the list of SSH public keys that will be used to access the VM.
+                  ID will take higher precedence over Name if both specified.
+                items:
+                  description: |-
+                    IBMVPCResourceReference is a reference to a specific VPC resource by ID or Name
+                    Only one of ID or Name may be specified. Specifying more than one will result in
+                    a validation error.
+                  properties:
+                    id:
+                      description: ID of resource
+                      minLength: 1
+                      type: string
+                    name:
+                      description: Name of resource
+                      minLength: 1
+                      type: string
+                  type: object
+                type: array
+              zone:
+                description: 'Zone is the place where the instance should be created.
+                  Example: us-south-3'
+                type: string
+            required:
+            - image
+            - zone
+            type: object
+          status:
+            description: IBMVPCMachineStatus defines the observed state of IBMVPCMachine.
+            properties:
+              addresses:
+                description: Addresses contains the IBM Cloud instance's associated
+                  addresses.
+                items:
+                  description: NodeAddress contains information for the node's address.
+                  properties:
+                    address:
+                      description: The node address.
+                      type: string
+                    type:
+                      description: Node address type, one of Hostname, ExternalIP
+                        or InternalIP.
+                      type: string
+                  required:
+                  - address
+                  - type
+                  type: object
+                type: array
+              conditions:
+                description: Conditions defines current service state of the IBMVPCMachine.
+                items:
+                  description: Condition defines an observation of a Cluster API resource
+                    operational state.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when
+                        the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This field may be empty.
+                      maxLength: 10240
+                      minLength: 1
+                      type: string
+                    reason:
+                      description: |-
+                        reason is the reason for the condition's last transition in CamelCase.
+                        The specific API may choose whether or not this field is considered a guaranteed API.
+                        This field may be empty.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                    severity:
+                      description: |-
+                        severity provides an explicit classification of Reason code, so the users or machines can immediately
+                        understand the current situation and act accordingly.
+                        The Severity field MUST be set only when Status=False.
+                      maxLength: 32
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      type: string
+                    type:
+                      description: |-
+                        type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                        can be useful (see .node.status.conditions), the ability to deconflict is important.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              failureMessage:
+                description: |-
+                  FailureMessage will be set in the event that there is a terminal problem
+                  reconciling the Machine and will contain a more verbose string suitable
+                  for logging and human consumption.
+                type: string
+              failureReason:
+                description: |-
+                  FailureReason will be set in the event that there is a terminal problem
+                  reconciling the Machine and will contain a succinct value suitable
+                  for machine interpretation.
+                type: string
+              instanceID:
+                description: InstanceID defines the IBM Cloud VPC Instance UUID.
+                type: string
+              instanceState:
+                description: InstanceState is the status of the IBM Cloud instance
+                  for this machine.
+                type: string
+              loadBalancerPoolMembers:
+                description: LoadBalancerPoolMembers is the status of the IBM Cloud VPC
+                  Load Balancer Backend Pools the machine is a member of.
+                items:
+                  description: VPCLoadBalancerBackendPoolMember represents a VPC Load
+                    Balancer Backend Pool Member.
+                  properties:
+                    loadBalancer:
+                      description: LoadBalancer defines the Load Balancer the Pool
+                        Member is for.
+                      properties:
+                        id:
+                          description: id of the resource.
+                          minLength: 1
+                          type: string
+                        name:
+                          description: name of the resource.
+                          minLength: 1
+                          type: string
+                      type: object
+                      x-kubernetes-validations:
+                      - message: an id or name must be provided
+                        rule: has(self.id) || has(self.name)
+                    pool:
+                      description: Pool defines the Load Balancer Pool the Pool Member
+                        should be in.
+                      properties:
+                        id:
+                          description: id of the resource.
+                          minLength: 1
+                          type: string
+                        name:
+                          description: name of the resource.
+                          minLength: 1
+                          type: string
+                      type: object
+                      x-kubernetes-validations:
+                      - message: an id or name must be provided
+                        rule: has(self.id) || has(self.name)
+                    port:
+                      description: Port defines the Port the Load Balancer Pool Member
+                        listens for traffic.
+                      format: int64
+                      type: integer
+                    weight:
+                      description: Weight of the service member. Only applicable if
+                        the pool algorithm is "weighted_round_robin".
+                      format: int64
+                      type: integer
+                  required:
+                  - loadBalancer
+                  - pool
+                  - port
+                  type: object
+                type: array
+              ready:
+                description: Ready is true when the provider resource is ready.
+                type: boolean
+              v1beta2:
+                description: V1beta2 groups all the fields that will be added or modified
+                  in IBMVPCMachine's status with the V1Beta2 version.
+                properties:
+                  conditions:
+                    description: Conditions represents the observations of an IBMVPCMachine's
+                      current state.
+                    items:
+                      description: Condition contains details for one aspect of the
+                        current state of this API Resource.
+                      properties:
+                        lastTransitionTime:
+                          description: |-
+                            lastTransitionTime is the last time the condition transitioned from one status to another.
+                            This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                          format: date-time
+                          type: string
+                        message:
+                          description: |-
+                            message is a human readable message indicating details about the transition.
+                            This may be an empty string.
+                          maxLength: 32768
+                          type: string
+                        observedGeneration:
+                          description: |-
+                            observedGeneration represents the .metadata.generation that the condition was set based upon.
+                            For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                            with respect to the current state of the instance.
+                          format: int64
+                          minimum: 0
+                          type: integer
+                        reason:
+                          description: |-
+                            reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                            Producers of specific condition types may define expected values and meanings for this field,
+                            and whether the values are considered a guaranteed API.
+                            The value should be a CamelCase string.
+                            This field may not be empty.
+                          maxLength: 1024
+                          minLength: 1
+                          pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                          type: string
+                        status:
+                          description: status of the condition, one of True, False,
+                            Unknown.
+                          enum:
+                          - "True"
+                          - "False"
+                          - Unknown
+                          type: string
+                        type:
+                          description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                          maxLength: 316
+                          pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                          type: string
+                      required:
+                      - lastTransitionTime
+                      - message
+                      - reason
+                      - status
+                      - type
+                      type: object
+                    maxItems: 32
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - type
+                    x-kubernetes-list-type: map
+                type: object
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+    service.beta.openshift.io/inject-cabundle: "true"
+  labels:
+    cluster.x-k8s.io/provider: infrastructure-ibmcloud
+    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+  name: ibmvpcmachinetemplates.infrastructure.cluster.x-k8s.io
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        caBundle: Cg==
+        service:
+          name: capi-ibmcloud-webhook-service
+          namespace: openshift-cluster-api
+          path: /convert
+      conversionReviewVersions:
+      - v1
+      - v1beta1
+  group: infrastructure.cluster.x-k8s.io
+  names:
+    categories:
+    - cluster-api
+    kind: IBMVPCMachineTemplate
+    listKind: IBMVPCMachineTemplateList
+    plural: ibmvpcmachinetemplates
+    singular: ibmvpcmachinetemplate
+  scope: Namespaced
+  versions:
+  - name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: IBMVPCMachineTemplate is the Schema for the ibmvpcmachinetemplates
+          API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMVPCMachineTemplateSpec defines the desired state of IBMVPCMachineTemplate.
+            properties:
+              template:
+                description: IBMVPCMachineTemplateResource describes the data needed
+                  to create an IBMVPCMachine from a template.
+                properties:
+                  spec:
+                    description: Spec is the specification of the desired behavior
+                      of the machine.
+                    properties:
+                      bootVolume:
+                        description: BootVolume contains the machine's boot volume
+                          configurations like size, iops etc.
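+                        # Editor's note (illustrative only, not generated): with the
+                        # custom profile, where iops applies, a bootVolume might be:
+                        #   bootVolume:
+                        #     profile: custom
+                        #     sizeGiB: 120
+                        #     iops: 3000
+                        #     deleteVolumeOnInstanceDelete: true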
+                        properties:
+                          deleteVolumeOnInstanceDelete:
+                            default: true
+                            description: |-
+                              DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
+                              Default is set as true
+                            type: boolean
+                          encryptionKeyCRN:
+                            description: |-
+                              EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
+                              and possible values are as follows.
+                              The CRN of the [Key Protect Root
+                              Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
+                              Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
+                              If unspecified, the `encryption` type for the volume will be `provider_managed`.
+                            type: string
+                          iops:
+                            description: |-
+                              Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
+                              family of `custom`.
+                            format: int64
+                            type: integer
+                          name:
+                            description: |-
+                              Name is the unique user-defined name for this volume.
+                              Default will be autogenerated
+                            type: string
+                          profile:
+                            default: general-purpose
+                            description: |-
+                              Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
+                              for more information.
+                              Default to general-purpose
+                            enum:
+                            - general-purpose
+                            - 5iops-tier
+                            - 10iops-tier
+                            - custom
+                            type: string
+                          sizeGiB:
+                            description: |-
+                              SizeGiB is the size of the virtual server's boot disk in GiB.
+                              Default to the size of the image's `minimum_provisioned_size`.
+                            format: int64
+                            type: integer
+                        type: object
+                      image:
+                        description: |-
+                          Image is the ID of the OS image which will be installed on the instance.
+                          Example: r134-ed3f775f-ad7e-4e37-ae62-7199b4988b00
+                        type: string
+                      imageName:
+                        description: ImageName is the name of the OS image which will
+                          be installed on the instance.
+                        type: string
+                      name:
+                        description: Name of the instance.
+                        type: string
+                      primaryNetworkInterface:
+                        description: PrimaryNetworkInterface is required to specify
+                          subnet.
+                        properties:
+                          subnet:
+                            description: Subnet ID of the network interface.
+                            type: string
+                        type: object
+                      profile:
+                        description: "Profile indicates the flavor of instance. Example:
+                          bx2-8x32\tmeans 8 vCPUs\t32 GB RAM\t16 Gbps"
+                        type: string
+                      providerID:
+                        description: ProviderID is the unique identifier as specified
+                          by the cloud provider.
+                        type: string
+                      sshKeyNames:
+                        description: SSHKeyNames is the list of SSH public key names
+                          that will be used to access the VM.
+                        items:
+                          type: string
+                        type: array
+                      sshKeys:
+                        description: SSHKeys is the list of SSH public keys that will
+                          be used to access the VM.
+                        items:
+                          type: string
+                        type: array
+                      zone:
+                        description: 'Zone is the place where the instance should
+                          be created. Example: us-south-3'
+                        type: string
+                    required:
+                    - zone
+                    type: object
+                required:
+                - spec
+                type: object
+            required:
+            - template
+            type: object
+          status:
+            description: IBMVPCMachineTemplateStatus defines the observed state of
+              IBMVPCMachineTemplate.
+            type: object
+        type: object
+    served: true
+    storage: false
+  - name: v1beta2
+    schema:
+      openAPIV3Schema:
+        description: IBMVPCMachineTemplate is the Schema for the ibmvpcmachinetemplates
+          API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMVPCMachineTemplateSpec defines the desired state of IBMVPCMachineTemplate.
+            properties:
+              template:
+                description: IBMVPCMachineTemplateResource describes the data needed
+                  to create an IBMVPCMachine from a template.
+                properties:
+                  spec:
+                    description: Spec is the specification of the desired behavior
+                      of the machine.
+                    properties:
+                      bootVolume:
+                        description: BootVolume contains the machine's boot volume
+                          configurations like size, iops etc.
+                        properties:
+                          deleteVolumeOnInstanceDelete:
+                            default: true
+                            description: |-
+                              DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
+                              Default is set as true
+                            type: boolean
+                          encryptionKeyCRN:
+                            description: |-
+                              EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
+                              and possible values are as follows.
+                              The CRN of the [Key Protect Root
+                              Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
+                              Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
+                              If unspecified, the `encryption` type for the volume will be `provider_managed`.
+                            type: string
+                          iops:
+                            description: |-
+                              Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
+                              family of `custom`.
+                            format: int64
+                            type: integer
+                          name:
+                            description: |-
+                              Name is the unique user-defined name for this volume.
+                              Default will be autogenerated
+                            type: string
+                          profile:
+                            default: general-purpose
+                            description: |-
+                              Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
+                              for more information.
+                              Default to general-purpose
+                            enum:
+                            - general-purpose
+                            - 5iops-tier
+                            - 10iops-tier
+                            - custom
+                            type: string
+                          sizeGiB:
+                            description: |-
+                              SizeGiB is the size of the virtual server's boot disk in GiB.
+                              Default to the size of the image's `minimum_provisioned_size`.
+                            format: int64
+                            type: integer
+                        type: object
+                      catalogOffering:
+                        description: |-
+                          CatalogOffering is the Catalog Offering OS image which would be installed on the instance.
+                          An OfferingCRN or VersionCRN is required, the PlanCRN is optional.
+                        properties:
+                          offeringCRN:
+                            description: |-
+                              OfferingCRN defines the IBM Cloud Catalog Offering CRN. Using the OfferingCRN expects that the latest version of the Offering will be used.
+                              If a specific version should be used instead, rely on VersionCRN.
+                            type: string
+                          planCRN:
+                            description: PlanCRN defines the IBM Cloud Catalog Offering
+                              Plan CRN to use for the Offering.
+                            type: string
+                          versionCRN:
+                            description: VersionCRN defines the IBM Cloud Catalog
+                              Offering Version CRN. A specific version of the Catalog
+                              Offering will be used, as defined by this CRN.
+                            type: string
+                        type: object
+                        x-kubernetes-validations:
+                        - message: either offeringCRN or versionCRN must be provided,
+                            not both
+                          rule: (has(self.offeringCRN) && !has(self.versionCRN)) ||
+                            (!has(self.offeringCRN) && has(self.versionCRN))
+                      image:
+                        description: |-
+                          Image is the OS image which will be installed on the instance.
+                          ID will take higher precedence over Name if both specified.
+                        properties:
+                          id:
+                            description: ID of resource
+                            minLength: 1
+                            type: string
+                          name:
+                            description: Name of resource
+                            minLength: 1
+                            type: string
+                        type: object
+                      loadBalancerPoolMembers:
+                        description: LoadBalancerPoolMembers is the set of IBM Cloud
+                          VPC Load Balancer Backend Pools the machine should be added
+                          to as a member.
+                        items:
+                          description: VPCLoadBalancerBackendPoolMember represents
+                            a VPC Load Balancer Backend Pool Member.
+                          properties:
+                            loadBalancer:
+                              description: LoadBalancer defines the Load Balancer
+                                the Pool Member is for.
+                              properties:
+                                id:
+                                  description: id of the resource.
+                                  minLength: 1
+                                  type: string
+                                name:
+                                  description: name of the resource.
+                                  minLength: 1
+                                  type: string
+                              type: object
+                              x-kubernetes-validations:
+                              - message: an id or name must be provided
+                                rule: has(self.id) || has(self.name)
+                            pool:
+                              description: Pool defines the Load Balancer Pool the
+                                Pool Member should be in.
+                              properties:
+                                id:
+                                  description: id of the resource.
+                                  minLength: 1
+                                  type: string
+                                name:
+                                  description: name of the resource.
+                                  minLength: 1
+                                  type: string
+                              type: object
+                              x-kubernetes-validations:
+                              - message: an id or name must be provided
+                                rule: has(self.id) || has(self.name)
+                            port:
+                              description: Port defines the Port the Load Balancer
+                                Pool Member listens for traffic.
+                              format: int64
+                              type: integer
+                            weight:
+                              description: Weight of the service member. Only applicable
+                                if the pool algorithm is "weighted_round_robin".
+                              format: int64
+                              type: integer
+                          required:
+                          - loadBalancer
+                          - pool
+                          - port
+                          type: object
+                        type: array
+                      name:
+                        description: Name of the instance.
+                        type: string
+                      placementTarget:
+                        description: PlacementTarget is the placement restrictions
+                          to use for the virtual server instance. No restrictions
+                          are used when this field is not defined.
+                        properties:
+                          dedicatedHost:
+                            description: DedicatedHost defines the Dedicated Host
+                              to place a VPC Machine (Instance) on.
+                            properties:
+                              id:
+                                description: id of the resource.
+                                minLength: 1
+                                type: string
+                              name:
+                                description: name of the resource.
+                                minLength: 1
+                                type: string
+                            type: object
+                            x-kubernetes-validations:
+                            - message: an id or name must be provided
+                              rule: has(self.id) || has(self.name)
+                          dedicatedHostGroup:
+                            description: DedicatedHostGroup defines the Dedicated
+                              Host Group to use when placing a VPC Machine (Instance).
+                            properties:
+                              id:
+                                description: id of the resource.
+                                minLength: 1
+                                type: string
+                              name:
+                                description: name of the resource.
+                                minLength: 1
+                                type: string
+                            type: object
+                            x-kubernetes-validations:
+                            - message: an id or name must be provided
+                              rule: has(self.id) || has(self.name)
+                          placementGroup:
+                            description: PlacementGroup defines the Placement Group
+                              to use when placing a VPC Machine (Instance).
+                            properties:
+                              id:
+                                description: id of the resource.
+                                minLength: 1
+                                type: string
+                              name:
+                                description: name of the resource.
+                                minLength: 1
+                                type: string
+                            type: object
+                            x-kubernetes-validations:
+                            - message: an id or name must be provided
+                              rule: has(self.id) || has(self.name)
+                        type: object
+                        x-kubernetes-validations:
+                        - message: only one of dedicatedHost, dedicatedHostGroup,
+                            or placementGroup must be defined for machine placement
+                          rule: (has(self.dedicatedHost) && !has(self.dedicatedHostGroup)
+                            && !has(self.placementGroup)) || (!has(self.dedicatedHost)
+                            && has(self.dedicatedHostGroup) && !has(self.placementGroup))
+                            || (!has(self.dedicatedHost) && !has(self.dedicatedHostGroup)
+                            && has(self.placementGroup))
+                      primaryNetworkInterface:
+                        description: PrimaryNetworkInterface is required to specify
+                          subnet.
+                        properties:
+                          securityGroups:
+                            description: SecurityGroups defines a set of IBM Cloud
+                              VPC Security Groups to attach to the network interface.
+                            items:
+                              description: VPCResource represents a VPC resource.
+                              properties:
+                                id:
+                                  description: id of the resource.
+                                  minLength: 1
+                                  type: string
+                                name:
+                                  description: name of the resource.
+                                  minLength: 1
+                                  type: string
+                              type: object
+                              x-kubernetes-validations:
+                              - message: an id or name must be provided
+                                rule: has(self.id) || has(self.name)
+                            type: array
+                          subnet:
+                            description: Subnet ID of the network interface.
+                            type: string
+                        type: object
+                      profile:
+                        description: "Profile indicates the flavor of instance. Example:
+                          bx2-8x32\tmeans 8 vCPUs\t32 GB RAM\t16 Gbps"
+                        type: string
+                      providerID:
+                        description: ProviderID is the unique identifier as specified
+                          by the cloud provider.
+                        type: string
+                      sshKeys:
+                        description: |-
+                          SSHKeys is the list of SSH public keys that will be used to access the VM.
+                          ID will take higher precedence over Name if both specified.
+                        items:
+                          description: |-
+                            IBMVPCResourceReference is a reference to a specific VPC resource by ID or Name
+                            Only one of ID or Name may be specified. Specifying more than one will result in
+                            a validation error.
+                          properties:
+                            id:
+                              description: ID of resource
+                              minLength: 1
+                              type: string
+                            name:
+                              description: Name of resource
+                              minLength: 1
+                              type: string
+                          type: object
+                        type: array
+                      zone:
+                        description: 'Zone is the place where the instance should
+                          be created. Example: us-south-3'
+                        type: string
+                    required:
+                    - image
+                    - zone
+                    type: object
+                required:
+                - spec
+                type: object
+            required:
+            - template
+            type: object
+          status:
+            description: IBMVPCMachineTemplateStatus defines the observed state of
+              IBMVPCMachineTemplate.
+            properties:
+              capacity:
+                additionalProperties:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
+                description: |-
+                  Capacity defines the resource capacity for this machine.
+                  This value is used for autoscaling from zero operations as defined in:
+                  https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
+                type: object
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+    service.beta.openshift.io/inject-cabundle: "true"
+  labels:
+    cluster.x-k8s.io/provider: infrastructure-ibmcloud
+    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+  name: ibmpowervsclusters.infrastructure.cluster.x-k8s.io
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        caBundle: Cg==
+        service:
+          name: capi-ibmcloud-webhook-service
+          namespace: openshift-cluster-api
+          path: /convert
+      conversionReviewVersions:
+      - v1
+      - v1beta1
+  group: infrastructure.cluster.x-k8s.io
+  names:
+    kind: IBMPowerVSCluster
+    listKind: IBMPowerVSClusterList
+    plural: ibmpowervsclusters
+    singular: ibmpowervscluster
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Cluster to which this IBMPowerVSCluster belongs
+      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+      name: Cluster
+      type: string
+    - description: Time duration since creation of IBMPowerVSCluster
+      jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - jsonPath: .spec.serviceInstanceID
+      name: PowerVS Cloud Instance ID
+      priority: 1
+      type: string
+    - description: Control Plane Endpoint
+      jsonPath: .spec.controlPlaneEndpoint.host
+      name: Endpoint
+      priority: 1
+      type: string
+    - description: Control Plane Port
+      jsonPath: .spec.controlPlaneEndpoint.port
+      name: Port
+      priority: 1
+      type: string
+    name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: IBMPowerVSCluster is the Schema for the ibmpowervsclusters API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMPowerVSClusterSpec defines the desired state of IBMPowerVSCluster.
+            properties:
+              controlPlaneEndpoint:
+                description: ControlPlaneEndpoint represents the endpoint used to
+                  communicate with the control plane.
+                properties:
+                  host:
+                    description: host is the hostname on which the API server is serving.
+                    maxLength: 512
+                    type: string
+                  port:
+                    description: port is the port on which the API server is serving.
+                    format: int32
+                    type: integer
+                required:
+                - host
+                - port
+                type: object
+              network:
+                description: Network is the reference to the Network to use for this
+                  cluster.
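+                # Editor's note (illustrative, not generated): together with the
+                # required serviceInstanceID, a minimal network reference might
+                # look like the following; values are placeholders.
+                #   spec:
+                #     serviceInstanceID: "0a1b2c3d-example"
+                #     network:
+                #       name: example-network   # placeholder; id or regex may be used instead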
+                properties:
+                  id:
+                    description: ID of resource
+                    minLength: 1
+                    type: string
+                  name:
+                    description: Name of resource
+                    minLength: 1
+                    type: string
+                  regex:
+                    description: |-
+                      Regular expression to match resource,
+                      In case of multiple resources matches the provided regular expression the first matched resource will be selected
+                    minLength: 1
+                    type: string
+                type: object
+              serviceInstanceID:
+                description: ServiceInstanceID is the id of the power cloud instance
+                  where the vsi instance will get deployed.
+                minLength: 1
+                type: string
+            required:
+            - network
+            - serviceInstanceID
+            type: object
+          status:
+            description: IBMPowerVSClusterStatus defines the observed state of IBMPowerVSCluster.
+            properties:
+              ready:
+                description: Ready is true when the cluster infrastructure is ready.
+                type: boolean
+            required:
+            - ready
+            type: object
+        type: object
+    served: true
+    storage: false
+    subresources:
+      status: {}
+  - additionalPrinterColumns:
+    - description: Cluster to which this IBMPowerVSCluster belongs
+      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+      name: Cluster
+      type: string
+    - description: Time duration since creation of IBMPowerVSCluster
+      jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - jsonPath: .spec.serviceInstanceID
+      name: PowerVS Cloud Instance ID
+      priority: 1
+      type: string
+    - description: Control Plane Endpoint
+      jsonPath: .spec.controlPlaneEndpoint.host
+      name: Endpoint
+      priority: 1
+      type: string
+    - description: Control Plane Port
+      jsonPath: .spec.controlPlaneEndpoint.port
+      name: Port
+      priority: 1
+      type: string
+    name: v1beta2
+    schema:
+      openAPIV3Schema:
+        description: IBMPowerVSCluster is the Schema for the ibmpowervsclusters API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMPowerVSClusterSpec defines the desired state of IBMPowerVSCluster.
+            properties:
+              controlPlaneEndpoint:
+                description: ControlPlaneEndpoint represents the endpoint used to
+                  communicate with the control plane.
+                properties:
+                  host:
+                    description: host is the hostname on which the API server is serving.
+                    maxLength: 512
+                    type: string
+                  port:
+                    description: port is the port on which the API server is serving.
+                    format: int32
+                    type: integer
+                required:
+                - host
+                - port
+                type: object
+              cosInstance:
+                description: |-
+                  cosInstance contains options to configure a supporting IBM Cloud COS bucket for this
+                  cluster - currently used for nodes requiring Ignition
+                  (https://coreos.github.io/ignition/) for bootstrapping (requires
+                  BootstrapFormatIgnition feature flag to be enabled).
+                  when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource and Ignition is set, then
+                  1. CosInstance.Name should be set; not setting it will result in a webhook error.
+                  2. CosInstance.BucketName should be set; not setting it will result in a webhook error.
+                  3. CosInstance.BucketRegion should be set; not setting it will result in a webhook error.
+                properties:
+                  bucketName:
+                    description: bucketName is IBM cloud COS bucket name
+                    type: string
+                  bucketRegion:
+                    description: bucketRegion is IBM cloud COS bucket region
+                    type: string
+                  name:
+                    description: |-
+                      name defines name of IBM cloud COS instance to be created.
+                      when IBMPowerVSCluster.Ignition is set
+                    maxLength: 63
+                    minLength: 3
+                    pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+                    type: string
+                type: object
+              dhcpServer:
+                description: |-
+                  dhcpServer contains the configuration to be used while creating a new DHCP server in PowerVS workspace.
+                  when the field is omitted, CLUSTER_NAME will be used as DHCPServer.Name and DHCP server will be created.
+                  it will automatically create network with name DHCPSERVER_Private in PowerVS workspace.
+                properties:
+                  cidr:
+                    description: Optional cidr for DHCP private network
+                    type: string
+                  dnsServer:
+                    default: 1.1.1.1
+                    description: Optional DNS Server for DHCP service
+                    type: string
+                  id:
+                    description: Optional id of the existing DHCPServer
+                    type: string
+                  name:
+                    description: Optional name of DHCP Service. Only alphanumeric
+                      characters and dashes are allowed.
+                    type: string
+                  snat:
+                    default: true
+                    description: Optional indicates if SNAT will be enabled for DHCP
+                      service
+                    type: boolean
+                type: object
+              ignition:
+                description: Ignition defined options related to the bootstrapping
+                  systems where Ignition is used.
+                properties:
+                  version:
+                    default: "2.3"
+                    description: Version defines which version of Ignition will be
+                      used to generate bootstrap data.
+                    enum:
+                    - "2.3"
+                    - "2.4"
+                    - "3.0"
+                    - "3.1"
+                    - "3.2"
+                    - "3.3"
+                    - "3.4"
+                    type: string
+                type: object
+              loadBalancers:
+                description: |-
+                  loadBalancers is optional configuration for configuring loadbalancers to control plane or data plane nodes.
+                  when omitted system will create a default public loadbalancer with name CLUSTER_NAME-loadbalancer.
+                  when specified a vpc loadbalancer will be created and controlPlaneEndpoint will be set with associated hostname of loadbalancer.
+                  ControlPlaneEndpoint will be set with associated hostname of public loadbalancer.
+                  when LoadBalancers[].ID is set, its expected that there exist a loadbalancer with ID or else system will give error.
+                  when LoadBalancers[].Name is set, system will first check for loadbalancer with Name, if not exist system will create new loadbalancer.
+                  For each loadbalancer a default backend pool and front-end listener will be configured with port 6443.
+                items:
+                  description: VPCLoadBalancerSpec defines the desired state of an
+                    VPC load balancer.
+                  properties:
+                    additionalListeners:
+                      description: AdditionalListeners sets the additional listeners
+                        for the control plane load balancer.
+                      items:
+                        description: |-
+                          AdditionalListenerSpec defines the desired state of an
+                          additional listener on an VPC load balancer.
+                        properties:
+                          defaultPoolName:
+                            description: defaultPoolName defines the name of a VPC
+                              Load Balancer Backend Pool to use for the VPC Load Balancer
+                              Listener.
+                            maxLength: 63
+                            minLength: 1
+                            pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                            type: string
+                          port:
+                            description: Port sets the port for the additional listener.
+                            format: int64
+                            maximum: 65535
+                            minimum: 1
+                            type: integer
+                          protocol:
+                            description: |-
+                              protocol defines the protocol to use for the VPC Load Balancer Listener.
+                              Will default to TCP protocol if not specified.
+                            enum:
+                            - http
+                            - https
+                            - tcp
+                            - udp
+                            type: string
+                          selector:
+                            description: |-
+                              The selector is used to find IBMPowerVSMachines with matching labels.
+                              If the label matches, the machine is then added to the load balancer listener configuration.
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label selector
+                                  requirements. The requirements are ANDed.
+                                items:
+                                  description: |-
+                                    A label selector requirement is a selector that contains values, a key, and an operator that
+                                    relates the key and values.
+                                  properties:
+                                    key:
+                                      description: key is the label key that the selector
+                                        applies to.
+                                      type: string
+                                    operator:
+                                      description: |-
+                                        operator represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists and DoesNotExist.
+                                      type: string
+                                    values:
+                                      description: |-
+                                        values is an array of string values. If the operator is In or NotIn,
+                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                        the values array must be empty. This array is replaced during a strategic
+                                        merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              matchLabels:
+                                additionalProperties:
+                                  type: string
+                                description: |-
+                                  matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                  map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                  operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                type: object
+                            type: object
+                            x-kubernetes-map-type: atomic
+                        required:
+                        - port
+                        type: object
+                      type: array
+                      x-kubernetes-list-map-keys:
+                      - port
+                      x-kubernetes-list-type: map
+                    backendPools:
+                      description: backendPools defines the load balancer's backend
+                        pools.
+                      items:
+                        description: VPCLoadBalancerBackendPoolSpec defines the desired
+                          configuration of a VPC Load Balancer Backend Pool.
+                        properties:
+                          algorithm:
+                            description: algorithm defines the load balancing algorithm
+                              to use.
+                            enum:
+                            - least_connections
+                            - round_robin
+                            - weighted_round_robin
+                            type: string
+                          healthMonitor:
+                            description: healthMonitor defines the backend pool's
+                              health monitor.
+                            properties:
+                              delay:
+                                description: delay defines the seconds to wait between
+                                  health checks.
+                                format: int64
+                                maximum: 60
+                                minimum: 2
+                                type: integer
+                              port:
+                                description: port defines the port to perform health
+                                  monitoring on.
+                                format: int64
+                                maximum: 65535
+                                minimum: 1
+                                type: integer
+                              retries:
+                                description: retries defines the max retries for health
+                                  check.
+                                format: int64
+                                maximum: 10
+                                minimum: 1
+                                type: integer
+                              timeout:
+                                description: timeout defines the seconds to wait for
+                                  a health check response.
+                                format: int64
+                                maximum: 59
+                                minimum: 1
+                                type: integer
+                              type:
+                                description: type defines the protocol used for health
+                                  checks.
+                                enum:
+                                - http
+                                - https
+                                - tcp
+                                type: string
+                              urlPath:
+                                description: urlPath defines the URL to use for health
+                                  monitoring.
+                                pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$
+                                type: string
+                            required:
+                            - delay
+                            - retries
+                            - timeout
+                            - type
+                            type: object
+                          name:
+                            description: name defines the name of the Backend Pool.
+                            maxLength: 63
+                            minLength: 1
+                            pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                            type: string
+                          protocol:
+                            description: protocol defines the protocol to use for
+                              the Backend Pool.
+                            enum:
+                            - http
+                            - https
+                            - tcp
+                            - udp
+                            type: string
+                        required:
+                        - algorithm
+                        - healthMonitor
+                        - protocol
+                        type: object
+                      type: array
+                    id:
+                      description: id of the loadbalancer
+                      maxLength: 64
+                      minLength: 1
+                      pattern: ^[-0-9a-z_]+$
+                      type: string
+                    name:
+                      description: Name sets the name of the VPC load balancer.
+                      maxLength: 63
+                      minLength: 1
+                      pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                      type: string
+                    public:
+                      default: true
+                      description: public indicates whether the load balancer is public
+                        or private
+                      type: boolean
+                    securityGroups:
+                      description: |-
+                        securityGroups defines the Security Groups to attach to the load balancer.
+                        Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                      items:
+                        description: VPCResource represents a VPC resource.
+                        properties:
+                          id:
+                            description: id of the resource.
+                            minLength: 1
+                            type: string
+                          name:
+                            description: name of the resource.
+                            minLength: 1
+                            type: string
+                        type: object
+                        x-kubernetes-validations:
+                        - message: an id or name must be provided
+                          rule: has(self.id) || has(self.name)
+                      type: array
+                    subnets:
+                      description: |-
+                        subnets defines the VPC Subnets to attach to the load balancer.
+                        Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                      items:
+                        description: VPCResource represents a VPC resource.
+                        properties:
+                          id:
+                            description: id of the resource.
+                            minLength: 1
+                            type: string
+                          name:
+                            description: name of the resource.
+                            minLength: 1
+                            type: string
+                        type: object
+                        x-kubernetes-validations:
+                        - message: an id or name must be provided
+                          rule: has(self.id) || has(self.name)
+                      type: array
+                  type: object
+                type: array
+              network:
+                description: |-
+                  Network is the reference to the Network to use for this cluster.
+                  when the field is omitted, a DHCP service will be created in the Power VS workspace and its private network will be used.
+                  the DHCP service created network will have the following name format
+                  1. in the case of DHCPServer.Name is not set the name will be DHCPSERVER_Private.
+                  2. if DHCPServer.Name is set the name will be DHCPSERVER_Private.
+                  when Network.ID is set, its expected that there exist a network in PowerVS workspace with id or else system will give error.
+                  when Network.Name is set, system will first check for network with Name in PowerVS workspace, if not exist system will check DHCP network with given Network.name, if that also not exist, it will create a new DHCP service and name will be DHCPSERVER_Private.
+                  Network.RegEx is not yet supported and system will ignore the value.
+                properties:
+                  id:
+                    description: ID of resource
+                    minLength: 1
+                    type: string
+                  name:
+                    description: Name of resource
+                    minLength: 1
+                    type: string
+                  regex:
+                    description: |-
+                      Regular expression to match resource,
+                      In case of multiple resources matches the provided regular expression the first matched resource will be selected
+                    minLength: 1
+                    type: string
+                type: object
+              resourceGroup:
+                description: |-
+                  resourceGroup name under which the resources will be created.
+                  when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource,
+                  1. it is expected to set the ResourceGroup.Name, not setting will result in webhook error.
+                  ResourceGroup.ID and ResourceGroup.Regex is not yet supported and system will ignore the value.
+                properties:
+                  id:
+                    description: ID of resource
+                    minLength: 1
+                    type: string
+                  name:
+                    description: Name of resource
+                    minLength: 1
+                    type: string
+                  regex:
+                    description: |-
+                      Regular expression to match resource,
+                      In case of multiple resources matches the provided regular expression the first matched resource will be selected
+                    minLength: 1
+                    type: string
+                type: object
+              serviceInstance:
+                description: |-
+                  serviceInstance is the reference to the Power VS server workspace on which the server instance(VM) will be created.
+                  Power VS server workspace is a container for all Power VS instances at a specific geographic region.
+                  serviceInstance can be created via IBM Cloud catalog or CLI.
+                  supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli.
+                  More detail about Power VS service instance.
+                  https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+                  when omitted system will dynamically create the service instance with name CLUSTER_NAME-serviceInstance.
+                  when ServiceInstance.ID is set, its expected that there exist a service instance in PowerVS workspace with id or else system will give error.
+                  when ServiceInstance.Name is set, system will first check for service instance with Name in PowerVS workspace, if not exist system will create new instance.
+                  if there are more than one service instance exist with the ServiceInstance.Name in given Zone, installation fails with an error. Use ServiceInstance.ID in those situations to use the specific service instance.
+                  ServiceInstance.Regex is not yet supported and system will ignore the value.
+                properties:
+                  id:
+                    description: ID of resource
+                    minLength: 1
+                    type: string
+                  name:
+                    description: Name of resource
+                    minLength: 1
+                    type: string
+                  regex:
+                    description: |-
+                      Regular expression to match resource,
+                      In case of multiple resources matches the provided regular expression the first matched resource will be selected
+                    minLength: 1
+                    type: string
+                type: object
+              serviceInstanceID:
+                description: |-
+                  ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed.
+                  Deprecated: use ServiceInstance instead
+                type: string
+              transitGateway:
+                description: |-
+                  transitGateway contains information about IBM Cloud TransitGateway
+                  IBM Cloud TransitGateway helps in establishing network connectivity between IBM Cloud Power VS and VPC infrastructure
+                  more information about TransitGateway can be found here https://www.ibm.com/products/transit-gateway.
+                  when TransitGateway.ID is set, its expected that there exist a TransitGateway with ID or else system will give error.
+                  when TransitGateway.Name is set, system will first check for TransitGateway with Name, if not exist system will create new TransitGateway.
+                properties:
+                  globalRouting:
+                    description: |-
+                      globalRouting indicates whether to set global routing true or not while creating the transit gateway.
+                      set this field to true only when PowerVS and VPC are from different regions, if they are same it's suggested to use local routing by setting the field to false.
+                      when the field is omitted, based on PowerVS region (region associated with IBMPowerVSCluster.Spec.Zone) and VPC region(IBMPowerVSCluster.Spec.VPC.Region) system will decide whether to enable globalRouting or not.
+                    type: boolean
+                  id:
+                    description: id of resource.
+                    type: string
+                  name:
+                    description: name of resource.
+                    maxLength: 63
+                    minLength: 1
+                    pattern: ^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$
+                    type: string
+                type: object
+              vpc:
+                description: |-
+                  vpc contains information about IBM Cloud VPC resources.
+                  when omitted system will dynamically create the VPC with name CLUSTER_NAME-vpc.
+                  when VPC.ID is set, its expected that there exist a VPC with ID or else system will give error.
+                  when VPC.Name is set, system will first check for VPC with Name, if not exist system will create new VPC.
+                  when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource,
+                  1. it is expected to set the VPC.Region, not setting will result in webhook error.
+                properties:
+                  id:
+                    description: id of resource.
+                    maxLength: 64
+                    minLength: 1
+                    pattern: ^[-0-9a-z_]+$
+                    type: string
+                  name:
+                    description: name of resource.
+                    maxLength: 63
+                    minLength: 1
+                    pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                    type: string
+                  region:
+                    description: |-
+                      region of IBM Cloud VPC.
+                      when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource,
+                      it is expected to set the region, not setting will result in webhook error.
+                    type: string
+                type: object
+              vpcSecurityGroups:
+                description: VPCSecurityGroups to attach to the VPC resource
+                items:
+                  description: VPCSecurityGroup defines a VPC Security Group that
+                    should exist or be created within the specified VPC, with the
+                    specified Security Group Rules.
+                  properties:
+                    id:
+                      description: id of the Security Group.
+                      type: string
+                    name:
+                      description: name of the Security Group.
+                      type: string
+                    rules:
+                      description: rules are the Security Group Rules for the Security
+                        Group.
+                      items:
+                        description: VPCSecurityGroupRule defines a VPC Security Group
+                          Rule for a specified Security Group.
+                        properties:
+                          action:
+                            description: action defines whether to allow or deny traffic
+                              defined by the Security Group Rule.
+                            enum:
+                            - allow
+                            - deny
+                            type: string
+                          destination:
+                            description: |-
+                              destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule.
+                              Only used when direction is VPCSecurityGroupRuleDirectionOutbound.
+                            properties:
+                              icmpCode:
+                                description: |-
+                                  icmpCode is the ICMP code for the Rule.
+                                  Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                format: int64
+                                type: integer
+                              icmpType:
+                                description: |-
+                                  icmpType is the ICMP type for the Rule.
+                                  Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                format: int64
+                                type: integer
+                              portRange:
+                                description: portRange is a range of ports allowed
+                                  for the Rule's remote.
+                                properties:
+                                  maximumPort:
+                                    description: maximumPort is the inclusive upper
+                                      range of ports.
+                                    format: int64
+                                    maximum: 65535
+                                    minimum: 1
+                                    type: integer
+                                  minimumPort:
+                                    description: minimumPort is the inclusive lower
+                                      range of ports.
+                                    format: int64
+                                    maximum: 65535
+                                    minimum: 1
+                                    type: integer
+                                type: object
+                                x-kubernetes-validations:
+                                - message: maximum port must be greater than or equal
+                                    to minimum port
+                                  rule: self.maximumPort >= self.minimumPort
+                              protocol:
+                                description: protocol defines the traffic protocol
+                                  used for the Security Group Rule.
+                                enum:
+                                - all
+                                - icmp
+                                - tcp
+                                - udp
+                                type: string
+                              remotes:
+                                description: |-
+                                  remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote.
+                                  Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc.
+                                  This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc.
+                                items:
+                                  description: |-
+                                    VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details.
+                                    The type of remote defines the additional remote details which are used for defining the remote.
+                                  properties:
+                                    address:
+                                      description: |-
+                                        address is the address to use for the remote's destination/source.
+                                        Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress.
+                                      type: string
+                                    cidrSubnetName:
+                                      description: |-
+                                        cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
+                                        Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
+                                      type: string
+                                    remoteType:
+                                      description: remoteType defines the type of
+                                        filter to define for the remote's destination/source.
+                                      enum:
+                                      - any
+                                      - cidr
+                                      - address
+                                      - sg
+                                      type: string
+                                    securityGroupName:
+                                      description: |-
+                                        securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
+                                        Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
+                                      type: string
+                                  required:
+                                  - remoteType
+                                  type: object
+                                  x-kubernetes-validations:
+                                  - message: cidrSubnetName, address, and securityGroupName
+                                      are not valid for VPCSecurityGroupRuleRemoteTypeAny
+                                      remoteType
+                                    rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName)
+                                      && !has(self.address) && !has(self.securityGroupName))
+                                      : true'
+                                  - message: only cidrSubnetName is valid for VPCSecurityGroupRuleRemoteTypeCIDR
+                                      remoteType
+                                    rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName)
+                                      && !has(self.address) && !has(self.securityGroupName))
+                                      : true'
+                                  - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP
+                                      remoteType
+                                    rule: 'self.remoteType == ''address'' ? (has(self.address)
+                                      && !has(self.cidrSubnetName) && !has(self.securityGroupName))
+                                      : true'
+                                  - message: only securityGroupName is valid for VPCSecurityGroupRuleRemoteTypeSG
+                                      remoteType
+                                    rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName)
+                                      && !has(self.cidrSubnetName) && !has(self.address))
+                                      : true'
+                                type: array
+                            required:
+                            - protocol
+                            - remotes
+                            type: object
+                            x-kubernetes-validations:
+                            - message: icmpCode and icmpType are only supported for
+                                VPCSecurityGroupRuleProtocolIcmp protocol
+                              rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode)
+                                && !has(self.icmpType)) : true'
+                            - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll
+                                protocol
+                              rule: 'self.protocol == ''all'' ? !has(self.portRange)
+                                : true'
+                            - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp
+                                protocol
+                              rule: 'self.protocol == ''icmp'' ? !has(self.portRange)
+                                : true'
+                          direction:
+                            description: direction defines whether the traffic is
+                              inbound or outbound for the Security Group Rule.
+                            enum:
+                            - inbound
+                            - outbound
+                            type: string
+                          securityGroupID:
+                            description: securityGroupID is the ID of the Security
+                              Group for the Security Group Rule.
+                            type: string
+                          source:
+                            description: |-
+                              source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule.
+                              Only used when direction is VPCSecurityGroupRuleDirectionInbound.
+                            properties:
+                              icmpCode:
+                                description: |-
+                                  icmpCode is the ICMP code for the Rule.
+                                  Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                format: int64
+                                type: integer
+                              icmpType:
+                                description: |-
+                                  icmpType is the ICMP type for the Rule.
+                                  Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                format: int64
+                                type: integer
+                              portRange:
+                                description: portRange is a range of ports allowed
+                                  for the Rule's remote.
+                                properties:
+                                  maximumPort:
+                                    description: maximumPort is the inclusive upper
+                                      range of ports.
+                                    format: int64
+                                    maximum: 65535
+                                    minimum: 1
+                                    type: integer
+                                  minimumPort:
+                                    description: minimumPort is the inclusive lower
+                                      range of ports.
+                                    format: int64
+                                    maximum: 65535
+                                    minimum: 1
+                                    type: integer
+                                type: object
+                                x-kubernetes-validations:
+                                - message: maximum port must be greater than or equal
+                                    to minimum port
+                                  rule: self.maximumPort >= self.minimumPort
+                              protocol:
+                                description: protocol defines the traffic protocol
+                                  used for the Security Group Rule.
+                                enum:
+                                - all
+                                - icmp
+                                - tcp
+                                - udp
+                                type: string
+                              remotes:
+                                description: |-
+                                  remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote.
+                                  Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc.
+                                  This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc.
+                                items:
+                                  description: |-
+                                    VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details.
+                                    The type of remote defines the additional remote details which are used for defining the remote.
+                                  properties:
+                                    address:
+                                      description: |-
+                                        address is the address to use for the remote's destination/source.
+                                        Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress.
+                                      type: string
+                                    cidrSubnetName:
+                                      description: |-
+                                        cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
+                                        Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
+                                      type: string
+                                    remoteType:
+                                      description: remoteType defines the type of
+                                        filter to define for the remote's destination/source.
+                                      enum:
+                                      - any
+                                      - cidr
+                                      - address
+                                      - sg
+                                      type: string
+                                    securityGroupName:
+                                      description: |-
+                                        securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
+                                        Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
+                                      type: string
+                                  required:
+                                  - remoteType
+                                  type: object
+                                  x-kubernetes-validations:
+                                  - message: cidrSubnetName, address, and securityGroupName
+                                      are not valid for VPCSecurityGroupRuleRemoteTypeAny
+                                      remoteType
+                                    rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName)
+                                      && !has(self.address) && !has(self.securityGroupName))
+                                      : true'
+                                  - message: only cidrSubnetName is valid for VPCSecurityGroupRuleRemoteTypeCIDR
+                                      remoteType
+                                    rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName)
+                                      && !has(self.address) && !has(self.securityGroupName))
+                                      : true'
+                                  - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP
+                                      remoteType
+                                    rule: 'self.remoteType == ''address'' ? (has(self.address)
+                                      && !has(self.cidrSubnetName) && !has(self.securityGroupName))
+                                      : true'
+                                  - message: only securityGroupName is valid for VPCSecurityGroupRuleRemoteTypeSG
+                                      remoteType
+                                    rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName)
+                                      && !has(self.cidrSubnetName) && !has(self.address))
+                                      : true'
(has(self.securityGroupName) + && !has(self.cidrSubnetName) && !has(self.address)) + : true' + type: array + required: + - protocol + - remotes + type: object + x-kubernetes-validations: + - message: icmpCode and icmpType are only supported for + VPCSecurityGroupRuleProtocolIcmp protocol + rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) + && !has(self.icmpType)) : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll + protocol + rule: 'self.protocol == ''all'' ? !has(self.portRange) + : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp + protocol + rule: 'self.protocol == ''icmp'' ? !has(self.portRange) + : true' + required: + - action + - direction + type: object + x-kubernetes-validations: + - message: both destination and source cannot be provided + rule: (has(self.destination) && !has(self.source)) || (!has(self.destination) + && has(self.source)) + - message: source must be set for VPCSecurityGroupRuleDirectionInbound + direction + rule: 'self.direction == ''inbound'' ? has(self.source) + : true' + - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound + direction + rule: 'self.direction == ''inbound'' ? !has(self.destination) + : true' + - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound + direction + rule: 'self.direction == ''outbound'' ? has(self.destination) + : true' + - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound + direction + rule: 'self.direction == ''outbound'' ? !has(self.source) + : true' + type: array + tags: + description: tags are tags to add to the Security Group. + items: + type: string + type: array + type: object + x-kubernetes-validations: + - message: either an id or name must be specified + rule: has(self.id) || has(self.name) + type: array + vpcSubnets: + description: |- + vpcSubnets contains information about IBM Cloud VPC Subnet resources. + when omitted system will create the subnets in all the zone corresponding to VPC.Region, with name CLUSTER_NAME-vpcsubnet-ZONE_NAME. + possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server. + when VPCSubnets[].ID is set, its expected that there exist a subnet with ID or else system will give error. + when VPCSubnets[].Zone is not set, a random zone is picked from available zones of VPC.Region. + when VPCSubnets[].Name is not set, system will set name as CLUSTER_NAME-vpcsubnet-INDEX. + if subnet with name VPCSubnets[].Name not found, system will create new subnet in VPCSubnets[].Zone. + items: + description: Subnet describes a subnet. + properties: + cidr: + type: string + id: + maxLength: 64 + minLength: 1 + pattern: ^[-0-9a-z_]+$ + type: string + name: + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + zone: + type: string + type: object + type: array + zone: + description: |- + zone is the name of Power VS zone where the cluster will be created + possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server. + when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, + 1. it is expected to set the zone, not setting will result in webhook error. + 2. the zone should have PER capabilities, or else system will give error. 
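+# Illustrative only, not part of the generated schema: a spec.vpcSecurityGroups
+# entry that satisfies the CEL validations earlier in this spec (an inbound rule
+# carries a source, and each remote sets only the field matching its remoteType).
+# All names, ports, and the "allow" action value are hypothetical placeholders.
+#
+#   vpcSecurityGroups:
+#   - name: example-api-server-sg
+#     rules:
+#     - action: allow
+#       direction: inbound
+#       source:
+#         protocol: tcp
+#         portRange:
+#           minimumPort: 6443
+#           maximumPort: 6443
+#         remotes:
+#         - remoteType: cidr
+#           cidrSubnetName: example-vpcsubnet-1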
+                type: string
+            required:
+            - network
+            - serviceInstanceID
+            type: object
+          status:
+            description: IBMPowerVSClusterStatus defines the observed state of IBMPowerVSCluster.
+            properties:
+              conditions:
+                description: Conditions defines current service state of the IBMPowerVSCluster.
+                items:
+                  description: Condition defines an observation of a Cluster API resource
+                    operational state.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when
+                        the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This field may be empty.
+                      maxLength: 10240
+                      minLength: 1
+                      type: string
+                    reason:
+                      description: |-
+                        reason is the reason for the condition's last transition in CamelCase.
+                        The specific API may choose whether or not this field is considered a guaranteed API.
+                        This field may be empty.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                    severity:
+                      description: |-
+                        severity provides an explicit classification of Reason code, so the users or machines can immediately
+                        understand the current situation and act accordingly.
+                        The Severity field MUST be set only when Status=False.
+                      maxLength: 32
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      type: string
+                    type:
+                      description: |-
+                        type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                        can be useful (see .node.status.conditions), the ability to deconflict is important.
+                      maxLength: 256
+                      minLength: 1
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              cosInstance:
+                description: cosInstance is a reference to the IBM Cloud COS Instance resource.
+                properties:
+                  controllerCreated:
+                    default: false
+                    description: controllerCreated indicates whether the resource
+                      is created by the controller.
+                    type: boolean
+                  id:
+                    description: id represents the id of the resource.
+                    type: string
+                type: object
+              dhcpServer:
+                description: dhcpServer is the reference to the Power VS DHCP server.
+                properties:
+                  controllerCreated:
+                    default: false
+                    description: controllerCreated indicates whether the resource
+                      is created by the controller.
+                    type: boolean
+                  id:
+                    description: id represents the id of the resource.
+                    type: string
+                type: object
+              loadBalancers:
+                additionalProperties:
+                  description: VPCLoadBalancerStatus defines the status of the VPC load balancer.
+                  properties:
+                    controllerCreated:
+                      default: false
+                      description: controllerCreated indicates whether the resource
+                        is created by the controller.
+                      type: boolean
+                    hostname:
+                      description: hostname is the hostname of load balancer.
+                      type: string
+                    id:
+                      description: id of VPC load balancer.
+                      type: string
+                    state:
+                      description: State is the status of the load balancer.
+                      type: string
+                  type: object
+                description: loadBalancers is a reference to IBM Cloud VPC load balancers.
+                type: object
+              network:
+                description: network is the reference to the Power VS network to
+                  use for this cluster.
+                properties:
+                  controllerCreated:
+                    default: false
+                    description: controllerCreated indicates whether the resource
+                      is created by the controller.
+                    type: boolean
+                  id:
+                    description: id represents the id of the resource.
+                    type: string
+                type: object
+              ready:
+                default: false
+                description: ready is true when the provider resource is ready.
+                type: boolean
+              resourceGroupID:
+                description: resourceGroupID is the reference to the Power VS resource
+                  group under which the resources will be created.
+                properties:
+                  controllerCreated:
+                    default: false
+                    description: controllerCreated indicates whether the resource
+                      is created by the controller.
+                    type: boolean
+                  id:
+                    description: id represents the id of the resource.
+                    type: string
+                type: object
+              serviceInstance:
+                description: serviceInstance is the reference to the Power VS service
+                  on which the server instance (VM) will be created.
+                properties:
+                  controllerCreated:
+                    default: false
+                    description: controllerCreated indicates whether the resource
+                      is created by the controller.
+                    type: boolean
+                  id:
+                    description: id represents the id of the resource.
+                    type: string
+                type: object
+              transitGateway:
+                description: transitGateway is a reference to the IBM Cloud TransitGateway.
+                properties:
+                  controllerCreated:
+                    default: false
+                    description: controllerCreated indicates whether the resource
+                      is created by the controller.
+                    type: boolean
+                  id:
+                    description: id represents the id of the resource.
+                    type: string
+                  powerVSConnection:
+                    description: powerVSConnection defines the Power VS connection
+                      status in the transit gateway.
+                    properties:
+                      controllerCreated:
+                        default: false
+                        description: controllerCreated indicates whether the resource
+                          is created by the controller.
+                        type: boolean
+                      id:
+                        description: id represents the id of the resource.
+                        type: string
+                    type: object
+                  vpcConnection:
+                    description: vpcConnection defines the VPC connection status in
+                      the transit gateway.
+                    properties:
+                      controllerCreated:
+                        default: false
+                        description: controllerCreated indicates whether the resource
+                          is created by the controller.
+                        type: boolean
+                      id:
+                        description: id represents the id of the resource.
+                        type: string
+                    type: object
+                type: object
+              v1beta2:
+                description: v1beta2 groups all the fields that will be added or modified
+                  in IBMPowerVSCluster's status with the V1Beta2 version.
+                properties:
+                  conditions:
+                    description: conditions represents the observations of an IBMPowerVSCluster's
+                      current state.
+                    items:
+                      description: Condition contains details for one aspect of the
+                        current state of this API Resource.
+                      properties:
+                        lastTransitionTime:
+                          description: |-
+                            lastTransitionTime is the last time the condition transitioned from one status to another.
+                            This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                          format: date-time
+                          type: string
+                        message:
+                          description: |-
+                            message is a human readable message indicating details about the transition.
+                            This may be an empty string.
+                          maxLength: 32768
+                          type: string
+                        observedGeneration:
+                          description: |-
+                            observedGeneration represents the .metadata.generation that the condition was set based upon.
+                            For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                            with respect to the current state of the instance.
+                          format: int64
+                          minimum: 0
+                          type: integer
+                        reason:
+                          description: |-
+                            reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                            Producers of specific condition types may define expected values and meanings for this field,
+                            and whether the values are considered a guaranteed API.
+                            The value should be a CamelCase string.
+                            This field may not be empty.
+                          maxLength: 1024
+                          minLength: 1
+                          pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                          type: string
+                        status:
+                          description: status of the condition, one of True, False,
+                            Unknown.
+                          enum:
+                          - "True"
+                          - "False"
+                          - Unknown
+                          type: string
+                        type:
+                          description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                          maxLength: 316
+                          pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                          type: string
+                      required:
+                      - lastTransitionTime
+                      - message
+                      - reason
+                      - status
+                      - type
+                      type: object
+                    maxItems: 32
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - type
+                    x-kubernetes-list-type: map
+                type: object
+              vpc:
+                description: vpc is a reference to IBM Cloud VPC resources.
+                properties:
+                  controllerCreated:
+                    default: false
+                    description: controllerCreated indicates whether the resource
+                      is created by the controller.
+                    type: boolean
+                  id:
+                    description: id represents the id of the resource.
+                    type: string
+                type: object
+              vpcSecurityGroups:
+                additionalProperties:
+                  description: VPCSecurityGroupStatus defines a vpc security group
+                    resource status with its id and respective rules' ids.
+                  properties:
+                    controllerCreated:
+                      default: false
+                      description: controllerCreated indicates whether the resource
+                        is created by the controller.
+                      type: boolean
+                    id:
+                      description: id represents the id of the resource.
+                      type: string
+                    ruleIDs:
+                      description: ruleIDs contains the ids of rules created under the
+                        security group.
+                      items:
+                        type: string
+                      type: array
+                  type: object
+                description: vpcSecurityGroups is a reference to IBM Cloud VPC security
+                  groups.
+                type: object
+              vpcSubnet:
+                additionalProperties:
+                  description: ResourceReference identifies a resource with id.
+                  properties:
+                    controllerCreated:
+                      default: false
+                      description: controllerCreated indicates whether the resource
+                        is created by the controller.
+                      type: boolean
+                    id:
+                      description: id represents the id of the resource.
+                      type: string
+                  type: object
+                description: vpcSubnet is a reference to IBM Cloud VPC subnets.
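+# Illustrative only: the ResourceReference pattern used throughout this status
+# block pairs a resource id with controllerCreated, which records whether the
+# controller created the resource (and so, presumably, what it should tear down
+# on deletion). A populated status might look like this; ids are hypothetical.
+#
+#   status:
+#     ready: true
+#     network:
+#       controllerCreated: true
+#       id: aaaa-bbbb-cccc
+#     vpc:
+#       controllerCreated: false
+#       id: r010-1111-2222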
+ type: object + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + service.beta.openshift.io/inject-cabundle: "true" + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 + name: ibmpowervsmachines.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + caBundle: Cg== + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: infrastructure.cluster.x-k8s.io + names: + kind: IBMPowerVSMachine + listKind: IBMPowerVSMachineList + plural: ibmpowervsmachines + singular: ibmpowervsmachine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Cluster to which this IBMPowerVSMachine belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Machine object to which this IBMPowerVSMachine belongs + jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name + name: Machine + priority: 1 + type: string + - description: Time duration since creation of IBMPowerVSMachine + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Cluster infrastructure is ready for IBM PowerVS instances + jsonPath: .status.ready + name: Ready + type: string + - description: Instance Internal Addresses + jsonPath: .status.addresses[?(@.type=="InternalIP")].address + name: Internal-IP + priority: 1 + type: string + - description: Instance External Addresses + jsonPath: .status.addresses[?(@.type=="ExternalIP")].address + name: External-IP + priority: 1 + type: string + - description: PowerVS instance state + jsonPath: .status.instanceState + name: State + type: string + - description: PowerVS instance health + jsonPath: .status.health + name: Health + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: IBMPowerVSMachine is the Schema for the ibmpowervsmachines API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSMachineSpec defines the desired state of IBMPowerVSMachine. + properties: + image: + description: Image is the reference to the Image from which to create + the machine instance. 
+ properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + imageRef: + description: |- + ImageRef is an optional reference to a provider-specific resource that holds + the details for provisioning the Image for a Cluster. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + memory: + description: Memory is Amount of memory allocated (in GB) + type: string + network: + description: Network is the reference to the Network to use for this + instance. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + procType: + description: 'ProcType is the processor type, e.g: dedicated, shared, + capped' + type: string + processors: + description: Processors is Number of processors allocated. + pattern: ^\d+(\.)?(\d)?(\d)?$ + type: string + providerID: + description: ProviderID is the unique identifier as specified by the + cloud provider. + type: string + serviceInstanceID: + description: ServiceInstanceID is the id of the power cloud instance + where the vsi instance will get deployed. + minLength: 1 + type: string + sshKey: + description: SSHKey is the name of the SSH key pair provided to the + vsi for authenticating users. + type: string + sysType: + description: SysType is the System type used to host the vsi. + type: string + required: + - network + - serviceInstanceID + type: object + status: + description: IBMPowerVSMachineStatus defines the observed state of IBMPowerVSMachine. + properties: + addresses: + description: Addresses contains the vsi associated addresses. + items: + description: NodeAddress contains information for the node's address. + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP + or InternalIP. + type: string + required: + - address + - type + type: object + type: array + conditions: + description: Conditions defines current service state of the IBMPowerVSMachine. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. 
+ maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failureMessage: + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. + type: string + failureReason: + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. + type: string + fault: + description: Fault will report if any fault messages for the vsi. + type: string + health: + description: Health is the health of the vsi. + type: string + instanceID: + type: string + instanceState: + description: InstanceState is the status of the vsi. + type: string + ready: + description: Ready is true when the provider resource is ready. + type: boolean + region: + description: Region specifies the Power VS Service instance region. + type: string + zone: + description: Zone specifies the Power VS Service instance zone. 
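+# Illustrative only: a minimal v1beta1 IBMPowerVSMachine matching the spec schema
+# above. The image/network names, service instance id, ssh key, and capacity
+# values are hypothetical placeholders.
+#
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+#   kind: IBMPowerVSMachine
+#   metadata:
+#     name: example-machine
+#   spec:
+#     serviceInstanceID: "aaaa-bbbb-cccc"
+#     network:
+#       name: example-network
+#     image:
+#       name: example-image
+#     memory: "16"
+#     processors: "0.5"
+#     procType: shared
+#     sysType: s922
+#     sshKey: example-ssh-key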
+ type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Cluster to which this IBMPowerVSMachine belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Machine object to which this IBMPowerVSMachine belongs + jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name + name: Machine + priority: 1 + type: string + - description: Time duration since creation of IBMPowerVSMachine + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Cluster infrastructure is ready for IBM PowerVS instances + jsonPath: .status.ready + name: Ready + type: string + - description: Instance Internal Addresses + jsonPath: .status.addresses[?(@.type=="InternalIP")].address + name: Internal-IP + priority: 1 + type: string + - description: Instance External Addresses + jsonPath: .status.addresses[?(@.type=="ExternalIP")].address + name: External-IP + priority: 1 + type: string + - description: PowerVS instance state + jsonPath: .status.instanceState + name: State + type: string + - description: PowerVS instance health + jsonPath: .status.health + name: Health + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: IBMPowerVSMachine is the Schema for the ibmpowervsmachines API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSMachineSpec defines the desired state of IBMPowerVSMachine. + properties: + image: + description: |- + Image the reference to the image which is used to create the instance. + supported image identifier in IBMPowerVSResourceReference are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + imageRef: + description: |- + ImageRef is an optional reference to a provider-specific resource that holds + the details for provisioning the Image for a Cluster. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + memoryGiB: + description: |- + memoryGiB is the size of a virtual machine's memory, in GiB. 
+ maximum value for the MemoryGiB depends on the selected SystemType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud + The minimum memory is 2 GiB. + When omitted, this means the user has no opinion and the platform is left to choose a reasonable + default, which is subject to change over time. The current default is 2. + format: int32 + type: integer + network: + description: |- + Network is the reference to the Network to use for this instance. + supported network identifier in IBMPowerVSResourceReference are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + processorType: + description: |- + processorType is the VM instance processor type. + It must be set to one of the following values: Dedicated, Capped or Shared. + Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. + Shared: Shared among other clients. + Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. + if the processorType is selected as Dedicated, then processors value cannot be fractional. + When omitted, this means that the user has no opinion and the platform is left to choose a + reasonable default, which is subject to change over time. The current default is Shared. + enum: + - Dedicated + - Shared + - Capped + - "" + type: string + processors: + anyOf: + - type: integer + - type: string + description: |- + processors is the number of virtual processors in a virtual machine. + when the processorType is selected as Dedicated the processors value cannot be fractional. + maximum value for the Processors depends on the selected SystemType, + and minimum value for Processors depends on the selected ProcessorType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud. + when ProcessorType is set as Shared or Capped, The minimum processors is 0.25. + when ProcessorType is set as Dedicated, The minimum processors is 1. + When omitted, this means that the user has no opinion and the platform is left to choose a + reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. + when ProcessorType selected as Dedicated, the default is set to 1. + when ProcessorType selected as Shared or Capped, the default is set to 0.25. + x-kubernetes-int-or-string: true + providerID: + description: ProviderID is the unique identifier as specified by the + cloud provider. + type: string + serviceInstance: + description: |- + serviceInstance is the reference to the Power VS workspace on which the server instance(VM) will be created. + Power VS workspace is a container for all Power VS instances at a specific geographic region. + serviceInstance can be created via IBM Cloud catalog or CLI. + supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. + More detail about Power VS service instance. 
+ https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server + when omitted system will dynamically create the service instance + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + serviceInstanceID: + description: |- + ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed. + Deprecated: use ServiceInstance instead + type: string + sshKey: + description: SSHKey is the name of the SSH key pair provided to the + vsi for authenticating users. + type: string + systemType: + description: |- + systemType is the System type used to host the instance. + systemType determines the number of cores and memory that is available. + Few of the supported SystemTypes are s922,e980,s1022,e1050,e1080. + When omitted, this means that the user has no opinion and the platform is left to choose a + reasonable default, which is subject to change over time. The current default is s922 which is generally available. + enum: + - s922 + - e980 + - s1022 + - e1050 + - e1080 + - "" + type: string + required: + - network + - serviceInstanceID + type: object + status: + description: IBMPowerVSMachineStatus defines the observed state of IBMPowerVSMachine. + properties: + addresses: + description: Addresses contains the vsi associated addresses. + items: + description: NodeAddress contains information for the node's address. + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP + or InternalIP. + type: string + required: + - address + - type + type: object + type: array + conditions: + description: Conditions defines current service state of the IBMPowerVSMachine. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. + maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. 
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failureMessage: + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. + + Deprecated: This field is deprecated and is going to be removed in the next apiVersion. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. + type: string + failureReason: + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. + + Deprecated: This field is deprecated and is going to be removed in the next apiVersion. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. + type: string + fault: + description: Fault will report if any fault messages for the vsi. + type: string + health: + description: Health is the health of the vsi. + type: string + instanceID: + type: string + instanceState: + description: InstanceState is the status of the vsi. + type: string + ready: + description: Ready is true when the provider resource is ready. + type: boolean + region: + description: Region specifies the Power VS Service instance region. + type: string + v1beta2: + description: v1beta2 groups all the fields that will be added or modified + in IBMPowerVSMachine's status with the V1Beta2 version. + properties: + conditions: + description: |- + conditions represents the observations of a IBMPowerVSMachine's current state. + Known condition types are Ready, InstanceReady and Paused. 
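+# Illustrative only: the equivalent v1beta2 shape, where memory and processors
+# are typed fields (memoryGiB is an int32, processors is int-or-string) and the
+# workspace is referenced via serviceInstance. Note the printed schema above
+# still lists the deprecated serviceInstanceID under required. Placeholders:
+#
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+#   kind: IBMPowerVSMachine
+#   metadata:
+#     name: example-machine
+#   spec:
+#     serviceInstance:
+#       name: example-workspace
+#     serviceInstanceID: "aaaa-bbbb-cccc"
+#     network:
+#       regex: ^DHCPSERVER.*$
+#     image:
+#       name: example-image
+#     memoryGiB: 16
+#     processorType: Shared
+#     processors: "0.25"
+#     systemType: s922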
+ items: + description: Condition contains details for one aspect of the + current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + zone: + description: Zone specifies the Power VS Service instance zone. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + service.beta.openshift.io/inject-cabundle: "true" + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 + name: ibmpowervsmachinetemplates.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + caBundle: Cg== + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: infrastructure.cluster.x-k8s.io + names: + kind: IBMPowerVSMachineTemplate + listKind: IBMPowerVSMachineTemplateList + plural: ibmpowervsmachinetemplates + singular: ibmpowervsmachinetemplate + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: IBMPowerVSMachineTemplate is the Schema for the ibmpowervsmachinetemplates + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSMachineTemplateSpec defines the desired state of + IBMPowerVSMachineTemplate. + properties: + template: + description: IBMPowerVSMachineTemplateResource holds the IBMPowerVSMachine + spec. + properties: + spec: + description: IBMPowerVSMachineSpec defines the desired state of + IBMPowerVSMachine. + properties: + image: + description: Image is the reference to the Image from which + to create the machine instance. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + imageRef: + description: |- + ImageRef is an optional reference to a provider-specific resource that holds + the details for provisioning the Image for a Cluster. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + memory: + description: Memory is Amount of memory allocated (in GB) + type: string + network: + description: Network is the reference to the Network to use + for this instance. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + procType: + description: 'ProcType is the processor type, e.g: dedicated, + shared, capped' + type: string + processors: + description: Processors is Number of processors allocated. + pattern: ^\d+(\.)?(\d)?(\d)?$ + type: string + providerID: + description: ProviderID is the unique identifier as specified + by the cloud provider. + type: string + serviceInstanceID: + description: ServiceInstanceID is the id of the power cloud + instance where the vsi instance will get deployed. + minLength: 1 + type: string + sshKey: + description: SSHKey is the name of the SSH key pair provided + to the vsi for authenticating users. + type: string + sysType: + description: SysType is the System type used to host the vsi. + type: string + required: + - network + - serviceInstanceID + type: object + required: + - spec + type: object + required: + - template + type: object + status: + description: IBMPowerVSMachineTemplateStatus defines the observed state + of IBMPowerVSMachineTemplate. 
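+# Illustrative only: an IBMPowerVSMachineTemplate wraps an IBMPowerVSMachineSpec
+# under spec.template.spec, e.g. for consumption by a MachineDeployment. All
+# names and ids below are hypothetical placeholders.
+#
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+#   kind: IBMPowerVSMachineTemplate
+#   metadata:
+#     name: example-template
+#   spec:
+#     template:
+#       spec:
+#         serviceInstanceID: "aaaa-bbbb-cccc"
+#         network:
+#           name: example-network
+#         image:
+#           name: example-image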
+ properties: + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Capacity defines the resource capacity for this machine. + This value is used for autoscaling from zero operations as defined in: + https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md + type: object + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1beta2 + schema: + openAPIV3Schema: + description: IBMPowerVSMachineTemplate is the Schema for the ibmpowervsmachinetemplates + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSMachineTemplateSpec defines the desired state of + IBMPowerVSMachineTemplate. + properties: + template: + description: IBMPowerVSMachineTemplateResource holds the IBMPowerVSMachine + spec. + properties: + spec: + description: IBMPowerVSMachineSpec defines the desired state of + IBMPowerVSMachine. + properties: + image: + description: |- + Image the reference to the image which is used to create the instance. + supported image identifier in IBMPowerVSResourceReference are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + imageRef: + description: |- + ImageRef is an optional reference to a provider-specific resource that holds + the details for provisioning the Image for a Cluster. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + memoryGiB: + description: |- + memoryGiB is the size of a virtual machine's memory, in GiB. + maximum value for the MemoryGiB depends on the selected SystemType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud + The minimum memory is 2 GiB. + When omitted, this means the user has no opinion and the platform is left to choose a reasonable + default, which is subject to change over time. The current default is 2. 
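+# Illustrative only: per the capacity description above, status.capacity enables
+# autoscaling-from-zero; it is typically populated by the provider controller
+# from the template's spec. A populated status might look like this
+# (hypothetical values, matching the quantity pattern):
+#
+#   status:
+#     capacity:
+#       cpu: "500m"
+#       memory: 16G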
+ format: int32 + type: integer + network: + description: |- + Network is the reference to the Network to use for this instance. + supported network identifier in IBMPowerVSResourceReference are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + processorType: + description: |- + processorType is the VM instance processor type. + It must be set to one of the following values: Dedicated, Capped or Shared. + Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. + Shared: Shared among other clients. + Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. + if the processorType is selected as Dedicated, then processors value cannot be fractional. + When omitted, this means that the user has no opinion and the platform is left to choose a + reasonable default, which is subject to change over time. The current default is Shared. + enum: + - Dedicated + - Shared + - Capped + - "" + type: string + processors: + anyOf: + - type: integer + - type: string + description: |- + processors is the number of virtual processors in a virtual machine. + when the processorType is selected as Dedicated the processors value cannot be fractional. + maximum value for the Processors depends on the selected SystemType, + and minimum value for Processors depends on the selected ProcessorType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud. + when ProcessorType is set as Shared or Capped, The minimum processors is 0.25. + when ProcessorType is set as Dedicated, The minimum processors is 1. + When omitted, this means that the user has no opinion and the platform is left to choose a + reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. + when ProcessorType selected as Dedicated, the default is set to 1. + when ProcessorType selected as Shared or Capped, the default is set to 0.25. + x-kubernetes-int-or-string: true + providerID: + description: ProviderID is the unique identifier as specified + by the cloud provider. + type: string + serviceInstance: + description: |- + serviceInstance is the reference to the Power VS workspace on which the server instance(VM) will be created. + Power VS workspace is a container for all Power VS instances at a specific geographic region. + serviceInstance can be created via IBM Cloud catalog or CLI. + supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. + More detail about Power VS service instance. 
+ https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server + when omitted system will dynamically create the service instance + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + serviceInstanceID: + description: |- + ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed. + Deprecated: use ServiceInstance instead + type: string + sshKey: + description: SSHKey is the name of the SSH key pair provided + to the vsi for authenticating users. + type: string + systemType: + description: |- + systemType is the System type used to host the instance. + systemType determines the number of cores and memory that is available. + Few of the supported SystemTypes are s922,e980,s1022,e1050,e1080. + When omitted, this means that the user has no opinion and the platform is left to choose a + reasonable default, which is subject to change over time. The current default is s922 which is generally available. + enum: + - s922 + - e980 + - s1022 + - e1050 + - e1080 + - "" + type: string + required: + - network + - serviceInstanceID + type: object + required: + - spec + type: object + required: + - template + type: object + status: + description: IBMPowerVSMachineTemplateStatus defines the observed state + of IBMPowerVSMachineTemplate. + properties: + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Capacity defines the resource capacity for this machine. + This value is used for autoscaling from zero operations as defined in: + https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + service.beta.openshift.io/inject-cabundle: "true" + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 + name: ibmpowervsimages.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + caBundle: Cg== + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: infrastructure.cluster.x-k8s.io + names: + kind: IBMPowerVSImage + listKind: IBMPowerVSImageList + plural: ibmpowervsimages + singular: ibmpowervsimage + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: PowerVS image state + jsonPath: .status.imageState + name: State + type: string + - description: Image is ready for IBM PowerVS instances + jsonPath: .status.ready + name: Ready + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: IBMPowerVSImage is the Schema for the ibmpowervsimages API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSImageSpec defines the desired state of IBMPowerVSImage. + properties: + bucket: + description: Cloud Object Storage bucket name; bucket-name[/optional/folder] + type: string + clusterName: + description: ClusterName is the name of the Cluster this object belongs + to. + minLength: 1 + type: string + deletePolicy: + default: delete + description: DeletePolicy defines the policy used to identify images + to be preserved beyond the lifecycle of associated cluster. + enum: + - delete + - retain + type: string + object: + description: Cloud Object Storage image filename. + type: string + region: + description: Cloud Object Storage region. + type: string + serviceInstanceID: + description: ServiceInstanceID is the id of the power cloud instance + where the image will get imported. + type: string + storageType: + default: tier1 + description: Type of storage, storage pool with the most available + space will be selected. + enum: + - tier0 + - tier1 + - tier3 + type: string + required: + - bucket + - clusterName + - object + - region + - serviceInstanceID + type: object + status: + description: IBMPowerVSImageStatus defines the observed state of IBMPowerVSImage. + properties: + conditions: + description: Conditions defines current service state of the IBMPowerVSImage. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. + maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
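+# Illustrative only: an IBMPowerVSImage that imports a COS object into a Power VS
+# workspace as a boot image. Bucket, object, region, and ids are placeholders.
+#
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+#   kind: IBMPowerVSImage
+#   metadata:
+#     name: example-image
+#   spec:
+#     clusterName: example-cluster
+#     serviceInstanceID: "aaaa-bbbb-cccc"
+#     bucket: example-bucket
+#     object: example-image.ova.gz
+#     region: us-south
+#     storageType: tier1
+#     deletePolicy: retain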
+ maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + imageID: + description: ImageID is the id of the imported image. + type: string + imageState: + description: ImageState is the status of the imported image. + type: string + jobID: + description: JobID is the job ID of an import operation. + type: string + ready: + description: Ready is true when the provider resource is ready. + type: boolean + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: PowerVS image state + jsonPath: .status.imageState + name: State + type: string + - description: Image is ready for IBM PowerVS instances + jsonPath: .status.ready + name: Ready + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: IBMPowerVSImage is the Schema for the ibmpowervsimages API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSImageSpec defines the desired state of IBMPowerVSImage. + properties: + bucket: + description: Cloud Object Storage bucket name; bucket-name[/optional/folder] + type: string + clusterName: + description: ClusterName is the name of the Cluster this object belongs + to. + minLength: 1 + type: string + deletePolicy: + default: delete + description: DeletePolicy defines the policy used to identify images + to be preserved beyond the lifecycle of associated cluster. + enum: + - delete + - retain + type: string + object: + description: Cloud Object Storage image filename. + type: string + region: + description: Cloud Object Storage region. + type: string + serviceInstance: + description: |- + serviceInstance is the reference to the Power VS workspace on which the server instance(VM) will be created. + Power VS workspace is a container for all Power VS instances at a specific geographic region. + serviceInstance can be created via IBM Cloud catalog or CLI. + supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. + More detail about Power VS service instance. + https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server + when omitted system will dynamically create the service instance + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + serviceInstanceID: + description: |- + ServiceInstanceID is the id of the power cloud instance where the image will get imported. 
+              Deprecated: use ServiceInstance instead
+            type: string
+          storageType:
+            default: tier1
+            description: Type of storage, storage pool with the most available
+              space will be selected.
+            enum:
+            - tier0
+            - tier1
+            - tier3
+            type: string
+        required:
+        - bucket
+        - clusterName
+        - object
+        - region
+        - serviceInstanceID
+        type: object
+      status:
+        description: IBMPowerVSImageStatus defines the observed state of IBMPowerVSImage.
+        properties:
+          conditions:
+            description: Conditions defines current service state of the IBMPowerVSImage.
+            items:
+              description: Condition defines an observation of a Cluster API resource
+                operational state.
+              properties:
+                lastTransitionTime:
+                  description: |-
+                    lastTransitionTime is the last time the condition transitioned from one status to another.
+                    This should be when the underlying condition changed. If that is not known, then using the time when
+                    the API field changed is acceptable.
+                  format: date-time
+                  type: string
+                message:
+                  description: |-
+                    message is a human readable message indicating details about the transition.
+                    This field may be empty.
+                  maxLength: 10240
+                  minLength: 1
+                  type: string
+                reason:
+                  description: |-
+                    reason is the reason for the condition's last transition in CamelCase.
+                    The specific API may choose whether or not this field is considered a guaranteed API.
+                    This field may be empty.
+                  maxLength: 256
+                  minLength: 1
+                  type: string
+                severity:
+                  description: |-
+                    severity provides an explicit classification of Reason code, so the users or machines can immediately
+                    understand the current situation and act accordingly.
+                    The Severity field MUST be set only when Status=False.
+                  maxLength: 32
+                  type: string
+                status:
+                  description: status of the condition, one of True, False, Unknown.
+                  type: string
+                type:
+                  description: |-
+                    type of condition in CamelCase or in foo.example.com/CamelCase.
+                    Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                    can be useful (see .node.status.conditions), the ability to deconflict is important.
+                  maxLength: 256
+                  minLength: 1
+                  type: string
+              required:
+              - lastTransitionTime
+              - status
+              - type
+              type: object
+            type: array
+          imageID:
+            description: ImageID is the id of the imported image.
+            type: string
+          imageState:
+            description: ImageState is the status of the imported image.
+            type: string
+          jobID:
+            description: JobID is the job ID of an import operation.
+            type: string
+          ready:
+            description: Ready is true when the provider resource is ready.
+            type: boolean
+          v1beta2:
+            description: v1beta2 groups all the fields that will be added or modified
+              in IBMPowerVSImage's status with the V1Beta2 version.
+            properties:
+              conditions:
+                description: conditions represents the observations of an IBMPowerVSImage's
+                  current state.
+                items:
+                  description: Condition contains details for one aspect of the
+                    current state of this API Resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This may be an empty string.
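+# Illustrative only: a metav1-style condition as it might appear under the
+# v1beta2 status grouping described above; all values are hypothetical. Note
+# that message may be an empty string, while reason must be non-empty.
+#   v1beta2:
+#     conditions:
+#     - type: Ready
+#       status: "True"
+#       reason: Ready
+#       message: ""
+#       observedGeneration: 2
+#       lastTransitionTime: "2024-01-01T00:00:00Z"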
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 + name: ibmpowervsclustertemplates.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: IBMPowerVSClusterTemplate + listKind: IBMPowerVSClusterTemplateList + plural: ibmpowervsclustertemplates + shortNames: + - ibmpowervsct + singular: ibmpowervsclustertemplate + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Time duration since creation of IBMPowerVSClusterTemplate + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: IBMPowerVSClusterTemplate is the schema for IBM Power VS Kubernetes + Cluster Templates. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSClusterTemplateSpec defines the desired state of + IBMPowerVSClusterTemplate. + properties: + template: + description: IBMPowerVSClusterTemplateResource describes the data + needed to create an IBMPowerVSCluster from a template. + properties: + metadata: + description: |- + Standard object's metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + spec: + description: IBMPowerVSClusterSpec defines the desired state of + IBMPowerVSCluster. + properties: + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint + used to communicate with the control plane. + properties: + host: + description: host is the hostname on which the API server + is serving. + maxLength: 512 + type: string + port: + description: port is the port on which the API server + is serving. + format: int32 + type: integer + required: + - host + - port + type: object + network: + description: Network is the reference to the Network to use + for this cluster. + properties: + id: + description: ID of resource + minLength: 1 + type: string + name: + description: Name of resource + minLength: 1 + type: string + regex: + description: |- + Regular expression to match resource, + In case of multiple resources matches the provided regular expression the first matched resource will be selected + minLength: 1 + type: string + type: object + serviceInstanceID: + description: ServiceInstanceID is the id of the power cloud + instance where the vsi instance will get deployed. + minLength: 1 + type: string + required: + - network + - serviceInstanceID + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + served: true + storage: false + subresources: {} + - additionalPrinterColumns: + - description: Time duration since creation of IBMPowerVSClusterTemplate + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IBMPowerVSClusterTemplate is the schema for IBM Power VS Kubernetes + Cluster Templates. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IBMPowerVSClusterTemplateSpec defines the desired state of + IBMPowerVSClusterTemplate. + properties: + template: + description: IBMPowerVSClusterTemplateResource describes the data + needed to create an IBMPowerVSCluster from a template. 
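+# Illustrative only: a minimal IBMPowerVSClusterTemplate written against this
+# v1beta2 schema (network and serviceInstanceID are the required spec fields);
+# the resource name, namespace, and IDs are hypothetical.
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+#   kind: IBMPowerVSClusterTemplate
+#   metadata:
+#     name: example-powervs-cluster-template
+#     namespace: openshift-cluster-api
+#   spec:
+#     template:
+#       spec:
+#         serviceInstanceID: example-service-instance-id
+#         network:
+#           name: example-dhcp-network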
+              properties:
+                metadata:
+                  description: |-
+                    Standard object's metadata.
+                    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                  properties:
+                    annotations:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        annotations is an unstructured key value map stored with a resource that may be
+                        set by external tools to store and retrieve arbitrary metadata. They are not
+                        queryable and should be preserved when modifying objects.
+                        More info: http://kubernetes.io/docs/user-guide/annotations
+                      type: object
+                    labels:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        labels is a map of string keys and values that can be used to organize and categorize
+                        (scope and select) objects. May match selectors of replication controllers
+                        and services.
+                        More info: http://kubernetes.io/docs/user-guide/labels
+                      type: object
+                  type: object
+                spec:
+                  description: IBMPowerVSClusterSpec defines the desired state of
+                    IBMPowerVSCluster.
+                  properties:
+                    controlPlaneEndpoint:
+                      description: ControlPlaneEndpoint represents the endpoint
+                        used to communicate with the control plane.
+                      properties:
+                        host:
+                          description: host is the hostname on which the API server
+                            is serving.
+                          maxLength: 512
+                          type: string
+                        port:
+                          description: port is the port on which the API server
+                            is serving.
+                          format: int32
+                          type: integer
+                      required:
+                      - host
+                      - port
+                      type: object
+                    cosInstance:
+                      description: |-
+                        cosInstance contains options to configure a supporting IBM Cloud COS bucket for this
+                        cluster - currently used for nodes requiring Ignition
+                        (https://coreos.github.io/ignition/) for bootstrapping (requires
+                        BootstrapFormatIgnition feature flag to be enabled).
+                        when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource and Ignition is set, then
+                        1. CosInstance.Name should be set; not setting it will result in a webhook error.
+                        2. CosInstance.BucketName should be set; not setting it will result in a webhook error.
+                        3. CosInstance.BucketRegion should be set; not setting it will result in a webhook error.
+                      properties:
+                        bucketName:
+                          description: bucketName is IBM cloud COS bucket name
+                          type: string
+                        bucketRegion:
+                          description: bucketRegion is IBM cloud COS bucket region
+                          type: string
+                        name:
+                          description: |-
+                            name defines name of IBM cloud COS instance to be created.
+                            when IBMPowerVSCluster.Ignition is set
+                          maxLength: 63
+                          minLength: 3
+                          pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+                          type: string
+                      type: object
+                    dhcpServer:
+                      description: |-
+                        dhcpServer contains the configuration to be used while creating a new DHCP server in the PowerVS workspace.
+                        when the field is omitted, CLUSTER_NAME will be used as DHCPServer.Name and a DHCP server will be created.
+                        it will automatically create a network with the name DHCPSERVER_Private in the PowerVS workspace.
+                      properties:
+                        cidr:
+                          description: Optional cidr for DHCP private network
+                          type: string
+                        dnsServer:
+                          default: 1.1.1.1
+                          description: Optional DNS Server for DHCP service
+                          type: string
+                        id:
+                          description: Optional id of the existing DHCPServer
+                          type: string
+                        name:
+                          description: Optional name of DHCP Service. Only alphanumeric
+                            characters and dashes are allowed.
+                          type: string
+                        snat:
+                          default: true
+                          description: Optional indicates if SNAT will be enabled
+                            for DHCP service
+                          type: boolean
+                      type: object
+                    ignition:
+                      description: Ignition defines options related to the bootstrapping
+                        systems where Ignition is used.
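+# Illustrative only: opting into Ignition-based bootstrap data in an
+# IBMPowerVSCluster spec, assuming the BootstrapFormatIgnition feature flag
+# noted above is enabled; the version must be one of the enum values below.
+#   spec:
+#     ignition:
+#       version: "3.4"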
+                      properties:
+                        version:
+                          default: "2.3"
+                          description: Version defines which version of Ignition
+                            will be used to generate bootstrap data.
+                          enum:
+                          - "2.3"
+                          - "2.4"
+                          - "3.0"
+                          - "3.1"
+                          - "3.2"
+                          - "3.3"
+                          - "3.4"
+                          type: string
+                      type: object
+                    loadBalancers:
+                      description: |-
+                        loadBalancers is optional configuration for configuring load balancers for control plane or data plane nodes.
+                        when omitted, the system will create a default public load balancer with the name CLUSTER_NAME-loadbalancer.
+                        when specified, a VPC load balancer will be created and ControlPlaneEndpoint will be set with the associated hostname of the public load balancer.
+                        when LoadBalancers[].ID is set, it's expected that a load balancer with that ID exists, or else the system will give an error.
+                        when LoadBalancers[].Name is set, the system will first check for a load balancer with that Name; if it does not exist, the system will create a new load balancer.
+                        For each load balancer, a default backend pool and frontend listener will be configured with port 6443.
+                      items:
+                        description: VPCLoadBalancerSpec defines the desired state
+                          of an VPC load balancer.
+                        properties:
+                          additionalListeners:
+                            description: AdditionalListeners sets the additional
+                              listeners for the control plane load balancer.
+                            items:
+                              description: |-
+                                AdditionalListenerSpec defines the desired state of an
+                                additional listener on an VPC load balancer.
+                              properties:
+                                defaultPoolName:
+                                  description: defaultPoolName defines the name
+                                    of a VPC Load Balancer Backend Pool to use for
+                                    the VPC Load Balancer Listener.
+                                  maxLength: 63
+                                  minLength: 1
+                                  pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                                  type: string
+                                port:
+                                  description: Port sets the port for the additional
+                                    listener.
+                                  format: int64
+                                  maximum: 65535
+                                  minimum: 1
+                                  type: integer
+                                protocol:
+                                  description: |-
+                                    protocol defines the protocol to use for the VPC Load Balancer Listener.
+                                    Will default to TCP protocol if not specified.
+                                  enum:
+                                  - http
+                                  - https
+                                  - tcp
+                                  - udp
+                                  type: string
+                                selector:
+                                  description: |-
+                                    The selector is used to find IBMPowerVSMachines with matching labels.
+                                    If the label matches, the machine is then added to the load balancer listener configuration.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of
+                                        label selector requirements. The requirements
+                                        are ANDed.
+                                      items:
+                                        description: |-
+                                          A label selector requirement is a selector that contains values, a key, and an operator that
+                                          relates the key and values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              operator represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              values is an array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: |-
+                                        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                        operator is "In", and the values array contains only "value". 
The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                required:
+                                - port
+                                type: object
+                              type: array
+                              x-kubernetes-list-map-keys:
+                              - port
+                              x-kubernetes-list-type: map
+                          backendPools:
+                            description: backendPools defines the load balancer's
+                              backend pools.
+                            items:
+                              description: VPCLoadBalancerBackendPoolSpec defines
+                                the desired configuration of a VPC Load Balancer
+                                Backend Pool.
+                              properties:
+                                algorithm:
+                                  description: algorithm defines the load balancing
+                                    algorithm to use.
+                                  enum:
+                                  - least_connections
+                                  - round_robin
+                                  - weighted_round_robin
+                                  type: string
+                                healthMonitor:
+                                  description: healthMonitor defines the backend
+                                    pool's health monitor.
+                                  properties:
+                                    delay:
+                                      description: delay defines the seconds to
+                                        wait between health checks.
+                                      format: int64
+                                      maximum: 60
+                                      minimum: 2
+                                      type: integer
+                                    port:
+                                      description: port defines the port to perform
+                                        health monitoring on.
+                                      format: int64
+                                      maximum: 65535
+                                      minimum: 1
+                                      type: integer
+                                    retries:
+                                      description: retries defines the max retries
+                                        for health check.
+                                      format: int64
+                                      maximum: 10
+                                      minimum: 1
+                                      type: integer
+                                    timeout:
+                                      description: timeout defines the seconds to
+                                        wait for a health check response.
+                                      format: int64
+                                      maximum: 59
+                                      minimum: 1
+                                      type: integer
+                                    type:
+                                      description: type defines the protocol used
+                                        for health checks.
+                                      enum:
+                                      - http
+                                      - https
+                                      - tcp
+                                      type: string
+                                    urlPath:
+                                      description: urlPath defines the URL to use
+                                        for health monitoring.
+                                      pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$
+                                      type: string
+                                  required:
+                                  - delay
+                                  - retries
+                                  - timeout
+                                  - type
+                                  type: object
+                                name:
+                                  description: name defines the name of the Backend
+                                    Pool.
+                                  maxLength: 63
+                                  minLength: 1
+                                  pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                                  type: string
+                                protocol:
+                                  description: protocol defines the protocol to
+                                    use for the Backend Pool.
+                                  enum:
+                                  - http
+                                  - https
+                                  - tcp
+                                  - udp
+                                  type: string
+                              required:
+                              - algorithm
+                              - healthMonitor
+                              - protocol
+                              type: object
+                            type: array
+                          id:
+                            description: id of the loadbalancer
+                            maxLength: 64
+                            minLength: 1
+                            pattern: ^[-0-9a-z_]+$
+                            type: string
+                          name:
+                            description: Name sets the name of the VPC load balancer.
+                            maxLength: 63
+                            minLength: 1
+                            pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                            type: string
+                          public:
+                            default: true
+                            description: public indicates that load balancer is
+                              public or private
+                            type: boolean
+                          securityGroups:
+                            description: |-
+                              securityGroups defines the Security Groups to attach to the load balancer.
+                              Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                            items:
+                              description: VPCResource represents a VPC resource.
+                              properties:
+                                id:
+                                  description: id of the resource.
+                                  minLength: 1
+                                  type: string
+                                name:
+                                  description: name of the resource.
+                                  minLength: 1
+                                  type: string
+                              type: object
+                              x-kubernetes-validations:
+                              - message: an id or name must be provided
+                                rule: has(self.id) || has(self.name)
+                            type: array
+                          subnets:
+                            description: |-
+                              subnets defines the VPC Subnets to attach to the load balancer.
+                              Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                            items:
+                              description: VPCResource represents a VPC resource.
+                              properties:
+                                id:
+                                  description: id of the resource.
+                                  minLength: 1
+                                  type: string
+                                name:
+                                  description: name of the resource.
+                                  minLength: 1
+                                  type: string
+                              type: object
+                              x-kubernetes-validations:
+                              - message: an id or name must be provided
+                                rule: has(self.id) || has(self.name)
+                            type: array
+                        type: object
+                      type: array
+                    network:
+                      description: |-
+                        Network is the reference to the Network to use for this cluster.
+                        when the field is omitted, a DHCP service will be created in the Power VS workspace and its private network will be used.
+                        the network created by the DHCP service will have the following name format
+                        1. in case DHCPServer.Name is not set, the name will be DHCPSERVER_Private.
+                        2. if DHCPServer.Name is set, the name will be DHCPSERVER_Private.
+                        when Network.ID is set, it's expected that a network with that ID exists in the PowerVS workspace, or else the system will give an error.
+                        when Network.Name is set, the system will first check for a network with that Name in the PowerVS workspace; if it does not exist, the system will check for a DHCP network with the given Network.Name; if that also does not exist, it will create a new DHCP service and the name will be DHCPSERVER_Private.
+                        Network.RegEx is not yet supported and the system will ignore the value.
+                      properties:
+                        id:
+                          description: ID of resource
+                          minLength: 1
+                          type: string
+                        name:
+                          description: Name of resource
+                          minLength: 1
+                          type: string
+                        regex:
+                          description: |-
+                            Regular expression to match resource,
+                            In case of multiple resources matches the provided regular expression the first matched resource will be selected
+                          minLength: 1
+                          type: string
+                      type: object
+                    resourceGroup:
+                      description: |-
+                        resourceGroup name under which the resources will be created.
+                        when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource,
+                        1. it is expected to set the ResourceGroup.Name; not setting it will result in a webhook error.
+                        ResourceGroup.ID and ResourceGroup.Regex are not yet supported and the system will ignore the values.
+                      properties:
+                        id:
+                          description: ID of resource
+                          minLength: 1
+                          type: string
+                        name:
+                          description: Name of resource
+                          minLength: 1
+                          type: string
+                        regex:
+                          description: |-
+                            Regular expression to match resource,
+                            In case of multiple resources matches the provided regular expression the first matched resource will be selected
+                          minLength: 1
+                          type: string
+                      type: object
+                    serviceInstance:
+                      description: |-
+                        serviceInstance is the reference to the Power VS server workspace on which the server instance(VM) will be created.
+                        Power VS server workspace is a container for all Power VS instances at a specific geographic region.
+                        serviceInstance can be created via IBM Cloud catalog or CLI.
+                        supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli.
+                        More detail about Power VS service instance.
+                        https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+                        when omitted, the system will dynamically create the service instance with the name CLUSTER_NAME-serviceInstance.
+                        when ServiceInstance.ID is set, it's expected that a service instance with that ID exists in the PowerVS workspace, or else the system will give an error.
+                        when ServiceInstance.Name is set, the system will first check for a service instance with that Name in the PowerVS workspace; if it does not exist, the system will create a new instance.
+                        if more than one service instance exists with the ServiceInstance.Name in the given Zone, installation fails with an error. Use ServiceInstance.ID in those situations to use the specific service instance. 
+                        ServiceInstance.Regex is not yet supported and the system will ignore the value.
+                      properties:
+                        id:
+                          description: ID of resource
+                          minLength: 1
+                          type: string
+                        name:
+                          description: Name of resource
+                          minLength: 1
+                          type: string
+                        regex:
+                          description: |-
+                            Regular expression to match resource,
+                            In case of multiple resources matches the provided regular expression the first matched resource will be selected
+                          minLength: 1
+                          type: string
+                      type: object
+                    serviceInstanceID:
+                      description: |-
+                        ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed.
+                        Deprecated: use ServiceInstance instead
+                      type: string
+                    transitGateway:
+                      description: |-
+                        transitGateway contains information about IBM Cloud TransitGateway
+                        IBM Cloud TransitGateway helps in establishing network connectivity between IBM Cloud Power VS and VPC infrastructure
+                        more information about TransitGateway can be found here https://www.ibm.com/products/transit-gateway.
+                        when TransitGateway.ID is set, it's expected that a TransitGateway with that ID exists, or else the system will give an error.
+                        when TransitGateway.Name is set, the system will first check for a TransitGateway with that Name; if it does not exist, the system will create a new TransitGateway.
+                      properties:
+                        globalRouting:
+                          description: |-
+                            globalRouting indicates whether to set global routing true or not while creating the transit gateway.
+                            set this field to true only when PowerVS and VPC are in different regions; if they are the same, it's suggested to use local routing by setting the field to false.
+                            when the field is omitted, the system will decide whether to enable globalRouting based on the PowerVS region (the region associated with IBMPowerVSCluster.Spec.Zone) and the VPC region (IBMPowerVSCluster.Spec.VPC.Region).
+                          type: boolean
+                        id:
+                          description: id of resource.
+                          type: string
+                        name:
+                          description: name of resource.
+                          maxLength: 63
+                          minLength: 1
+                          pattern: ^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$
+                          type: string
+                      type: object
+                    vpc:
+                      description: |-
+                        vpc contains information about IBM Cloud VPC resources.
+                        when omitted, the system will dynamically create the VPC with the name CLUSTER_NAME-vpc.
+                        when VPC.ID is set, it's expected that a VPC with that ID exists, or else the system will give an error.
+                        when VPC.Name is set, the system will first check for a VPC with that Name; if it does not exist, the system will create a new VPC.
+                        when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource,
+                        1. it is expected to set the VPC.Region; not setting it will result in a webhook error.
+                      properties:
+                        id:
+                          description: id of resource.
+                          maxLength: 64
+                          minLength: 1
+                          pattern: ^[-0-9a-z_]+$
+                          type: string
+                        name:
+                          description: name of resource.
+                          maxLength: 63
+                          minLength: 1
+                          pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                          type: string
+                        region:
+                          description: |-
+                            region of IBM Cloud VPC.
+                            when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource,
+                            it is expected to set the region; not setting it will result in a webhook error.
+                          type: string
+                      type: object
+                    vpcSecurityGroups:
+                      description: VPCSecurityGroups to attach to the VPC resource
+                      items:
+                        description: VPCSecurityGroup defines a VPC Security Group
+                          that should exist or be created within the specified VPC,
+                          with the specified Security Group Rules.
+                        properties:
+                          id:
+                            description: id of the Security Group.
+                            type: string
+                          name:
+                            description: name of the Security Group.
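+# Illustrative only: a vpcSecurityGroups entry with one inbound rule allowing
+# TCP 6443 from any remote, matching the rule schema that follows; the group
+# name is hypothetical.
+#   vpcSecurityGroups:
+#   - name: example-api-server-sg
+#     rules:
+#     - action: allow
+#       direction: inbound
+#       source:
+#         protocol: tcp
+#         portRange:
+#           minimumPort: 6443
+#           maximumPort: 6443
+#         remotes:
+#         - remoteType: any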
+                            type: string
+                          rules:
+                            description: rules are the Security Group Rules for
+                              the Security Group.
+                            items:
+                              description: VPCSecurityGroupRule defines a VPC Security
+                                Group Rule for a specified Security Group.
+                              properties:
+                                action:
+                                  description: action defines whether to allow or
+                                    deny traffic defined by the Security Group Rule.
+                                  enum:
+                                  - allow
+                                  - deny
+                                  type: string
+                                destination:
+                                  description: |-
+                                    destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule.
+                                    Only used when direction is VPCSecurityGroupRuleDirectionOutbound.
+                                  properties:
+                                    icmpCode:
+                                      description: |-
+                                        icmpCode is the ICMP code for the Rule.
+                                        Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                      format: int64
+                                      type: integer
+                                    icmpType:
+                                      description: |-
+                                        icmpType is the ICMP type for the Rule.
+                                        Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
+                                      format: int64
+                                      type: integer
+                                    portRange:
+                                      description: portRange is a range of ports
+                                        allowed for the Rule's remote.
+                                      properties:
+                                        maximumPort:
+                                          description: maximumPort is the inclusive
+                                            upper range of ports.
+                                          format: int64
+                                          maximum: 65535
+                                          minimum: 1
+                                          type: integer
+                                        minimumPort:
+                                          description: minimumPort is the inclusive
+                                            lower range of ports.
+                                          format: int64
+                                          maximum: 65535
+                                          minimum: 1
+                                          type: integer
+                                      type: object
+                                      x-kubernetes-validations:
+                                      - message: maximum port must be greater than
+                                          or equal to minimum port
+                                        rule: self.maximumPort >= self.minimumPort
+                                    protocol:
+                                      description: protocol defines the traffic
+                                        protocol used for the Security Group Rule.
+                                      enum:
+                                      - all
+                                      - icmp
+                                      - tcp
+                                      - udp
+                                      type: string
+                                    remotes:
+                                      description: |-
+                                        remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote.
+                                        Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc.
+                                        This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc.
+                                      items:
+                                        description: |-
+                                          VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details.
+                                          The type of remote defines the additional remote details where are used for defining the remote.
+                                        properties:
+                                          address:
+                                            description: |2-
+                                               address is the address to use for the remote's destination/source.
+                                               Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress.
+                                            type: string
+                                          cidrSubnetName:
+                                            description: |-
+                                              cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
+                                              Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
+                                            type: string
+                                          remoteType:
+                                            description: remoteType defines the
+                                              type of filter to define for the remote's
+                                              destination/source.
+                                            enum:
+                                            - any
+                                            - cidr
+                                            - address
+                                            - sg
+                                            type: string
+                                          securityGroupName:
+                                            description: |-
+                                              securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
+                                              Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
+                                            type: string
+                                        required:
+                                        - remoteType
+                                        type: object
+                                        x-kubernetes-validations:
+                                        - message: cidrSubnetName, address, and
+                                            securityGroupName are not valid for
+                                            VPCSecurityGroupRuleRemoteTypeAny remoteType
+                                          rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName)
+                                            && !has(self.address) && !has(self.securityGroupName))
+                                            : true'
+                                        - message: only cidrSubnetName is valid
+                                            for VPCSecurityGroupRuleRemoteTypeCIDR
+                                            remoteType
+                                          rule: 'self.remoteType == ''cidr'' ? 
(has(self.cidrSubnetName) + && !has(self.address) && !has(self.securityGroupName)) + : true' + - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP + remoteType + rule: 'self.remoteType == ''address'' + ? (has(self.address) && !has(self.cidrSubnetName) + && !has(self.securityGroupName)) : true' + - message: only securityGroupName is valid + for VPCSecurityGroupRuleRemoteTypeSG + remoteType + rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName) + && !has(self.cidrSubnetName) && !has(self.address)) + : true' + type: array + required: + - protocol + - remotes + type: object + x-kubernetes-validations: + - message: icmpCode and icmpType are only supported + for VPCSecurityGroupRuleProtocolIcmp protocol + rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) + && !has(self.icmpType)) : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll + protocol + rule: 'self.protocol == ''all'' ? !has(self.portRange) + : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp + protocol + rule: 'self.protocol == ''icmp'' ? !has(self.portRange) + : true' + direction: + description: direction defines whether the traffic + is inbound or outbound for the Security Group + Rule. + enum: + - inbound + - outbound + type: string + securityGroupID: + description: securityGroupID is the ID of the + Security Group for the Security Group Rule. + type: string + source: + description: |- + source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule. + Only used when direction is VPCSecurityGroupRuleDirectionInbound. + properties: + icmpCode: + description: |- + icmpCode is the ICMP code for the Rule. + Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. + format: int64 + type: integer + icmpType: + description: |- + icmpType is the ICMP type for the Rule. + Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. + format: int64 + type: integer + portRange: + description: portRange is a range of ports + allowed for the Rule's remote. + properties: + maximumPort: + description: maximumPort is the inclusive + upper range of ports. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + minimumPort: + description: minimumPort is the inclusive + lower range of ports. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: maximum port must be greater than + or equal to minimum port + rule: self.maximumPort >= self.minimumPort + protocol: + description: protocol defines the traffic + protocol used for the Security Group Rule. + enum: + - all + - icmp + - tcp + - udp + type: string + remotes: + description: |- + remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote. + Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc. + This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc. + items: + description: |- + VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. + The type of remote defines the additional remote details where are used for defining the remote. + properties: + address: + description: |2- + address is the address to use for the remote's destination/source. + Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. 
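+# Illustrative only: one remotes entry per remoteType variant, respecting the
+# pairing rules enforced by the validations here; the names and address are
+# hypothetical.
+#   remotes:
+#   - remoteType: cidr
+#     cidrSubnetName: example-subnet
+#   - remoteType: address
+#     address: 192.0.2.10
+#   - remoteType: sg
+#     securityGroupName: example-sg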
+                                            type: string
+                                          cidrSubnetName:
+                                            description: |-
+                                              cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
+                                              Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
+                                            type: string
+                                          remoteType:
+                                            description: remoteType defines the
+                                              type of filter to define for the remote's
+                                              destination/source.
+                                            enum:
+                                            - any
+                                            - cidr
+                                            - address
+                                            - sg
+                                            type: string
+                                          securityGroupName:
+                                            description: |-
+                                              securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
+                                              Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
+                                            type: string
+                                        required:
+                                        - remoteType
+                                        type: object
+                                        x-kubernetes-validations:
+                                        - message: cidrSubnetName, address, and
+                                            securityGroupName are not valid for
+                                            VPCSecurityGroupRuleRemoteTypeAny remoteType
+                                          rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName)
+                                            && !has(self.address) && !has(self.securityGroupName))
+                                            : true'
+                                        - message: only cidrSubnetName is valid
+                                            for VPCSecurityGroupRuleRemoteTypeCIDR
+                                            remoteType
+                                          rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName)
+                                            && !has(self.address) && !has(self.securityGroupName))
+                                            : true'
+                                        - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP
+                                            remoteType
+                                          rule: 'self.remoteType == ''address''
+                                            ? (has(self.address) && !has(self.cidrSubnetName)
+                                            && !has(self.securityGroupName)) : true'
+                                        - message: only securityGroupName is valid
+                                            for VPCSecurityGroupRuleRemoteTypeSG
+                                            remoteType
+                                          rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName)
+                                            && !has(self.cidrSubnetName) && !has(self.address))
+                                            : true'
+                                      type: array
+                                    required:
+                                    - protocol
+                                    - remotes
+                                  type: object
+                                  x-kubernetes-validations:
+                                  - message: icmpCode and icmpType are only supported
+                                      for VPCSecurityGroupRuleProtocolIcmp protocol
+                                    rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode)
+                                      && !has(self.icmpType)) : true'
+                                  - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll
+                                      protocol
+                                    rule: 'self.protocol == ''all'' ? !has(self.portRange)
+                                      : true'
+                                  - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp
+                                      protocol
+                                    rule: 'self.protocol == ''icmp'' ? !has(self.portRange)
+                                      : true'
+                              required:
+                              - action
+                              - direction
+                              type: object
+                              x-kubernetes-validations:
+                              - message: both destination and source cannot be provided
+                                rule: (has(self.destination) && !has(self.source))
+                                  || (!has(self.destination) && has(self.source))
+                              - message: source must be set for VPCSecurityGroupRuleDirectionInbound
+                                  direction
+                                rule: 'self.direction == ''inbound'' ? has(self.source)
+                                  : true'
+                              - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound
+                                  direction
+                                rule: 'self.direction == ''inbound'' ? !has(self.destination)
+                                  : true'
+                              - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound
+                                  direction
+                                rule: 'self.direction == ''outbound'' ? has(self.destination)
+                                  : true'
+                              - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound
+                                  direction
+                                rule: 'self.direction == ''outbound'' ? !has(self.source)
+                                  : true'
+                            type: array
+                          tags:
+                            description: tags are tags to add to the Security Group.
+                            items:
+                              type: string
+                            type: array
+                        type: object
+                        x-kubernetes-validations:
+                        - message: either an id or name must be specified
+                          rule: has(self.id) || has(self.name)
+                      type: array
+                    vpcSubnets:
+                      description: |-
+                        vpcSubnets contains information about IBM Cloud VPC Subnet resources. 
+                        when omitted, the system will create the subnets in all the zones corresponding to VPC.Region, with the name CLUSTER_NAME-vpcsubnet-ZONE_NAME.
+                        possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server.
+                        when VPCSubnets[].ID is set, it's expected that a subnet with that ID exists, or else the system will give an error.
+                        when VPCSubnets[].Zone is not set, a random zone is picked from available zones of VPC.Region.
+                        when VPCSubnets[].Name is not set, the system will set the name as CLUSTER_NAME-vpcsubnet-INDEX.
+                        if a subnet with the name VPCSubnets[].Name is not found, the system will create a new subnet in VPCSubnets[].Zone.
+                      items:
+                        description: Subnet describes a subnet.
+                        properties:
+                          cidr:
+                            type: string
+                          id:
+                            maxLength: 64
+                            minLength: 1
+                            pattern: ^[-0-9a-z_]+$
+                            type: string
+                          name:
+                            maxLength: 63
+                            minLength: 1
+                            pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                            type: string
+                          zone:
+                            type: string
+                        type: object
+                      type: array
+                    zone:
+                      description: |-
+                        zone is the name of Power VS zone where the cluster will be created
+                        possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server.
+                        when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource,
+                        1. it is expected to set the zone; not setting it will result in a webhook error.
+                        2. the zone should have PER capabilities, or else the system will give an error.
+                      type: string
+                  required:
+                  - network
+                  - serviceInstanceID
+                  type: object
+              required:
+              - spec
+              type: object
+          required:
+          - template
+          type: object
+      type: object
+    served: true
+    storage: true
+    subresources: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+  labels:
+    cluster.x-k8s.io/provider: infrastructure-ibmcloud
+    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+  name: ibmvpcclustertemplates.infrastructure.cluster.x-k8s.io
+spec:
+  group: infrastructure.cluster.x-k8s.io
+  names:
+    categories:
+    - cluster-api
+    kind: IBMVPCClusterTemplate
+    listKind: IBMVPCClusterTemplateList
+    plural: ibmvpcclustertemplates
+    shortNames:
+    - ibmvpcct
+    singular: ibmvpcclustertemplate
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Time duration since creation of IBMVPCClusterTemplate
+      jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1beta2
+    schema:
+      openAPIV3Schema:
+        description: IBMVPCClusterTemplate is the Schema for the ibmvpcclustertemplates
+          API.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IBMVPCClusterTemplateSpec defines the desired state of IBMVPCClusterTemplate.
+            properties:
+              template:
+                description: IBMVPCClusterTemplateResource describes the data needed
+                  to create an IBMVPCCluster from a template. 
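+# Illustrative only: a minimal IBMVPCClusterTemplate against this v1beta2
+# schema, assuming region and resourceGroup remain the required spec fields as
+# in v1beta1; all names are hypothetical.
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+#   kind: IBMVPCClusterTemplate
+#   metadata:
+#     name: example-vpc-cluster-template
+#   spec:
+#     template:
+#       spec:
+#         region: us-south
+#         resourceGroup: example-resource-group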
+ properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + spec: + description: IBMVPCClusterSpec defines the desired state of IBMVPCCluster. + properties: + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint + used to communicate with the control plane. + properties: + host: + description: host is the hostname on which the API server + is serving. + maxLength: 512 + type: string + port: + description: port is the port on which the API server + is serving. + format: int32 + type: integer + required: + - host + - port + type: object + controlPlaneLoadBalancer: + description: |- + ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. + Use this for legacy support, use Network.LoadBalancers for the extended VPC support. + properties: + additionalListeners: + description: AdditionalListeners sets the additional listeners + for the control plane load balancer. + items: + description: |- + AdditionalListenerSpec defines the desired state of an + additional listener on an VPC load balancer. + properties: + defaultPoolName: + description: defaultPoolName defines the name of + a VPC Load Balancer Backend Pool to use for the + VPC Load Balancer Listener. + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + port: + description: Port sets the port for the additional + listener. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + protocol: + description: |- + protocol defines the protocol to use for the VPC Load Balancer Listener. + Will default to TCP protocol if not specified. + enum: + - http + - https + - tcp + - udp + type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. + If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + x-kubernetes-list-type: map + backendPools: + description: backendPools defines the load balancer's + backend pools. + items: + description: VPCLoadBalancerBackendPoolSpec defines + the desired configuration of a VPC Load Balancer Backend + Pool. + properties: + algorithm: + description: algorithm defines the load balancing + algorithm to use. + enum: + - least_connections + - round_robin + - weighted_round_robin + type: string + healthMonitor: + description: healthMonitor defines the backend pool's + health monitor. + properties: + delay: + description: delay defines the seconds to wait + between health checks. + format: int64 + maximum: 60 + minimum: 2 + type: integer + port: + description: port defines the port to perform + health monitoring on. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + retries: + description: retries defines the max retries + for health check. + format: int64 + maximum: 10 + minimum: 1 + type: integer + timeout: + description: timeout defines the seconds to + wait for a health check response. + format: int64 + maximum: 59 + minimum: 1 + type: integer + type: + description: type defines the protocol used + for health checks. + enum: + - http + - https + - tcp + type: string + urlPath: + description: urlPath defines the URL to use + for health monitoring. + pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ + type: string + required: + - delay + - retries + - timeout + - type + type: object + name: + description: name defines the name of the Backend + Pool. + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + protocol: + description: protocol defines the protocol to use + for the Backend Pool. + enum: + - http + - https + - tcp + - udp + type: string + required: + - algorithm + - healthMonitor + - protocol + type: object + type: array + id: + description: id of the loadbalancer + maxLength: 64 + minLength: 1 + pattern: ^[-0-9a-z_]+$ + type: string + name: + description: Name sets the name of the VPC load balancer. + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + public: + default: true + description: public indicates that load balancer is public + or private + type: boolean + securityGroups: + description: |- + securityGroups defines the Security Groups to attach to the load balancer. + Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). + items: + description: VPCResource represents a VPC resource. + properties: + id: + description: id of the resource. + minLength: 1 + type: string + name: + description: name of the resource. 
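+# Illustrative only: the legacy controlPlaneLoadBalancer form with one
+# additional listener; the name and port are hypothetical.
+#   controlPlaneLoadBalancer:
+#     name: example-control-plane-lb
+#     public: true
+#     additionalListeners:
+#     - port: 22623
+#       protocol: tcp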
+                              minLength: 1
+                              type: string
+                          type: object
+                          x-kubernetes-validations:
+                          - message: an id or name must be provided
+                            rule: has(self.id) || has(self.name)
+                        type: array
+                      subnets:
+                        description: |-
+                          subnets defines the VPC Subnets to attach to the load balancer.
+                          Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer).
+                        items:
+                          description: VPCResource represents a VPC resource.
+                          properties:
+                            id:
+                              description: id of the resource.
+                              minLength: 1
+                              type: string
+                            name:
+                              description: name of the resource.
+                              minLength: 1
+                              type: string
+                          type: object
+                          x-kubernetes-validations:
+                          - message: an id or name must be provided
+                            rule: has(self.id) || has(self.name)
+                        type: array
+                    type: object
+                  image:
+                    description: image represents the Image details used for the
+                      cluster.
+                    properties:
+                      cosBucket:
+                        description: cosBucket is the name of the IBM Cloud COS
+                          Bucket containing the source of the image, if necessary.
+                        type: string
+                      cosBucketRegion:
+                        description: cosBucketRegion is the COS region the bucket
+                          is in.
+                        type: string
+                      cosInstance:
+                        description: cosInstance is the name of the IBM Cloud
+                          COS Instance containing the source of the image, if
+                          necessary.
+                        type: string
+                      cosObject:
+                        description: cosObject is the name of an IBM Cloud COS
+                          Object used as the source of the image, if necessary.
+                        type: string
+                      crn:
+                        description: crn is the IBM Cloud CRN of the existing
+                          VPC Custom Image.
+                        type: string
+                      name:
+                        description: name is the name of the desired VPC Custom
+                          Image.
+                        maxLength: 63
+                        minLength: 1
+                        pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                        type: string
+                      operatingSystem:
+                        description: operatingSystem is the Custom Image's Operating
+                          System name.
+                        type: string
+                      resourceGroup:
+                        description: resourceGroup is the Resource Group to create
+                          the Custom Image in.
+                        properties:
+                          id:
+                            description: id defines the IBM Cloud Resource ID.
+                            type: string
+                          name:
+                            description: name defines the IBM Cloud Resource Name.
+                            type: string
+                        required:
+                        - id
+                        type: object
+                    type: object
+                    x-kubernetes-validations:
+                    - message: if any of cosInstance, cosBucket, or cosObject
+                        are specified, all must be specified
+                      rule: (!has(self.cosInstance) && !has(self.cosBucket) &&
+                        !has(self.cosObject)) || (has(self.cosInstance) && has(self.cosBucket)
+                        && has(self.cosObject))
+                    - message: an existing image name or crn must be provided,
+                        or to create a new image the cos resources must be provided,
+                        with or without a name
+                      rule: has(self.name) || has(self.crn) || (has(self.cosInstance)
+                        && has(self.cosBucket) && has(self.cosObject))
+                  network:
+                    description: network represents the VPC network to use for
+                      the cluster.
+                    properties:
+                      controlPlaneSubnets:
+                        description: controlPlaneSubnets is a set of Subnet's
+                          which define the Control Plane subnets.
+                        items:
+                          description: Subnet describes a subnet.
+                          properties:
+                            cidr:
+                              type: string
+                            id:
+                              maxLength: 64
+                              minLength: 1
+                              pattern: ^[-0-9a-z_]+$
+                              type: string
+                            name:
+                              maxLength: 63
+                              minLength: 1
+                              pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
+                              type: string
+                            zone:
+                              type: string
+                          type: object
+                        type: array
+                      loadBalancers:
+                        description: loadBalancers is a set of VPC Load Balancer
+                          definitions to use for the cluster.
+                        items:
+                          description: VPCLoadBalancerSpec defines the desired
+                            state of an VPC load balancer. 
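+# Illustrative only: a network.loadBalancers entry with a single backend pool,
+# matching the VPCLoadBalancerSpec schema that follows; all names and numbers
+# are hypothetical.
+#   loadBalancers:
+#   - name: example-load-balancer
+#     public: true
+#     backendPools:
+#     - name: example-backend-pool
+#       algorithm: round_robin
+#       protocol: tcp
+#       healthMonitor:
+#         delay: 5
+#         retries: 2
+#         timeout: 2
+#         type: tcp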
+ properties: + additionalListeners: + description: AdditionalListeners sets the additional + listeners for the control plane load balancer. + items: + description: |- + AdditionalListenerSpec defines the desired state of an + additional listener on an VPC load balancer. + properties: + defaultPoolName: + description: defaultPoolName defines the name + of a VPC Load Balancer Backend Pool to use + for the VPC Load Balancer Listener. + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + port: + description: Port sets the port for the additional + listener. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + protocol: + description: |- + protocol defines the protocol to use for the VPC Load Balancer Listener. + Will default to TCP protocol if not specified. + enum: + - http + - https + - tcp + - udp + type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. + If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + x-kubernetes-list-type: map + backendPools: + description: backendPools defines the load balancer's + backend pools. + items: + description: VPCLoadBalancerBackendPoolSpec defines + the desired configuration of a VPC Load Balancer + Backend Pool. + properties: + algorithm: + description: algorithm defines the load balancing + algorithm to use. + enum: + - least_connections + - round_robin + - weighted_round_robin + type: string + healthMonitor: + description: healthMonitor defines the backend + pool's health monitor. + properties: + delay: + description: delay defines the seconds + to wait between health checks. + format: int64 + maximum: 60 + minimum: 2 + type: integer + port: + description: port defines the port to + perform health monitoring on. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + retries: + description: retries defines the max retries + for health check. 
+ format: int64 + maximum: 10 + minimum: 1 + type: integer + timeout: + description: timeout defines the seconds + to wait for a health check response. + format: int64 + maximum: 59 + minimum: 1 + type: integer + type: + description: type defines the protocol + used for health checks. + enum: + - http + - https + - tcp + type: string + urlPath: + description: urlPath defines the URL to + use for health monitoring. + pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ + type: string + required: + - delay + - retries + - timeout + - type + type: object + name: + description: name defines the name of the + Backend Pool. + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + protocol: + description: protocol defines the protocol + to use for the Backend Pool. + enum: + - http + - https + - tcp + - udp + type: string + required: + - algorithm + - healthMonitor + - protocol + type: object + type: array + id: + description: id of the load balancer + maxLength: 64 + minLength: 1 + pattern: ^[-0-9a-z_]+$ + type: string + name: + description: Name sets the name of the VPC load + balancer. + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + public: + default: true + description: public indicates whether the load balancer + is public or private + type: boolean + securityGroups: + description: |- + securityGroups defines the Security Groups to attach to the load balancer. + Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). + items: + description: VPCResource represents a VPC resource. + properties: + id: + description: id of the resource. + minLength: 1 + type: string + name: + description: name of the resource. + minLength: 1 + type: string + type: object + x-kubernetes-validations: + - message: an id or name must be provided + rule: has(self.id) || has(self.name) + type: array + subnets: + description: |- + subnets defines the VPC Subnets to attach to the load balancer. + Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). + items: + description: VPCResource represents a VPC resource. + properties: + id: + description: id of the resource. + minLength: 1 + type: string + name: + description: name of the resource. + minLength: 1 + type: string + type: object + x-kubernetes-validations: + - message: an id or name must be provided + rule: has(self.id) || has(self.name) + type: array + type: object + type: array + resourceGroup: + description: |- + resourceGroup is the Resource Group containing all of the network resources. + This can be different than the Resource Group containing the remaining cluster resources. + properties: + id: + description: id defines the IBM Cloud Resource ID. + type: string + name: + description: name defines the IBM Cloud Resource Name. + type: string + required: + - id + type: object + securityGroups: + description: securityGroups is a set of VPCSecurityGroup's + which define the VPC Security Groups that manage traffic + within and out of the VPC. + items: + description: VPCSecurityGroup defines a VPC Security + Group that should exist or be created within the specified + VPC, with the specified Security Group Rules. + properties: + id: + description: id of the Security Group.
+ type: string + name: + description: name of the Security Group. + type: string + rules: + description: rules are the Security Group Rules + for the Security Group. + items: + description: VPCSecurityGroupRule defines a VPC + Security Group Rule for a specified Security + Group. + properties: + action: + description: action defines whether to allow + or deny traffic defined by the Security + Group Rule. + enum: + - allow + - deny + type: string + destination: + description: |- + destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule. + Only used when direction is VPCSecurityGroupRuleDirectionOutbound. + properties: + icmpCode: + description: |- + icmpCode is the ICMP code for the Rule. + Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. + format: int64 + type: integer + icmpType: + description: |- + icmpType is the ICMP type for the Rule. + Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. + format: int64 + type: integer + portRange: + description: portRange is a range of ports + allowed for the Rule's remote. + properties: + maximumPort: + description: maximumPort is the inclusive + upper range of ports. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + minimumPort: + description: minimumPort is the inclusive + lower range of ports. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: maximum port must be greater + than or equal to minimum port + rule: self.maximumPort >= self.minimumPort + protocol: + description: protocol defines the traffic + protocol used for the Security Group + Rule. + enum: + - all + - icmp + - tcp + - udp + type: string + remotes: + description: |- + remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote. + Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc. + This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc. + items: + description: |- + VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. + The type of remote defines the additional remote details which are used for defining the remote. + properties: + address: + description: |2- + address is the address to use for the remote's destination/source. + Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. + type: string + cidrSubnetName: + description: |- + cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. + Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. + type: string + remoteType: + description: remoteType defines + the type of filter to define for + the remote's destination/source. + enum: + - any + - cidr + - address + - sg + type: string + securityGroupName: + description: |- + securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. + Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG. + type: string + required: + - remoteType + type: object + x-kubernetes-validations: + - message: cidrSubnetName, address, + and securityGroupName are not valid + for VPCSecurityGroupRuleRemoteTypeAny + remoteType + rule: 'self.remoteType == ''any'' + ?
(!has(self.cidrSubnetName) && + !has(self.address) && !has(self.securityGroupName)) + : true' + - message: only cidrSubnetName is valid + for VPCSecurityGroupRuleRemoteTypeCIDR + remoteType + rule: 'self.remoteType == ''cidr'' + ? (has(self.cidrSubnetName) && !has(self.address) + && !has(self.securityGroupName)) + : true' + - message: only address is valid for + VPCSecurityGroupRuleRemoteTypeIP + remoteType + rule: 'self.remoteType == ''address'' + ? (has(self.address) && !has(self.cidrSubnetName) + && !has(self.securityGroupName)) + : true' + - message: only securityGroupName is + valid for VPCSecurityGroupRuleRemoteTypeSG + remoteType + rule: 'self.remoteType == ''sg'' ? + (has(self.securityGroupName) && + !has(self.cidrSubnetName) && !has(self.address)) + : true' + type: array + required: + - protocol + - remotes + type: object + x-kubernetes-validations: + - message: icmpCode and icmpType are only + supported for VPCSecurityGroupRuleProtocolIcmp + protocol + rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) + && !has(self.icmpType)) : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll + protocol + rule: 'self.protocol == ''all'' ? !has(self.portRange) + : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp + protocol + rule: 'self.protocol == ''icmp'' ? !has(self.portRange) + : true' + direction: + description: direction defines whether the + traffic is inbound or outbound for the Security + Group Rule. + enum: + - inbound + - outbound + type: string + securityGroupID: + description: securityGroupID is the ID of + the Security Group for the Security Group + Rule. + type: string + source: + description: |- + source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule. + Only used when direction is VPCSecurityGroupRuleDirectionInbound. + properties: + icmpCode: + description: |- + icmpCode is the ICMP code for the Rule. + Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. + format: int64 + type: integer + icmpType: + description: |- + icmpType is the ICMP type for the Rule. + Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. + format: int64 + type: integer + portRange: + description: portRange is a range of ports + allowed for the Rule's remote. + properties: + maximumPort: + description: maximumPort is the inclusive + upper range of ports. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + minimumPort: + description: minimumPort is the inclusive + lower range of ports. + format: int64 + maximum: 65535 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: maximum port must be greater + than or equal to minimum port + rule: self.maximumPort >= self.minimumPort + protocol: + description: protocol defines the traffic + protocol used for the Security Group + Rule. + enum: + - all + - icmp + - tcp + - udp + type: string + remotes: + description: |- + remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote. + Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc. + This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc. + items: + description: |- + VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. + The type of remote defines the additional remote details which are used for defining the remote.
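+# Illustrative sketch (editor's addition): remotes entries that satisfy the
+# per-remoteType CEL rules that follow -- each entry carries only the field
+# matching its remoteType. Subnet name and address are hypothetical.
+#   remotes:
+#   - remoteType: cidr
+#     cidrSubnetName: example-worker-subnet
+#   - remoteType: address
+#     address: 192.0.2.10
+#   - remoteType: any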
+ properties: + address: + description: |2- + address is the address to use for the remote's destination/source. + Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. + type: string + cidrSubnetName: + description: |- + cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. + Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. + type: string + remoteType: + description: remoteType defines + the type of filter to define for + the remote's destination/source. + enum: + - any + - cidr + - address + - sg + type: string + securityGroupName: + description: |- + securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. + Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG. + type: string + required: + - remoteType + type: object + x-kubernetes-validations: + - message: cidrSubnetName, address, + and securityGroupName are not valid + for VPCSecurityGroupRuleRemoteTypeAny + remoteType + rule: 'self.remoteType == ''any'' + ? (!has(self.cidrSubnetName) && + !has(self.address) && !has(self.securityGroupName)) + : true' + - message: only cidrSubnetName is valid + for VPCSecurityGroupRuleRemoteTypeCIDR + remoteType + rule: 'self.remoteType == ''cidr'' + ? (has(self.cidrSubnetName) && !has(self.address) + && !has(self.securityGroupName)) + : true' + - message: only address is valid for + VPCSecurityGroupRuleRemoteTypeIP + remoteType + rule: 'self.remoteType == ''address'' + ? (has(self.address) && !has(self.cidrSubnetName) + && !has(self.securityGroupName)) + : true' + - message: only securityGroupName is + valid for VPCSecurityGroupRuleRemoteTypeSG + remoteType + rule: 'self.remoteType == ''sg'' ? + (has(self.securityGroupName) && + !has(self.cidrSubnetName) && !has(self.address)) + : true' + type: array + required: + - protocol + - remotes + type: object + x-kubernetes-validations: + - message: icmpCode and icmpType are only + supported for VPCSecurityGroupRuleProtocolIcmp + protocol + rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) + && !has(self.icmpType)) : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll + protocol + rule: 'self.protocol == ''all'' ? !has(self.portRange) + : true' + - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp + protocol + rule: 'self.protocol == ''icmp'' ? !has(self.portRange) + : true' + required: + - action + - direction + type: object + x-kubernetes-validations: + - message: both destination and source cannot + be provided + rule: (has(self.destination) && !has(self.source)) + || (!has(self.destination) && has(self.source)) + - message: source must be set for VPCSecurityGroupRuleDirectionInbound + direction + rule: 'self.direction == ''inbound'' ? has(self.source) + : true' + - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound + direction + rule: 'self.direction == ''inbound'' ? !has(self.destination) + : true' + - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound + direction + rule: 'self.direction == ''outbound'' ? has(self.destination) + : true' + - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound + direction + rule: 'self.direction == ''outbound'' ? !has(self.source) + : true' + type: array + tags: + description: tags are tags to add to the Security + Group.
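+# Illustrative sketch (editor's addition): a complete inbound rule consistent
+# with the direction validations above (inbound rules set `source`, outbound
+# rules set `destination`, never both). Port and remote values are hypothetical.
+#   rules:
+#   - direction: inbound
+#     action: allow
+#     source:
+#       protocol: tcp
+#       portRange:
+#         minimumPort: 6443
+#         maximumPort: 6443
+#       remotes:
+#       - remoteType: any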
+ items: + type: string + type: array + type: object + x-kubernetes-validations: + - message: either an id or name must be specified + rule: has(self.id) || has(self.name) + type: array + vpc: + description: vpc defines the IBM Cloud VPC for extended + VPC Infrastructure support. + properties: + id: + description: id of the resource. + minLength: 1 + type: string + name: + description: name of the resource. + minLength: 1 + type: string + type: object + x-kubernetes-validations: + - message: an id or name must be provided + rule: has(self.id) || has(self.name) + workerSubnets: + description: workerSubnets is a set of Subnet's which + define the Worker subnets. + items: + description: Subnet describes a subnet. + properties: + cidr: + type: string + id: + maxLength: 64 + minLength: 1 + pattern: ^[-0-9a-z_]+$ + type: string + name: + maxLength: 63 + minLength: 1 + pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ + type: string + zone: + type: string + type: object + type: array + type: object + region: + description: The IBM Cloud Region the cluster lives in. + type: string + resourceGroup: + description: The VPC resources should be created under the + resource group. + type: string + vpc: + description: The Name of VPC. + type: string + zone: + description: The Name of availability zone. + type: string + required: + - region + - resourceGroup + type: object + required: + - spec + type: object + type: object + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-manager-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - clusters/status + - machines + - machines/status + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - ibmpowervsclusters + - ibmpowervsimages + - ibmpowervsmachines + - ibmvpcclusters + - ibmvpcmachines + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - ibmpowervsclusters/status + - ibmpowervsimages/status + - ibmpowervsmachines/status + - ibmpowervsmachinetemplates/status + - ibmvpcclusters/status + - ibmvpcmachines/status + - ibmvpcmachinetemplates/status + verbs: + - get + - patch + - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - ibmpowervsmachinetemplates + - ibmvpcmachinetemplates + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: capi-ibmcloud-manager-role +subjects: +- kind: ServiceAccount + name: capi-ibmcloud-manager + namespace: openshift-cluster-api +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-leader-elect-role + namespace: openshift-cluster-api +rules: +- apiGroups: + - "" + 
resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-leader-elect-rolebinding + namespace: openshift-cluster-api +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: capi-ibmcloud-leader-elect-role +subjects: +- kind: ServiceAccount + name: capi-ibmcloud-manager + namespace: openshift-cluster-api +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-manager + namespace: openshift-cluster-api +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + control-plane: controller-manager + name: capi-ibmcloud-controller-manager + namespace: openshift-cluster-api +spec: + replicas: 1 + selector: + matchLabels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + control-plane: controller-manager + strategy: {} + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + control-plane: controller-manager + spec: + containers: + - args: + - --leader-elect + - --provider-id-fmt=${PROVIDER_ID_FORMAT:=v2} + - --diagnostics-address=${CAPIBM_DIAGNOSTICS_ADDRESS:=:8443} + - --insecure-diagnostics=${CAPIBM_INSECURE_DIAGNOSTICS:=false} + - --service-endpoint=${SERVICE_ENDPOINT:=none} + - --v=${LOGLEVEL:=0} + command: + - /manager + env: + - name: IBM_CREDENTIALS_FILE + value: /home/.ibmcloud/ibm-credentials.env + image: registry.ci.openshift.org/openshift:ibmcloud-cluster-api-controllers + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: healthz + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 9440 + name: healthz + protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + resources: + requests: + cpu: 10m + memory: 50Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + - mountPath: /home/.ibmcloud + name: credentials + priorityClassName: system-cluster-critical + serviceAccountName: capi-ibmcloud-manager + terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert + - name: credentials + secret: + secretName: capi-ibmcloud-manager-bootstrap-credentials +status: {} +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + service.beta.openshift.io/inject-cabundle: "true" + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervscluster + failurePolicy: Fail + 
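+# Editor's note: the mutating webhooks in this configuration follow
+# controller-runtime's generated conventions -- each is named m<resource>.kb.io
+# and served at /mutate-<group>-<version>-<kind>; the validating configuration
+# further down mirrors them as v<resource>.kb.io under /validate-... paths, all
+# pointing at capi-ibmcloud-webhook-service in openshift-cluster-api.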
name: mibmpowervscluster.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsclusters + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsclustertemplate + failurePolicy: Fail + name: mibmpowervsclustertemplate.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsclustertemplates + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsimage + failurePolicy: Fail + name: mibmpowervsimage.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsimages + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachine + failurePolicy: Fail + name: mibmpowervsmachine.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsmachines + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachinetemplate + failurePolicy: Fail + name: mibmpowervsmachinetemplate.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsmachinetemplates + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpccluster + failurePolicy: Fail + name: mibmvpccluster.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmvpcclusters + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachine + failurePolicy: Fail + name: mibmvpcmachine.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmvpcmachines + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachinetemplate + failurePolicy: Fail + name: mibmvpcmachinetemplate.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmvpcmachinetemplates + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + annotations: + 
service.beta.openshift.io/inject-cabundle: "true" + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervscluster + failurePolicy: Fail + name: vibmpowervscluster.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsclusters + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsclustertemplate + failurePolicy: Fail + name: vibmpowervsclustertemplate.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsclustertemplates + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsimage + failurePolicy: Fail + name: vibmpowervsimage.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsimages + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachine + failurePolicy: Fail + name: vibmpowervsmachine.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsmachines + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachinetemplate + failurePolicy: Fail + name: vibmpowervsmachinetemplate.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmpowervsmachinetemplates + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpccluster + failurePolicy: Fail + name: vibmvpccluster.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmvpcclusters + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachine + failurePolicy: Fail + name: vibmvpcmachine.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmvpcmachines + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api + path: 
/validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachinetemplate + failurePolicy: Fail + name: vibmvpcmachinetemplate.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - ibmvpcmachinetemplates + sideEffects: None +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.openshift.io/serving-cert-secret-name: webhook-server-cert + labels: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + name: capi-ibmcloud-webhook-service + namespace: openshift-cluster-api +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + cluster.x-k8s.io/provider: infrastructure-ibmcloud + control-plane: controller-manager +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: openshift-cluster-api-protect-ibmpowervscluster +spec: + failurePolicy: Fail + matchConstraints: + resourceRules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - '*' + operations: + - DELETE + resources: + - ibmpowervsclusters + paramKind: + apiVersion: config.openshift.io/v1 + kind: Infrastructure + validations: + - expression: '!(oldObject.metadata.name == params.status.infrastructureName)' + message: InfraCluster resources with metadata.name corresponding to the cluster + infrastructureName cannot be deleted. +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicyBinding +metadata: + name: openshift-cluster-api-protect-ibmpowervscluster +spec: + matchResources: + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: openshift-cluster-api + paramRef: + name: cluster + parameterNotFoundAction: Deny + policyName: openshift-cluster-api-protect-ibmpowervscluster + validationActions: + - Deny diff --git a/openshift/capi-operator-manifests/default/metadata.yaml b/openshift/capi-operator-manifests/default/metadata.yaml new file mode 100644 index 0000000000..4bcbd1403e --- /dev/null +++ b/openshift/capi-operator-manifests/default/metadata.yaml @@ -0,0 +1,7 @@ +attributes: + type: infrastructure + version: v0.12.0 +installOrder: 20 +name: cluster-api-provider-ibmcloud +ocpPlatform: PowerVS +selfImageRef: registry.ci.openshift.org/openshift:ibmcloud-cluster-api-controllers diff --git a/openshift/kustomization.yaml b/openshift/kustomization.yaml new file mode 100644 index 0000000000..a68719e57a --- /dev/null +++ b/openshift/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +components: +- tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen + +resources: +- ../config/default + +images: +- name: gcr.io/k8s-staging-capi-ibmcloud/cluster-api-ibmcloud-controller + newName: registry.ci.openshift.org/openshift + newTag: ibmcloud-cluster-api-controllers diff --git a/openshift/manifests/.gitkeep b/openshift/manifests/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openshift/manifests/0000_30_cluster-api_04_cm.infrastructure-ibmcloud.yaml b/openshift/manifests/0000_30_cluster-api_04_cm.infrastructure-ibmcloud.yaml deleted file mode 100644 index 25f42a639b..0000000000 --- a/openshift/manifests/0000_30_cluster-api_04_cm.infrastructure-ibmcloud.yaml +++ /dev/null @@ -1,8780 +0,0 @@ -apiVersion: v1 -data: - components: |- - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.18.0 - exclude.release.openshift.io/internal-openshift-hosted: "true" - 
include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - service.beta.openshift.io/inject-cabundle: "true" - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 - clusterctl.cluster.x-k8s.io: "" - name: ibmvpcclusters.infrastructure.cluster.x-k8s.io - spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: Cg== - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /convert - conversionReviewVersions: - - v1 - - v1beta1 - group: infrastructure.cluster.x-k8s.io - names: - categories: - - cluster-api - kind: IBMVPCCluster - listKind: IBMVPCClusterList - plural: ibmvpcclusters - singular: ibmvpccluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Cluster to which this IBMVPCCluster belongs - jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name - name: Cluster - type: string - - description: Cluster infrastructure is ready for IBM VPC instances - jsonPath: .status.ready - name: Ready - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: IBMVPCCluster is the Schema for the ibmvpcclusters API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMVPCClusterSpec defines the desired state of IBMVPCCluster. - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint used to - communicate with the control plane. - properties: - host: - description: host is the hostname on which the API server is serving. - maxLength: 512 - type: string - port: - description: port is the port on which the API server is serving. - format: int32 - type: integer - required: - - host - - port - type: object - controlPlaneLoadBalancer: - description: ControlPlaneLoadBalancer is optional configuration for - customizing control plane behavior. - properties: - name: - description: Name sets the name of the VPC load balancer. - maxLength: 63 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - type: object - region: - description: The IBM Cloud Region the cluster lives in. - type: string - resourceGroup: - description: The VPC resources should be created under the resource - group. - type: string - vpc: - description: The Name of VPC. - type: string - zone: - description: The Name of availability zone. - type: string - required: - - region - - resourceGroup - type: object - status: - description: IBMVPCClusterStatus defines the observed state of IBMVPCCluster. - properties: - conditions: - description: Conditions defines current service state of the load - balancer. 
- items: - description: Condition defines an observation of a Cluster API resource - operational state. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 - type: string - reason: - description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 - minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - controlPlaneLoadBalancerState: - description: ControlPlaneLoadBalancerState is the status of the load - balancer. - type: string - ready: - description: Ready is true when the provider resource is ready. - type: boolean - subnet: - description: Subnet describes a subnet. - properties: - cidr: - type: string - id: - type: string - name: - type: string - zone: - type: string - required: - - cidr - - id - - name - - zone - type: object - vpc: - description: |- - INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - Important: Run "make" to regenerate code after modifying this file - properties: - id: - type: string - name: - type: string - required: - - id - - name - type: object - vpcEndpoint: - description: VPCEndpoint describes a VPCEndpoint. - properties: - address: - type: string - floatingIPID: - description: 'Deprecated: This field has no function and is going - to be removed in the next release.' - type: string - loadBalancerIPID: - type: string - required: - - address - type: object - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - description: Cluster to which this IBMVPCCluster belongs - jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name - name: Cluster - type: string - - description: Cluster infrastructure is ready for IBM VPC instances - jsonPath: .status.ready - name: Ready - type: string - name: v1beta2 - schema: - openAPIV3Schema: - description: IBMVPCCluster is the Schema for the ibmvpcclusters API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMVPCClusterSpec defines the desired state of IBMVPCCluster. - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint used to - communicate with the control plane. - properties: - host: - description: host is the hostname on which the API server is serving. - maxLength: 512 - type: string - port: - description: port is the port on which the API server is serving. - format: int32 - type: integer - required: - - host - - port - type: object - controlPlaneLoadBalancer: - description: |- - ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. - Use this for legacy support, use Network.LoadBalancers for the extended VPC support. - properties: - additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer. - items: - description: |- - AdditionalListenerSpec defines the desired state of an - additional listener on an VPC load balancer. - properties: - defaultPoolName: - description: defaultPoolName defines the name of a VPC Load - Balancer Backend Pool to use for the VPC Load Balancer - Listener. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - port: - description: Port sets the port for the additional listener. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - protocol: - description: |- - protocol defines the protocol to use for the VPC Load Balancer Listener. - Will default to TCP protocol if not specified. - enum: - - http - - https - - tcp - - udp - type: string - selector: - description: |- - The selector is used to find IBMPowerVSMachines with matching labels. - If the label matches, the machine is then added to the load balancer listener configuration. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - x-kubernetes-list-type: map - backendPools: - description: backendPools defines the load balancer's backend - pools. - items: - description: VPCLoadBalancerBackendPoolSpec defines the desired - configuration of a VPC Load Balancer Backend Pool. - properties: - algorithm: - description: algorithm defines the load balancing algorithm - to use. - enum: - - least_connections - - round_robin - - weighted_round_robin - type: string - healthMonitor: - description: healthMonitor defines the backend pool's health - monitor. - properties: - delay: - description: delay defines the seconds to wait between - health checks. - format: int64 - maximum: 60 - minimum: 2 - type: integer - port: - description: port defines the port to perform health - monitoring on. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - retries: - description: retries defines the max retries for health - check. - format: int64 - maximum: 10 - minimum: 1 - type: integer - timeout: - description: timeout defines the seconds to wait for - a health check response. - format: int64 - maximum: 59 - minimum: 1 - type: integer - type: - description: type defines the protocol used for health - checks. - enum: - - http - - https - - tcp - type: string - urlPath: - description: urlPath defines the URL to use for health - monitoring. - pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ - type: string - required: - - delay - - retries - - timeout - - type - type: object - name: - description: name defines the name of the Backend Pool. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - protocol: - description: protocol defines the protocol to use for the - Backend Pool. - enum: - - http - - https - - tcp - - udp - type: string - required: - - algorithm - - healthMonitor - - protocol - type: object - type: array - id: - description: id of the loadbalancer - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: Name sets the name of the VPC load balancer. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - public: - default: true - description: public indicates that load balancer is public or - private - type: boolean - securityGroups: - description: |- - securityGroups defines the Security Groups to attach to the load balancer. - Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - subnets: - description: |- - subnets defines the VPC Subnets to attach to the load balancer. 
- Subnets defiens here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - type: object - image: - description: image represents the Image details used for the cluster. - properties: - cosBucket: - description: cosBucket is the name of the IBM Cloud COS Bucket - containing the source of the image, if necessary. - type: string - cosBucketRegion: - description: cosBucketRegion is the COS region the bucket is in. - type: string - cosInstance: - description: cosInstance is the name of the IBM Cloud COS Instance - containing the source of the image, if necessary. - type: string - cosObject: - description: cosObject is the name of a IBM Cloud COS Object used - as the source of the image, if necessary. - type: string - crn: - description: crn is the IBM Cloud CRN of the existing VPC Custom - Image. - type: string - name: - description: name is the name of the desired VPC Custom Image. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - operatingSystem: - description: operatingSystem is the Custom Image's Operating System - name. - type: string - resourceGroup: - description: resourceGroup is the Resource Group to create the - Custom Image in. - properties: - id: - description: id defines the IBM Cloud Resource ID. - type: string - name: - description: name defines the IBM Cloud Resource Name. - type: string - required: - - id - type: object - type: object - x-kubernetes-validations: - - message: if any of cosInstance, cosBucket, or cosObject are specified, - all must be specified - rule: (!has(self.cosInstance) && !has(self.cosBucket) && !has(self.cosObject)) - || (has(self.cosInstance) && has(self.cosBucket) && has(self.cosObject)) - - message: an existing image name or crn must be provided, or to create - a new image the cos resources must be provided, with or without - a name - rule: has(self.name) || has(self.crn) || (has(self.cosInstance) - && has(self.cosBucket) && has(self.cosObject)) - network: - description: network represents the VPC network to use for the cluster. - properties: - controlPlaneSubnets: - description: controlPlaneSubnets is a set of Subnet's which define - the Control Plane subnets. - items: - description: Subnet describes a subnet. - properties: - cidr: - type: string - id: - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - zone: - type: string - type: object - type: array - loadBalancers: - description: loadBalancers is a set of VPC Load Balancer definitions - to use for the cluster. - items: - description: VPCLoadBalancerSpec defines the desired state of - an VPC load balancer. - properties: - additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer. - items: - description: |- - AdditionalListenerSpec defines the desired state of an - additional listener on an VPC load balancer. 
- properties: - defaultPoolName: - description: defaultPoolName defines the name of a - VPC Load Balancer Backend Pool to use for the VPC - Load Balancer Listener. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - port: - description: Port sets the port for the additional - listener. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - protocol: - description: |- - protocol defines the protocol to use for the VPC Load Balancer Listener. - Will default to TCP protocol if not specified. - enum: - - http - - https - - tcp - - udp - type: string - selector: - description: |- - The selector is used to find IBMPowerVSMachines with matching labels. - If the label matches, the machine is then added to the load balancer listener configuration. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - x-kubernetes-list-type: map - backendPools: - description: backendPools defines the load balancer's backend - pools. - items: - description: VPCLoadBalancerBackendPoolSpec defines the - desired configuration of a VPC Load Balancer Backend - Pool. - properties: - algorithm: - description: algorithm defines the load balancing - algorithm to use. - enum: - - least_connections - - round_robin - - weighted_round_robin - type: string - healthMonitor: - description: healthMonitor defines the backend pool's - health monitor. - properties: - delay: - description: delay defines the seconds to wait - between health checks. - format: int64 - maximum: 60 - minimum: 2 - type: integer - port: - description: port defines the port to perform - health monitoring on. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - retries: - description: retries defines the max retries for - health check. - format: int64 - maximum: 10 - minimum: 1 - type: integer - timeout: - description: timeout defines the seconds to wait - for a health check response. - format: int64 - maximum: 59 - minimum: 1 - type: integer - type: - description: type defines the protocol used for - health checks. 
- enum: - - http - - https - - tcp - type: string - urlPath: - description: urlPath defines the URL to use for - health monitoring. - pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ - type: string - required: - - delay - - retries - - timeout - - type - type: object - name: - description: name defines the name of the Backend - Pool. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - protocol: - description: protocol defines the protocol to use - for the Backend Pool. - enum: - - http - - https - - tcp - - udp - type: string - required: - - algorithm - - healthMonitor - - protocol - type: object - type: array - id: - description: id of the loadbalancer - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: Name sets the name of the VPC load balancer. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - public: - default: true - description: public indicates that load balancer is public - or private - type: boolean - securityGroups: - description: |- - securityGroups defines the Security Groups to attach to the load balancer. - Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - subnets: - description: |- - subnets defines the VPC Subnets to attach to the load balancer. - Subnets defiens here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - type: object - type: array - resourceGroup: - description: |- - resourceGroup is the Resource Group containing all of the newtork resources. - This can be different than the Resource Group containing the remaining cluster resources. - properties: - id: - description: id defines the IBM Cloud Resource ID. - type: string - name: - description: name defines the IBM Cloud Resource Name. - type: string - required: - - id - type: object - securityGroups: - description: securityGroups is a set of VPCSecurityGroup's which - define the VPC Security Groups that manage traffic within and - out of the VPC. - items: - description: VPCSecurityGroup defines a VPC Security Group that - should exist or be created within the specified VPC, with - the specified Security Group Rules. - properties: - id: - description: id of the Security Group. - type: string - name: - description: name of the Security Group. - type: string - rules: - description: rules are the Security Group Rules for the - Security Group. - items: - description: VPCSecurityGroupRule defines a VPC Security - Group Rule for a specified Security Group. 
                            properties:
                              action:
                                description: action defines whether to allow or deny traffic defined by the Security Group Rule.
                                enum:
                                - allow
                                - deny
                                type: string
                              destination:
                                description: |-
                                  destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule.
                                  Only used when direction is VPCSecurityGroupRuleDirectionOutbound.
                                properties:
                                  icmpCode:
                                    description: |-
                                      icmpCode is the ICMP code for the Rule.
                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
                                    format: int64
                                    type: integer
                                  icmpType:
                                    description: |-
                                      icmpType is the ICMP type for the Rule.
                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
                                    format: int64
                                    type: integer
                                  portRange:
                                    description: portRange is a range of ports allowed for the Rule's remote.
                                    properties:
                                      maximumPort:
                                        description: maximumPort is the inclusive upper range of ports.
                                        format: int64
                                        maximum: 65535
                                        minimum: 1
                                        type: integer
                                      minimumPort:
                                        description: minimumPort is the inclusive lower range of ports.
                                        format: int64
                                        maximum: 65535
                                        minimum: 1
                                        type: integer
                                    type: object
                                    x-kubernetes-validations:
                                    - message: maximum port must be greater than or equal to minimum port
                                      rule: self.maximumPort >= self.minimumPort
                                  protocol:
                                    description: protocol defines the traffic protocol used for the Security Group Rule.
                                    enum:
                                    - all
                                    - icmp
                                    - tcp
                                    - udp
                                    type: string
                                  remotes:
                                    description: |-
                                      remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote.
                                      Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc.
                                      This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc.
                                    items:
                                      description: |-
                                        VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details.
                                        The type of remote defines the additional remote details that are used for defining the remote.
                                      properties:
                                        address:
                                          description: |2-
                                            address is the address to use for the remote's destination/source.
                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress.
                                          type: string
                                        cidrSubnetName:
                                          description: |-
                                            cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
                                          type: string
                                        remoteType:
                                          description: remoteType defines the type of filter to define for the remote's destination/source.
                                          enum:
                                          - any
                                          - cidr
                                          - address
                                          - sg
                                          type: string
                                        securityGroupName:
                                          description: |-
                                            securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
                                          type: string
                                      required:
                                      - remoteType
                                      type: object
                                      x-kubernetes-validations:
                                      - message: cidrSubnetName, address, and securityGroupName are not valid for VPCSecurityGroupRuleRemoteTypeAny remoteType
                                        rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName) && !has(self.address) && !has(self.securityGroupName)) : true'
                                      - message: only cidrSubnetName is valid for VPCSecurityGroupRuleRemoteTypeCIDR remoteType
                                        rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName) && !has(self.address) && !has(self.securityGroupName)) : true'
                                      - message: only address is valid for VPCSecurityGroupRuleRemoteTypeAddress remoteType
                                        rule: 'self.remoteType == ''address'' ? (has(self.address) && !has(self.cidrSubnetName) && !has(self.securityGroupName)) : true'
                                      - message: only securityGroupName is valid for VPCSecurityGroupRuleRemoteTypeSG remoteType
                                        rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName) && !has(self.cidrSubnetName) && !has(self.address)) : true'
                                    type: array
                                required:
                                - protocol
                                - remotes
                                type: object
                                x-kubernetes-validations:
                                - message: icmpCode and icmpType are only supported for VPCSecurityGroupRuleProtocolIcmp protocol
                                  rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) && !has(self.icmpType)) : true'
                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll protocol
                                  rule: 'self.protocol == ''all'' ? !has(self.portRange) : true'
                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp protocol
                                  rule: 'self.protocol == ''icmp'' ? !has(self.portRange) : true'
                              direction:
                                description: direction defines whether the traffic is inbound or outbound for the Security Group Rule.
                                enum:
                                - inbound
                                - outbound
                                type: string
                              securityGroupID:
                                description: securityGroupID is the ID of the Security Group for the Security Group Rule.
                                type: string
                              source:
                                description: |-
                                  source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule.
                                  Only used when direction is VPCSecurityGroupRuleDirectionInbound.
                                properties:
                                  icmpCode:
                                    description: |-
                                      icmpCode is the ICMP code for the Rule.
                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
                                    format: int64
                                    type: integer
                                  icmpType:
                                    description: |-
                                      icmpType is the ICMP type for the Rule.
                                      Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp.
                                    format: int64
                                    type: integer
                                  portRange:
                                    description: portRange is a range of ports allowed for the Rule's remote.
                                    properties:
                                      maximumPort:
                                        description: maximumPort is the inclusive upper range of ports.
                                        format: int64
                                        maximum: 65535
                                        minimum: 1
                                        type: integer
                                      minimumPort:
                                        description: minimumPort is the inclusive lower range of ports.
                                        format: int64
                                        maximum: 65535
                                        minimum: 1
                                        type: integer
                                    type: object
                                    x-kubernetes-validations:
                                    - message: maximum port must be greater than or equal to minimum port
                                      rule: self.maximumPort >= self.minimumPort
                                  protocol:
                                    description: protocol defines the traffic protocol used for the Security Group Rule.
                                    enum:
                                    - all
                                    - icmp
                                    - tcp
                                    - udp
                                    type: string
                                  remotes:
                                    description: |-
                                      remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote.
                                      Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc.
                                      This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc.
                                    items:
                                      description: |-
                                        VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details.
                                        The type of remote defines the additional remote details that are used for defining the remote.
                                      properties:
                                        address:
                                          description: |2-
                                            address is the address to use for the remote's destination/source.
                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress.
                                          type: string
                                        cidrSubnetName:
                                          description: |-
                                            cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source.
                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR.
                                          type: string
                                        remoteType:
                                          description: remoteType defines the type of filter to define for the remote's destination/source.
                                          enum:
                                          - any
                                          - cidr
                                          - address
                                          - sg
                                          type: string
                                        securityGroupName:
                                          description: |-
                                            securityGroupName is the name of the VPC Security Group to use for the remote's destination/source.
                                            Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG
                                          type: string
                                      required:
                                      - remoteType
                                      type: object
                                      x-kubernetes-validations:
                                      - message: cidrSubnetName, address, and securityGroupName are not valid for VPCSecurityGroupRuleRemoteTypeAny remoteType
                                        rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName) && !has(self.address) && !has(self.securityGroupName)) : true'
                                      - message: only cidrSubnetName is valid for VPCSecurityGroupRuleRemoteTypeCIDR remoteType
                                        rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName) && !has(self.address) && !has(self.securityGroupName)) : true'
                                      - message: only address is valid for VPCSecurityGroupRuleRemoteTypeAddress remoteType
                                        rule: 'self.remoteType == ''address'' ? (has(self.address) && !has(self.cidrSubnetName) && !has(self.securityGroupName)) : true'
                                      - message: only securityGroupName is valid for VPCSecurityGroupRuleRemoteTypeSG remoteType
                                        rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName) && !has(self.cidrSubnetName) && !has(self.address)) : true'
                                    type: array
                                required:
                                - protocol
                                - remotes
                                type: object
                                x-kubernetes-validations:
                                - message: icmpCode and icmpType are only supported for VPCSecurityGroupRuleProtocolIcmp protocol
                                  rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) && !has(self.icmpType)) : true'
                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll protocol
                                  rule: 'self.protocol == ''all'' ? !has(self.portRange) : true'
                                - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp protocol
                                  rule: 'self.protocol == ''icmp'' ? !has(self.portRange) : true'
                            required:
                            - action
                            - direction
                            type: object
                            x-kubernetes-validations:
                            - message: both destination and source cannot be provided
                              rule: (has(self.destination) && !has(self.source)) || (!has(self.destination) && has(self.source))
                            - message: source must be set for VPCSecurityGroupRuleDirectionInbound direction
                              rule: 'self.direction == ''inbound'' ? has(self.source) : true'
                            - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound direction
                              rule: 'self.direction == ''inbound'' ? !has(self.destination) : true'
                            - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound direction
                              rule: 'self.direction == ''outbound'' ? has(self.destination) : true'
                            - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound direction
                              rule: 'self.direction == ''outbound'' ? !has(self.source) : true'
                          type: array
                        tags:
                          description: tags are tags to add to the Security Group.
                          items:
                            type: string
                          type: array
                      type: object
                      x-kubernetes-validations:
                      - message: either an id or name must be specified
                        rule: has(self.id) || has(self.name)
                    type: array
                  vpc:
                    description: vpc defines the IBM Cloud VPC for extended VPC Infrastructure support.
                    properties:
                      id:
                        description: id of the resource.
                        minLength: 1
                        type: string
                      name:
                        description: name of the resource.
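# Illustrative example (not part of the generated manifest): a
# spec.network.securityGroups entry exercising the rule schema above — an
# inbound rule allowing TCP 6443 from any remote. Names are hypothetical.
#
#   securityGroups:
#   - name: example-cp-security-group
#     rules:
#     - action: allow
#       direction: inbound
#       source:
#         protocol: tcp
#         portRange:
#           minimumPort: 6443
#           maximumPort: 6443
#         remotes:
#         - remoteType: any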
                        minLength: 1
                        type: string
                    type: object
                    x-kubernetes-validations:
                    - message: an id or name must be provided
                      rule: has(self.id) || has(self.name)
                  workerSubnets:
                    description: workerSubnets is a set of Subnet's which define the Worker subnets.
                    items:
                      description: Subnet describes a subnet.
                      properties:
                        cidr:
                          type: string
                        id:
                          maxLength: 64
                          minLength: 1
                          pattern: ^[-0-9a-z_]+$
                          type: string
                        name:
                          maxLength: 63
                          minLength: 1
                          pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
                          type: string
                        zone:
                          type: string
                      type: object
                    type: array
                type: object
              region:
                description: The IBM Cloud Region the cluster lives in.
                type: string
              resourceGroup:
                description: The VPC resources should be created under the resource group.
                type: string
              vpc:
                description: The Name of VPC.
                type: string
              zone:
                description: The Name of availability zone.
                type: string
            required:
            - region
            - resourceGroup
            type: object
          status:
            description: IBMVPCClusterStatus defines the observed state of IBMVPCCluster.
            properties:
              conditions:
                description: Conditions defines current service state of the load balancer.
                items:
                  description: Condition defines an observation of a Cluster API resource operational state.
                  properties:
                    lastTransitionTime:
                      description: |-
                        lastTransitionTime is the last time the condition transitioned from one status to another.
                        This should be when the underlying condition changed. If that is not known, then using the time when
                        the API field changed is acceptable.
                      format: date-time
                      type: string
                    message:
                      description: |-
                        message is a human readable message indicating details about the transition.
                        This field may be empty.
                      maxLength: 10240
                      minLength: 1
                      type: string
                    reason:
                      description: |-
                        reason is the reason for the condition's last transition in CamelCase.
                        The specific API may choose whether or not this field is considered a guaranteed API.
                        This field may be empty.
                      maxLength: 256
                      minLength: 1
                      type: string
                    severity:
                      description: |-
                        severity provides an explicit classification of Reason code, so the users or machines can immediately
                        understand the current situation and act accordingly.
                        The Severity field MUST be set only when Status=False.
                      maxLength: 32
                      type: string
                    status:
                      description: status of the condition, one of True, False, Unknown.
                      type: string
                    type:
                      description: |-
                        type of condition in CamelCase or in foo.example.com/CamelCase.
                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
                        can be useful (see .node.status.conditions), the ability to deconflict is important.
                      maxLength: 256
                      minLength: 1
                      type: string
                  required:
                  - lastTransitionTime
                  - status
                  - type
                  type: object
                type: array
              controlPlaneLoadBalancerState:
                description: ControlPlaneLoadBalancerState is the status of the load balancer.
                type: string
              image:
                description: image is the status of the VPC Custom Image.
                properties:
                  id:
                    description: id defines the Id of the IBM Cloud resource status.
                    type: string
                  name:
                    description: name defines the name of the IBM Cloud resource status.
                    type: string
                  ready:
                    description: ready defines whether the IBM Cloud resource is ready.
                    type: boolean
                required:
                - id
                - ready
                type: object
              network:
                description: network is the status of the VPC network resources for extended VPC Infrastructure support.
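# Illustrative example (not part of the generated manifest): the minimal
# required IBMVPCCluster spec fields from the schema above (region and
# resourceGroup), with hypothetical values.
#
#   spec:
#     region: us-south
#     resourceGroup: example-resource-group
#     vpc: example-vpc
#     zone: us-south-1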
                properties:
                  controlPlaneSubnets:
                    additionalProperties:
                      description: ResourceStatus identifies a resource by id (and name) and whether it is ready.
                      properties:
                        id:
                          description: id defines the Id of the IBM Cloud resource status.
                          type: string
                        name:
                          description: name defines the name of the IBM Cloud resource status.
                          type: string
                        ready:
                          description: ready defines whether the IBM Cloud resource is ready.
                          type: boolean
                      required:
                      - id
                      - ready
                      type: object
                    description: |-
                      controlPlaneSubnets references the VPC Subnets for the cluster's Control Plane.
                      The map simplifies lookups.
                    type: object
                  loadBalancers:
                    additionalProperties:
                      description: VPCLoadBalancerStatus defines the status of the VPC load balancer.
                      properties:
                        controllerCreated:
                          default: false
                          description: controllerCreated indicates whether the resource is created by the controller.
                          type: boolean
                        hostname:
                          description: hostname is the hostname of load balancer.
                          type: string
                        id:
                          description: id of VPC load balancer.
                          type: string
                        state:
                          description: State is the status of the load balancer.
                          type: string
                      type: object
                    description: |-
                      loadBalancers references the VPC Load Balancer's for the cluster.
                      The map simplifies lookups.
                    type: object
                  publicGateways:
                    additionalProperties:
                      description: ResourceStatus identifies a resource by id (and name) and whether it is ready.
                      properties:
                        id:
                          description: id defines the Id of the IBM Cloud resource status.
                          type: string
                        name:
                          description: name defines the name of the IBM Cloud resource status.
                          type: string
                        ready:
                          description: ready defines whether the IBM Cloud resource is ready.
                          type: boolean
                      required:
                      - id
                      - ready
                      type: object
                    description: |-
                      publicGateways references the VPC Public Gateways for the cluster.
                      The map simplifies lookups.
                    type: object
                  resourceGroup:
                    description: |-
                      resourceGroup references the Resource Group for Network resources for the cluster.
                      This can be the same or unique from the cluster's Resource Group.
                    properties:
                      id:
                        description: id defines the Id of the IBM Cloud resource status.
                        type: string
                      name:
                        description: name defines the name of the IBM Cloud resource status.
                        type: string
                      ready:
                        description: ready defines whether the IBM Cloud resource is ready.
                        type: boolean
                    required:
                    - id
                    - ready
                    type: object
                  securityGroups:
                    additionalProperties:
                      description: ResourceStatus identifies a resource by id (and name) and whether it is ready.
                      properties:
                        id:
                          description: id defines the Id of the IBM Cloud resource status.
                          type: string
                        name:
                          description: name defines the name of the IBM Cloud resource status.
                          type: string
                        ready:
                          description: ready defines whether the IBM Cloud resource is ready.
                          type: boolean
                      required:
                      - id
                      - ready
                      type: object
                    description: |-
                      securityGroups references the VPC Security Groups for the cluster.
                      The map simplifies lookups.
                    type: object
                  vpc:
                    description: vpc references the status of the IBM Cloud VPC as part of the extended VPC Infrastructure support.
                    properties:
                      id:
                        description: id defines the Id of the IBM Cloud resource status.
                        type: string
                      name:
                        description: name defines the name of the IBM Cloud resource status.
                        type: string
                      ready:
                        description: ready defines whether the IBM Cloud resource is ready.
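# Illustrative example (not part of the generated manifest): how the
# controller might populate the status.network maps defined above; the map
# keys, IDs, and states are hypothetical.
#
#   status:
#     network:
#       loadBalancers:
#         example-cluster-lb:
#           id: r134-0a1b2c3d-4e5f-6789-abcd-ef0123456789
#           state: active
#           controllerCreated: true
#       vpc:
#         id: r134-11111111-2222-3333-4444-555555555555
#         ready: true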
                        type: boolean
                    required:
                    - id
                    - ready
                    type: object
                  workerSubnets:
                    additionalProperties:
                      description: ResourceStatus identifies a resource by id (and name) and whether it is ready.
                      properties:
                        id:
                          description: id defines the Id of the IBM Cloud resource status.
                          type: string
                        name:
                          description: name defines the name of the IBM Cloud resource status.
                          type: string
                        ready:
                          description: ready defines whether the IBM Cloud resource is ready.
                          type: boolean
                      required:
                      - id
                      - ready
                      type: object
                    description: |-
                      workerSubnets references the VPC Subnets for the cluster's Data Plane.
                      The map simplifies lookups.
                    type: object
                type: object
              ready:
                default: false
                description: Ready is true when the provider resource is ready.
                type: boolean
              resourceGroup:
                description: resourceGroup is the status of the cluster's Resource Group for extended VPC Infrastructure support.
                properties:
                  id:
                    description: id defines the Id of the IBM Cloud resource status.
                    type: string
                  name:
                    description: name defines the name of the IBM Cloud resource status.
                    type: string
                  ready:
                    description: ready defines whether the IBM Cloud resource is ready.
                    type: boolean
                required:
                - id
                - ready
                type: object
              subnet:
                description: Subnet describes a subnet.
                properties:
                  cidr:
                    type: string
                  id:
                    maxLength: 64
                    minLength: 1
                    pattern: ^[-0-9a-z_]+$
                    type: string
                  name:
                    maxLength: 63
                    minLength: 1
                    pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$
                    type: string
                  zone:
                    type: string
                type: object
              v1beta2:
                description: V1beta2 groups all the fields that will be added or modified in IBMVPCCluster's status with the V1Beta2 version.
                properties:
                  conditions:
                    description: Conditions represents the observations of a IBMVPCCluster's current state.
                    items:
                      description: Condition contains details for one aspect of the current state of this API Resource.
                      properties:
                        lastTransitionTime:
                          description: |-
                            lastTransitionTime is the last time the condition transitioned from one status to another.
                            This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
                          format: date-time
                          type: string
                        message:
                          description: |-
                            message is a human readable message indicating details about the transition.
                            This may be an empty string.
                          maxLength: 32768
                          type: string
                        observedGeneration:
                          description: |-
                            observedGeneration represents the .metadata.generation that the condition was set based upon.
                            For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
                            with respect to the current state of the instance.
                          format: int64
                          minimum: 0
                          type: integer
                        reason:
                          description: |-
                            reason contains a programmatic identifier indicating the reason for the condition's last transition.
                            Producers of specific condition types may define expected values and meanings for this field,
                            and whether the values are considered a guaranteed API.
                            The value should be a CamelCase string.
                            This field may not be empty.
                          maxLength: 1024
                          minLength: 1
                          pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
                          type: string
                        status:
                          description: status of the condition, one of True, False, Unknown.
                          enum:
                          - "True"
                          - "False"
                          - Unknown
                          type: string
                        type:
                          description: type of condition in CamelCase or in foo.example.com/CamelCase.
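# Illustrative example (not part of the generated manifest): a v1beta2
# condition entry matching the metav1.Condition-shaped schema above; the
# type and reason values are hypothetical.
#
#   v1beta2:
#     conditions:
#     - type: Ready
#       status: "True"
#       reason: ResourcesReady
#       message: ""
#       lastTransitionTime: "2024-01-01T00:00:00Z"
#       observedGeneration: 1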
                          maxLength: 316
                          pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                          type: string
                      required:
                      - lastTransitionTime
                      - message
                      - reason
                      - status
                      - type
                      type: object
                    maxItems: 32
                    type: array
                    x-kubernetes-list-map-keys:
                    - type
                    x-kubernetes-list-type: map
                type: object
              vpc:
                description: 'Deprecated: rely on Network instead.'
                properties:
                  id:
                    type: string
                  name:
                    type: string
                required:
                - id
                - name
                type: object
              vpcEndpoint:
                description: VPCEndpoint describes a VPCEndpoint.
                properties:
                  address:
                    type: string
                  floatingIPID:
                    description: 'Deprecated: This field has no function and is going to be removed in the next release.'
                    type: string
                  loadBalancerIPID:
                    type: string
                required:
                - address
                type: object
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: null
  storedVersions: null
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.18.0
    exclude.release.openshift.io/internal-openshift-hosted: "true"
    include.release.openshift.io/self-managed-high-availability: "true"
    include.release.openshift.io/single-node-developer: "true"
    release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade
    service.beta.openshift.io/inject-cabundle: "true"
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: infrastructure-ibmcloud
    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
    clusterctl.cluster.x-k8s.io: ""
  name: ibmvpcmachines.infrastructure.cluster.x-k8s.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        caBundle: Cg==
        service:
          name: capi-ibmcloud-webhook-service
          namespace: openshift-cluster-api
          path: /convert
      conversionReviewVersions:
      - v1
      - v1beta1
  group: infrastructure.cluster.x-k8s.io
  names:
    categories:
    - cluster-api
    kind: IBMVPCMachine
    listKind: IBMVPCMachineList
    plural: ibmvpcmachines
    singular: ibmvpcmachine
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - description: Cluster infrastructure is ready for IBM VPC instances
      jsonPath: .status.ready
      name: Ready
      type: string
    name: v1beta1
    schema:
      openAPIV3Schema:
        description: IBMVPCMachine is the Schema for the ibmvpcmachines API.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: IBMVPCMachineSpec defines the desired state of IBMVPCMachine.
            properties:
              bootVolume:
                description: BootVolume contains the machine's boot volume configurations such as size, iops, etc.
                properties:
                  deleteVolumeOnInstanceDelete:
                    default: true
                    description: |-
                      DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
                      Default is set as true
                    type: boolean
                  encryptionKeyCRN:
                    description: |-
                      EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
                      and possible values are as follows.
                      The CRN of the [Key Protect Root
                      Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
                      Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
                      If unspecified, the `encryption` type for the volume will be `provider_managed`.
                    type: string
                  iops:
                    description: |-
                      Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
                      family of `custom`.
                    format: int64
                    type: integer
                  name:
                    description: |-
                      Name is the unique user-defined name for this volume.
                      Default will be autogenerated
                    type: string
                  profile:
                    default: general-purpose
                    description: |-
                      Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
                      for more information.
                      Default to general-purpose
                    enum:
                    - general-purpose
                    - 5iops-tier
                    - 10iops-tier
                    - custom
                    type: string
                  sizeGiB:
                    description: |-
                      SizeGiB is the size of the virtual server's boot disk in GiB.
                      Default to the size of the image's `minimum_provisioned_size`.
                    format: int64
                    type: integer
                type: object
              image:
                description: |-
                  Image is the id of the OS image which would be installed on the instance.
                  Example: r134-ed3f775f-ad7e-4e37-ae62-7199b4988b00
                type: string
              imageName:
                description: ImageName is the name of the OS image which would be installed on the instance.
                type: string
              name:
                description: Name of the instance.
                type: string
              primaryNetworkInterface:
                description: PrimaryNetworkInterface is required to specify subnet.
                properties:
                  subnet:
                    description: Subnet ID of the network interface.
                    type: string
                type: object
              profile:
                description: "Profile indicates the flavor of instance. Example: bx2-8x32\tmeans 8 vCPUs\t32 GB RAM\t16 Gbps"
                type: string
              providerID:
                description: ProviderID is the unique identifier as specified by the cloud provider.
                type: string
              sshKeyNames:
                description: SSHKeyNames is the list of SSH public key names that will be used to access the VM.
                items:
                  type: string
                type: array
              sshKeys:
                description: SSHKeys is the list of SSH public keys that will be used to access the VM.
                items:
                  type: string
                type: array
              zone:
                description: 'Zone is the place where the instance should be created. Example: us-south-3'
                type: string
            required:
            - zone
            type: object
          status:
            description: IBMVPCMachineStatus defines the observed state of IBMVPCMachine.
            properties:
              addresses:
                description: Addresses contains the IBM Cloud instance associated addresses.
                items:
                  description: NodeAddress contains information for the node's address.
                  properties:
                    address:
                      description: The node address.
                      type: string
                    type:
                      description: Node address type, one of Hostname, ExternalIP or InternalIP.
                      type: string
                  required:
                  - address
                  - type
                  type: object
                type: array
              instanceID:
                type: string
              instanceState:
                description: InstanceStatus is the status of the IBM Cloud instance for this machine.
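# Illustrative example (not part of the generated manifest): a minimal
# v1beta1 IBMVPCMachine spec under the schema above; the profile, subnet, and
# key names are hypothetical (the image id reuses the schema's own example).
#
#   spec:
#     name: example-node-0
#     image: r134-ed3f775f-ad7e-4e37-ae62-7199b4988b00
#     profile: bx2-8x32
#     zone: us-south-3
#     primaryNetworkInterface:
#       subnet: r134-subnet-id
#     sshKeyNames:
#     - example-ssh-key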
                type: string
              ready:
                description: Ready is true when the provider resource is ready.
                type: boolean
            type: object
        type: object
    served: true
    storage: false
    subresources:
      status: {}
  - additionalPrinterColumns:
    - description: Cluster infrastructure is ready for IBM VPC instances
      jsonPath: .status.ready
      name: Ready
      type: string
    name: v1beta2
    schema:
      openAPIV3Schema:
        description: IBMVPCMachine is the Schema for the ibmvpcmachines API.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: IBMVPCMachineSpec defines the desired state of IBMVPCMachine.
            properties:
              bootVolume:
                description: BootVolume contains the machine's boot volume configurations such as size, iops, etc.
                properties:
                  deleteVolumeOnInstanceDelete:
                    default: true
                    description: |-
                      DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
                      Default is set as true
                    type: boolean
                  encryptionKeyCRN:
                    description: |-
                      EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
                      and possible values are as follows.
                      The CRN of the [Key Protect Root
                      Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
                      Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
                      If unspecified, the `encryption` type for the volume will be `provider_managed`.
                    type: string
                  iops:
                    description: |-
                      Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
                      family of `custom`.
                    format: int64
                    type: integer
                  name:
                    description: |-
                      Name is the unique user-defined name for this volume.
                      Default will be autogenerated
                    type: string
                  profile:
                    default: general-purpose
                    description: |-
                      Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
                      for more information.
                      Default to general-purpose
                    enum:
                    - general-purpose
                    - 5iops-tier
                    - 10iops-tier
                    - custom
                    type: string
                  sizeGiB:
                    description: |-
                      SizeGiB is the size of the virtual server's boot disk in GiB.
                      Default to the size of the image's `minimum_provisioned_size`.
                    format: int64
                    type: integer
                type: object
              catalogOffering:
                description: |-
                  CatalogOffering is the Catalog Offering OS image which would be installed on the instance.
                  An OfferingCRN or VersionCRN is required, the PlanCRN is optional.
                properties:
                  offeringCRN:
                    description: |-
                      OfferingCRN defines the IBM Cloud Catalog Offering CRN. Using the OfferingCRN expects that the latest version of the Offering will be used.
                      If a specific version should be used instead, rely on VersionCRN.
                    type: string
                  planCRN:
                    description: PlanCRN defines the IBM Cloud Catalog Offering Plan CRN to use for the Offering.
                    type: string
                  versionCRN:
                    description: VersionCRN defines the IBM Cloud Catalog Offering Version CRN. A specific version of the Catalog Offering will be used, as defined by this CRN.
                    type: string
                type: object
                x-kubernetes-validations:
                - message: either offeringCRN or versionCRN must be provided, not both
                  rule: (has(self.offeringCRN) && !has(self.versionCRN)) || (!has(self.offeringCRN) && has(self.versionCRN))
              image:
                description: |-
                  Image is the OS image which would be installed on the instance.
                  ID will take higher precedence over Name if both specified.
                properties:
                  id:
                    description: ID of resource
                    minLength: 1
                    type: string
                  name:
                    description: Name of resource
                    minLength: 1
                    type: string
                type: object
              loadBalancerPoolMembers:
                description: LoadBalancerPoolMembers is the set of IBM Cloud VPC Load Balancer Backend Pools the machine should be added to as a member.
                items:
                  description: VPCLoadBalancerBackendPoolMember represents a VPC Load Balancer Backend Pool Member.
                  properties:
                    loadBalancer:
                      description: LoadBalancer defines the Load Balancer the Pool Member is for.
                      properties:
                        id:
                          description: id of the resource.
                          minLength: 1
                          type: string
                        name:
                          description: name of the resource.
                          minLength: 1
                          type: string
                      type: object
                      x-kubernetes-validations:
                      - message: an id or name must be provided
                        rule: has(self.id) || has(self.name)
                    pool:
                      description: Pool defines the Load Balancer Pool the Pool Member should be in.
                      properties:
                        id:
                          description: id of the resource.
                          minLength: 1
                          type: string
                        name:
                          description: name of the resource.
                          minLength: 1
                          type: string
                      type: object
                      x-kubernetes-validations:
                      - message: an id or name must be provided
                        rule: has(self.id) || has(self.name)
                    port:
                      description: Port defines the Port the Load Balancer Pool Member listens for traffic.
                      format: int64
                      type: integer
                    weight:
                      description: Weight of the service member. Only applicable if the pool algorithm is "weighted_round_robin".
                      format: int64
                      type: integer
                  required:
                  - loadBalancer
                  - pool
                  - port
                  type: object
                type: array
              name:
                description: Name of the instance.
                type: string
              placementTarget:
                description: PlacementTarget is the placement restrictions to use for the virtual server instance. No restrictions are used when this field is not defined.
                properties:
                  dedicatedHost:
                    description: DedicatedHost defines the Dedicated Host to place a VPC Machine (Instance) on.
                    properties:
                      id:
                        description: id of the resource.
                        minLength: 1
                        type: string
                      name:
                        description: name of the resource.
                        minLength: 1
                        type: string
                    type: object
                    x-kubernetes-validations:
                    - message: an id or name must be provided
                      rule: has(self.id) || has(self.name)
                  dedicatedHostGroup:
                    description: DedicatedHostGroup defines the Dedicated Host Group to use when placing a VPC Machine (Instance).
                    properties:
                      id:
                        description: id of the resource.
                        minLength: 1
                        type: string
                      name:
                        description: name of the resource.
                        minLength: 1
                        type: string
                    type: object
                    x-kubernetes-validations:
                    - message: an id or name must be provided
                      rule: has(self.id) || has(self.name)
                  placementGroup:
                    description: PlacementGroup defines the Placement Group to use when placing a VPC Machine (Instance).
                    properties:
                      id:
                        description: id of the resource.
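# Illustrative example (not part of the generated manifest): registering a
# machine with a load balancer backend pool by name, per the
# loadBalancerPoolMembers schema above; the names are hypothetical.
#
#   loadBalancerPoolMembers:
#   - loadBalancer:
#       name: example-cluster-lb
#     pool:
#       name: example-api-pool
#     port: 6443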
                        minLength: 1
                        type: string
                      name:
                        description: name of the resource.
                        minLength: 1
                        type: string
                    type: object
                    x-kubernetes-validations:
                    - message: an id or name must be provided
                      rule: has(self.id) || has(self.name)
                type: object
                x-kubernetes-validations:
                - message: only one of dedicatedHost, dedicatedHostGroup, or placementGroup must be defined for machine placement
                  rule: (has(self.dedicatedHost) && !has(self.dedicatedHostGroup) && !has(self.placementGroup)) || (!has(self.dedicatedHost) && has(self.dedicatedHostGroup) && !has(self.placementGroup)) || (!has(self.dedicatedHost) && !has(self.dedicatedHostGroup) && has(self.placementGroup))
              primaryNetworkInterface:
                description: PrimaryNetworkInterface is required to specify subnet.
                properties:
                  securityGroups:
                    description: SecurityGroups defines a set of IBM Cloud VPC Security Groups to attach to the network interface.
                    items:
                      description: VPCResource represents a VPC resource.
                      properties:
                        id:
                          description: id of the resource.
                          minLength: 1
                          type: string
                        name:
                          description: name of the resource.
                          minLength: 1
                          type: string
                      type: object
                      x-kubernetes-validations:
                      - message: an id or name must be provided
                        rule: has(self.id) || has(self.name)
                    type: array
                  subnet:
                    description: Subnet ID of the network interface.
                    type: string
                type: object
              profile:
                description: "Profile indicates the flavor of instance. Example: bx2-8x32\tmeans 8 vCPUs\t32 GB RAM\t16 Gbps"
                type: string
              providerID:
                description: ProviderID is the unique identifier as specified by the cloud provider.
                type: string
              sshKeys:
                description: |-
                  SSHKeys is the list of SSH public keys that will be used to access the VM.
                  ID will take higher precedence over Name if both specified.
                items:
                  description: |-
                    IBMVPCResourceReference is a reference to a specific VPC resource by ID or Name
                    Only one of ID or Name may be specified. Specifying more than one will result in
                    a validation error.
                  properties:
                    id:
                      description: ID of resource
                      minLength: 1
                      type: string
                    name:
                      description: Name of resource
                      minLength: 1
                      type: string
                  type: object
                type: array
              zone:
                description: 'Zone is the place where the instance should be created. Example: us-south-3'
                type: string
            required:
            - image
            - zone
            type: object
          status:
            description: IBMVPCMachineStatus defines the observed state of IBMVPCMachine.
            properties:
              addresses:
                description: Addresses contains the IBM Cloud instance associated addresses.
                items:
                  description: NodeAddress contains information for the node's address.
                  properties:
                    address:
                      description: The node address.
                      type: string
                    type:
                      description: Node address type, one of Hostname, ExternalIP or InternalIP.
                      type: string
                  required:
                  - address
                  - type
                  type: object
                type: array
              conditions:
                description: Conditions defines the current service state of the IBMVPCMachine.
                items:
                  description: Condition defines an observation of a Cluster API resource operational state.
                  properties:
                    lastTransitionTime:
                      description: |-
                        lastTransitionTime is the last time the condition transitioned from one status to another.
                        This should be when the underlying condition changed. If that is not known, then using the time when
                        the API field changed is acceptable.
                      format: date-time
                      type: string
                    message:
                      description: |-
                        message is a human readable message indicating details about the transition.
                        This field may be empty.
                      maxLength: 10240
                      minLength: 1
                      type: string
                    reason:
                      description: |-
                        reason is the reason for the condition's last transition in CamelCase.
                        The specific API may choose whether or not this field is considered a guaranteed API.
                        This field may be empty.
                      maxLength: 256
                      minLength: 1
                      type: string
                    severity:
                      description: |-
                        severity provides an explicit classification of Reason code, so the users or machines can immediately
                        understand the current situation and act accordingly.
                        The Severity field MUST be set only when Status=False.
                      maxLength: 32
                      type: string
                    status:
                      description: status of the condition, one of True, False, Unknown.
                      type: string
                    type:
                      description: |-
                        type of condition in CamelCase or in foo.example.com/CamelCase.
                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
                        can be useful (see .node.status.conditions), the ability to deconflict is important.
                      maxLength: 256
                      minLength: 1
                      type: string
                  required:
                  - lastTransitionTime
                  - status
                  - type
                  type: object
                type: array
              failureMessage:
                description: |-
                  FailureMessage will be set in the event that there is a terminal problem
                  reconciling the Machine and will contain a more verbose string suitable
                  for logging and human consumption.
                type: string
              failureReason:
                description: |-
                  FailureReason will be set in the event that there is a terminal problem
                  reconciling the Machine and will contain a succinct value suitable
                  for machine interpretation.
                type: string
              instanceID:
                description: InstanceID defines the IBM Cloud VPC Instance UUID.
                type: string
              instanceState:
                description: InstanceStatus is the status of the IBM Cloud instance for this machine.
                type: string
              loadBalancerPoolMembers:
                description: LoadBalancerPoolMembers is the status of the IBM Cloud VPC Load Balancer Backend Pools the machine is a member of.
                items:
                  description: VPCLoadBalancerBackendPoolMember represents a VPC Load Balancer Backend Pool Member.
                  properties:
                    loadBalancer:
                      description: LoadBalancer defines the Load Balancer the Pool Member is for.
                      properties:
                        id:
                          description: id of the resource.
                          minLength: 1
                          type: string
                        name:
                          description: name of the resource.
                          minLength: 1
                          type: string
                      type: object
                      x-kubernetes-validations:
                      - message: an id or name must be provided
                        rule: has(self.id) || has(self.name)
                    pool:
                      description: Pool defines the Load Balancer Pool the Pool Member should be in.
                      properties:
                        id:
                          description: id of the resource.
                          minLength: 1
                          type: string
                        name:
                          description: name of the resource.
                          minLength: 1
                          type: string
                      type: object
                      x-kubernetes-validations:
                      - message: an id or name must be provided
                        rule: has(self.id) || has(self.name)
                    port:
                      description: Port defines the Port the Load Balancer Pool Member listens for traffic.
                      format: int64
                      type: integer
                    weight:
                      description: Weight of the service member. Only applicable if the pool algorithm is "weighted_round_robin".
                      format: int64
                      type: integer
                  required:
                  - loadBalancer
                  - pool
                  - port
                  type: object
                type: array
              ready:
                description: Ready is true when the provider resource is ready.
                type: boolean
              v1beta2:
                description: V1beta2 groups all the fields that will be added or modified in IBMVPCMachine's status with the V1Beta2 version.
                properties:
                  conditions:
                    description: Conditions represents the observations of a IBMVPCMachine's current state.
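# Illustrative example (not part of the generated manifest): a v1beta2
# IBMVPCMachine spec exercising the one-of placementTarget rule above —
# exactly one of dedicatedHost, dedicatedHostGroup, or placementGroup may be
# set. All values are hypothetical.
#
#   spec:
#     image:
#       name: example-rhcos-image
#     profile: bx2-8x32
#     zone: us-south-1
#     placementTarget:
#       dedicatedHostGroup:
#         name: example-dh-group
#     primaryNetworkInterface:
#       subnet: r134-subnet-id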
                    items:
                      description: Condition contains details for one aspect of the current state of this API Resource.
                      properties:
                        lastTransitionTime:
                          description: |-
                            lastTransitionTime is the last time the condition transitioned from one status to another.
                            This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
                          format: date-time
                          type: string
                        message:
                          description: |-
                            message is a human readable message indicating details about the transition.
                            This may be an empty string.
                          maxLength: 32768
                          type: string
                        observedGeneration:
                          description: |-
                            observedGeneration represents the .metadata.generation that the condition was set based upon.
                            For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
                            with respect to the current state of the instance.
                          format: int64
                          minimum: 0
                          type: integer
                        reason:
                          description: |-
                            reason contains a programmatic identifier indicating the reason for the condition's last transition.
                            Producers of specific condition types may define expected values and meanings for this field,
                            and whether the values are considered a guaranteed API.
                            The value should be a CamelCase string.
                            This field may not be empty.
                          maxLength: 1024
                          minLength: 1
                          pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
                          type: string
                        status:
                          description: status of the condition, one of True, False, Unknown.
                          enum:
                          - "True"
                          - "False"
                          - Unknown
                          type: string
                        type:
                          description: type of condition in CamelCase or in foo.example.com/CamelCase.
                          maxLength: 316
                          pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                          type: string
                      required:
                      - lastTransitionTime
                      - message
                      - reason
                      - status
                      - type
                      type: object
                    maxItems: 32
                    type: array
                    x-kubernetes-list-map-keys:
                    - type
                    x-kubernetes-list-type: map
                type: object
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: null
  storedVersions: null
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.18.0
    exclude.release.openshift.io/internal-openshift-hosted: "true"
    include.release.openshift.io/self-managed-high-availability: "true"
    include.release.openshift.io/single-node-developer: "true"
    release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade
    service.beta.openshift.io/inject-cabundle: "true"
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: infrastructure-ibmcloud
    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
    clusterctl.cluster.x-k8s.io: ""
  name: ibmvpcmachinetemplates.infrastructure.cluster.x-k8s.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        caBundle: Cg==
        service:
          name: capi-ibmcloud-webhook-service
          namespace: openshift-cluster-api
          path: /convert
      conversionReviewVersions:
      - v1
      - v1beta1
  group: infrastructure.cluster.x-k8s.io
  names:
    categories:
    - cluster-api
    kind: IBMVPCMachineTemplate
    listKind: IBMVPCMachineTemplateList
    plural: ibmvpcmachinetemplates
    singular: ibmvpcmachinetemplate
  scope: Namespaced
  versions:
  - name: v1beta1
    schema:
      openAPIV3Schema:
        description: IBMVPCMachineTemplate is the Schema for the ibmvpcmachinetemplates API.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: IBMVPCMachineTemplateSpec defines the desired state of IBMVPCMachineTemplate.
            properties:
              template:
                description: IBMVPCMachineTemplateResource describes the data needed to create an IBMVPCMachine from a template.
                properties:
                  spec:
                    description: Spec is the specification of the desired behavior of the machine.
                    properties:
                      bootVolume:
                        description: BootVolume contains the machine's boot volume configurations such as size, iops, etc.
                        properties:
                          deleteVolumeOnInstanceDelete:
                            default: true
                            description: |-
                              DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
                              Default is set as true
                            type: boolean
                          encryptionKeyCRN:
                            description: |-
                              EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
                              and possible values are as follows.
                              The CRN of the [Key Protect Root
                              Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
                              Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
                              If unspecified, the `encryption` type for the volume will be `provider_managed`.
                            type: string
                          iops:
                            description: |-
                              Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
                              family of `custom`.
                            format: int64
                            type: integer
                          name:
                            description: |-
                              Name is the unique user-defined name for this volume.
                              Default will be autogenerated
                            type: string
                          profile:
                            default: general-purpose
                            description: |-
                              Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
                              for more information.
                              Default to general-purpose
                            enum:
                            - general-purpose
                            - 5iops-tier
                            - 10iops-tier
                            - custom
                            type: string
                          sizeGiB:
                            description: |-
                              SizeGiB is the size of the virtual server's boot disk in GiB.
                              Default to the size of the image's `minimum_provisioned_size`.
                            format: int64
                            type: integer
                        type: object
                      image:
                        description: |-
                          Image is the id of the OS image which would be installed on the instance.
                          Example: r134-ed3f775f-ad7e-4e37-ae62-7199b4988b00
                        type: string
                      imageName:
                        description: ImageName is the name of the OS image which would be installed on the instance.
                        type: string
                      name:
                        description: Name of the instance.
                        type: string
                      primaryNetworkInterface:
                        description: PrimaryNetworkInterface is required to specify subnet.
                        properties:
                          subnet:
                            description: Subnet ID of the network interface.
                            type: string
                        type: object
                      profile:
                        description: "Profile indicates the flavor of instance. Example: bx2-8x32\tmeans 8 vCPUs\t32 GB RAM\t16 Gbps"
                        type: string
                      providerID:
                        description: ProviderID is the unique identifier as specified by the cloud provider.
                        type: string
                      sshKeyNames:
                        description: SSHKeyNames is the list of SSH public key names that will be used to access the VM.
                        items:
                          type: string
                        type: array
                      sshKeys:
                        description: SSHKeys is the list of SSH public keys that will be used to access the VM.
                        items:
                          type: string
                        type: array
                      zone:
                        description: 'Zone is the place where the instance should be created. Example: us-south-3'
                        type: string
                    required:
                    - zone
                    type: object
                required:
                - spec
                type: object
            required:
            - template
            type: object
          status:
            description: IBMVPCMachineTemplateStatus defines the observed state of IBMVPCMachineTemplate.
            type: object
        type: object
    served: true
    storage: false
  - name: v1beta2
    schema:
      openAPIV3Schema:
        description: IBMVPCMachineTemplate is the Schema for the ibmvpcmachinetemplates API.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: IBMVPCMachineTemplateSpec defines the desired state of IBMVPCMachineTemplate.
            properties:
              template:
                description: IBMVPCMachineTemplateResource describes the data needed to create an IBMVPCMachine from a template.
                properties:
                  spec:
                    description: Spec is the specification of the desired behavior of the machine.
                    properties:
                      bootVolume:
                        description: BootVolume contains the machine's boot volume configurations such as size, iops, etc.
                        properties:
                          deleteVolumeOnInstanceDelete:
                            default: true
                            description: |-
                              DeleteVolumeOnInstanceDelete If set to true, when deleting the instance the volume will also be deleted.
                              Default is set as true
                            type: boolean
                          encryptionKeyCRN:
                            description: |-
                              EncryptionKey is the root key to use to wrap the data encryption key for the volume and this points to the CRN
                              and possible values are as follows.
                              The CRN of the [Key Protect Root
                              Key](https://cloud.ibm.com/docs/key-protect?topic=key-protect-getting-started-tutorial) or [Hyper Protect Crypto
                              Service Root Key](https://cloud.ibm.com/docs/hs-crypto?topic=hs-crypto-get-started) for this resource.
                              If unspecified, the `encryption` type for the volume will be `provider_managed`.
                            type: string
                          iops:
                            description: |-
                              Iops is the maximum I/O operations per second (IOPS) to use for the volume. Applicable only to volumes using a profile
                              family of `custom`.
                            format: int64
                            type: integer
                          name:
                            description: |-
                              Name is the unique user-defined name for this volume.
                              Default will be autogenerated
                            type: string
                          profile:
                            default: general-purpose
                            description: |-
                              Profile is the volume profile for the bootdisk, refer https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-profiles
                              for more information.
                              Default to general-purpose
                            enum:
                            - general-purpose
                            - 5iops-tier
                            - 10iops-tier
                            - custom
                            type: string
                          sizeGiB:
                            description: |-
                              SizeGiB is the size of the virtual server's boot disk in GiB.
                              Default to the size of the image's `minimum_provisioned_size`.
                            format: int64
                            type: integer
                        type: object
                      catalogOffering:
                        description: |-
                          CatalogOffering is the Catalog Offering OS image which would be installed on the instance.
                          An OfferingCRN or VersionCRN is required, the PlanCRN is optional.
                        properties:
                          offeringCRN:
                            description: |-
                              OfferingCRN defines the IBM Cloud Catalog Offering CRN. Using the OfferingCRN expects that the latest version of the Offering will be used.
                              If a specific version should be used instead, rely on VersionCRN.
                            type: string
                          planCRN:
                            description: PlanCRN defines the IBM Cloud Catalog Offering Plan CRN to use for the Offering.
                            type: string
                          versionCRN:
                            description: VersionCRN defines the IBM Cloud Catalog Offering Version CRN. A specific version of the Catalog Offering will be used, as defined by this CRN.
                            type: string
                        type: object
                        x-kubernetes-validations:
                        - message: either offeringCRN or versionCRN must be provided, not both
                          rule: (has(self.offeringCRN) && !has(self.versionCRN)) || (!has(self.offeringCRN) && has(self.versionCRN))
                      image:
                        description: |-
                          Image is the OS image which would be installed on the instance.
                          ID will take higher precedence over Name if both specified.
                        properties:
                          id:
                            description: ID of resource
                            minLength: 1
                            type: string
                          name:
                            description: Name of resource
                            minLength: 1
                            type: string
                        type: object
                      loadBalancerPoolMembers:
                        description: LoadBalancerPoolMembers is the set of IBM Cloud VPC Load Balancer Backend Pools the machine should be added to as a member.
                        items:
                          description: VPCLoadBalancerBackendPoolMember represents a VPC Load Balancer Backend Pool Member.
                          properties:
                            loadBalancer:
                              description: LoadBalancer defines the Load Balancer the Pool Member is for.
                              properties:
                                id:
                                  description: id of the resource.
                                  minLength: 1
                                  type: string
                                name:
                                  description: name of the resource.
                                  minLength: 1
                                  type: string
                              type: object
                              x-kubernetes-validations:
                              - message: an id or name must be provided
                                rule: has(self.id) || has(self.name)
                            pool:
                              description: Pool defines the Load Balancer Pool the Pool Member should be in.
                              properties:
                                id:
                                  description: id of the resource.
                                  minLength: 1
                                  type: string
                                name:
                                  description: name of the resource.
                                  minLength: 1
                                  type: string
                              type: object
                              x-kubernetes-validations:
                              - message: an id or name must be provided
                                rule: has(self.id) || has(self.name)
                            port:
                              description: Port defines the Port the Load Balancer Pool Member listens for traffic.
                              format: int64
                              type: integer
                            weight:
                              description: Weight of the service member. Only applicable if the pool algorithm is "weighted_round_robin".
                              format: int64
                              type: integer
                          required:
                          - loadBalancer
                          - pool
                          - port
                          type: object
                        type: array
                      name:
                        description: Name of the instance.
                        type: string
                      placementTarget:
                        description: PlacementTarget is the placement restrictions to use for the virtual server instance. No restrictions are used when this field is not defined.
                        properties:
                          dedicatedHost:
                            description: DedicatedHost defines the Dedicated Host to place a VPC Machine (Instance) on.
                            properties:
                              id:
                                description: id of the resource.
                                minLength: 1
                                type: string
                              name:
                                description: name of the resource.
                                minLength: 1
                                type: string
                            type: object
                            x-kubernetes-validations:
                            - message: an id or name must be provided
                              rule: has(self.id) || has(self.name)
                          dedicatedHostGroup:
                            description: DedicatedHostGroup defines the Dedicated Host Group to use when placing a VPC Machine (Instance).
                            properties:
                              id:
                                description: id of the resource.
                                minLength: 1
                                type: string
                              name:
                                description: name of the resource.
                                minLength: 1
                                type: string
                            type: object
                            x-kubernetes-validations:
                            - message: an id or name must be provided
                              rule: has(self.id) || has(self.name)
                          placementGroup:
                            description: PlacementGroup defines the Placement Group to use when placing a VPC Machine (Instance).
                            properties:
                              id:
                                description: id of the resource.
                                minLength: 1
                                type: string
                              name:
                                description: name of the resource.
                                minLength: 1
                                type: string
                            type: object
                            x-kubernetes-validations:
                            - message: an id or name must be provided
                              rule: has(self.id) || has(self.name)
                        type: object
                        x-kubernetes-validations:
                        - message: only one of dedicatedHost, dedicatedHostGroup, or placementGroup must be defined for machine placement
                          rule: (has(self.dedicatedHost) && !has(self.dedicatedHostGroup) && !has(self.placementGroup)) || (!has(self.dedicatedHost) && has(self.dedicatedHostGroup) && !has(self.placementGroup)) || (!has(self.dedicatedHost) && !has(self.dedicatedHostGroup) && has(self.placementGroup))
                      primaryNetworkInterface:
                        description: PrimaryNetworkInterface is required to specify subnet.
                        properties:
                          securityGroups:
                            description: SecurityGroups defines a set of IBM Cloud VPC Security Groups to attach to the network interface.
                            items:
                              description: VPCResource represents a VPC resource.
                              properties:
                                id:
                                  description: id of the resource.
                                  minLength: 1
                                  type: string
                                name:
                                  description: name of the resource.
                                  minLength: 1
                                  type: string
                              type: object
                              x-kubernetes-validations:
                              - message: an id or name must be provided
                                rule: has(self.id) || has(self.name)
                            type: array
                          subnet:
                            description: Subnet ID of the network interface.
                            type: string
                        type: object
                      profile:
                        description: "Profile indicates the flavor of instance. Example: bx2-8x32\tmeans 8 vCPUs\t32 GB RAM\t16 Gbps"
                        type: string
                      providerID:
                        description: ProviderID is the unique identifier as specified by the cloud provider.
                        type: string
                      sshKeys:
                        description: |-
                          SSHKeys is the list of SSH public keys that will be used to access the VM.
                          ID will take higher precedence over Name if both specified.
                        items:
                          description: |-
                            IBMVPCResourceReference is a reference to a specific VPC resource by ID or Name
                            Only one of ID or Name may be specified. Specifying more than one will result in
                            a validation error.
                          properties:
                            id:
                              description: ID of resource
                              minLength: 1
                              type: string
                            name:
                              description: Name of resource
                              minLength: 1
                              type: string
                          type: object
                        type: array
                      zone:
                        description: 'Zone is the place where the instance should be created. Example: us-south-3'
                        type: string
                    required:
                    - image
                    - zone
                    type: object
                required:
                - spec
                type: object
            required:
            - template
            type: object
          status:
            description: IBMVPCMachineTemplateStatus defines the observed state of IBMVPCMachineTemplate.
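# Illustrative example (not part of the generated manifest): a v1beta2
# IBMVPCMachineTemplate wrapping the machine spec above; all names and values
# are hypothetical.
#
#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
#   kind: IBMVPCMachineTemplate
#   metadata:
#     name: example-worker-template
#     namespace: openshift-cluster-api
#   spec:
#     template:
#       spec:
#         image:
#           name: example-rhcos-image
#         profile: bx2-8x32
#         zone: us-south-1
#         primaryNetworkInterface:
#           subnet: r134-subnet-id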
            properties:
              capacity:
                additionalProperties:
                  anyOf:
                  - type: integer
                  - type: string
                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                  x-kubernetes-int-or-string: true
                description: |-
                  Capacity defines the resource capacity for this machine.
                  This value is used for autoscaling from zero operations as defined in:
                  https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
                type: object
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: null
  storedVersions: null
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.18.0
    exclude.release.openshift.io/internal-openshift-hosted: "true"
    include.release.openshift.io/self-managed-high-availability: "true"
    include.release.openshift.io/single-node-developer: "true"
    release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade
    service.beta.openshift.io/inject-cabundle: "true"
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: infrastructure-ibmcloud
    cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
    clusterctl.cluster.x-k8s.io: ""
  name: ibmpowervsclusters.infrastructure.cluster.x-k8s.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        caBundle: Cg==
        service:
          name: capi-ibmcloud-webhook-service
          namespace: openshift-cluster-api
          path: /convert
      conversionReviewVersions:
      - v1
      - v1beta1
  group: infrastructure.cluster.x-k8s.io
  names:
    kind: IBMPowerVSCluster
    listKind: IBMPowerVSClusterList
    plural: ibmpowervsclusters
    singular: ibmpowervscluster
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - description: Cluster to which this IBMPowerVSCluster belongs
      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
      name: Cluster
      type: string
    - description: Time duration since creation of IBMPowerVSCluster
      jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    - jsonPath: .spec.serviceInstanceID
      name: PowerVS Cloud Instance ID
      priority: 1
      type: string
    - description: Control Plane Endpoint
      jsonPath: .spec.controlPlaneEndpoint.host
      name: Endpoint
      priority: 1
      type: string
    - description: Control Plane Port
      jsonPath: .spec.controlPlaneEndpoint.port
      name: Port
      priority: 1
      type: string
    name: v1beta1
    schema:
      openAPIV3Schema:
        description: IBMPowerVSCluster is the Schema for the ibmpowervsclusters API.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: IBMPowerVSClusterSpec defines the desired state of IBMPowerVSCluster.
- properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint used to - communicate with the control plane. - properties: - host: - description: host is the hostname on which the API server is serving. - maxLength: 512 - type: string - port: - description: port is the port on which the API server is serving. - format: int32 - type: integer - required: - - host - - port - type: object - network: - description: Network is the reference to the Network to use for this - cluster. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstanceID: - description: ServiceInstanceID is the id of the power cloud instance - where the vsi instance will get deployed. - minLength: 1 - type: string - required: - - network - - serviceInstanceID - type: object - status: - description: IBMPowerVSClusterStatus defines the observed state of IBMPowerVSCluster. - properties: - ready: - description: ready is true when the provider resource is ready. - type: boolean - required: - - ready - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - description: Cluster to which this IBMPowerVSCluster belongs - jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name - name: Cluster - type: string - - description: Time duration since creation of IBMPowerVSCluster - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.serviceInstanceID - name: PowerVS Cloud Instance ID - priority: 1 - type: string - - description: Control Plane Endpoint - jsonPath: .spec.controlPlaneEndpoint.host - name: Endpoint - priority: 1 - type: string - - description: Control Plane Port - jsonPath: .spec.controlPlaneEndpoint.port - name: Port - priority: 1 - type: string - name: v1beta2 - schema: - openAPIV3Schema: - description: IBMPowerVSCluster is the Schema for the ibmpowervsclusters API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSClusterSpec defines the desired state of IBMPowerVSCluster. - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint used to - communicate with the control plane. - properties: - host: - description: host is the hostname on which the API server is serving. - maxLength: 512 - type: string - port: - description: port is the port on which the API server is serving.
- format: int32 - type: integer - required: - - host - - port - type: object - cosInstance: - description: |- - cosInstance contains options to configure a supporting IBM Cloud COS bucket for this - cluster - currently used for nodes requiring Ignition - (https://coreos.github.io/ignition/) for bootstrapping (requires - BootstrapFormatIgnition feature flag to be enabled). - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource and Ignition is set, then - 1. CosInstance.Name should be set; not setting it will result in a webhook error. - 2. CosInstance.BucketName should be set; not setting it will result in a webhook error. - 3. CosInstance.BucketRegion should be set; not setting it will result in a webhook error. - properties: - bucketName: - description: bucketName is the IBM Cloud COS bucket name - type: string - bucketRegion: - description: bucketRegion is the IBM Cloud COS bucket region - type: string - name: - description: |- - name defines the name of the IBM Cloud COS instance to be created - when IBMPowerVSCluster.Ignition is set - maxLength: 63 - minLength: 3 - pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$ - type: string - type: object - dhcpServer: - description: |- - dhcpServer contains the configuration to be used while creating a new DHCP server in the PowerVS workspace. - when the field is omitted, CLUSTER_NAME will be used as DHCPServer.Name and a DHCP server will be created. - it will automatically create a network with the name DHCPSERVER_Private in the PowerVS workspace. - properties: - cidr: - description: Optional cidr for the DHCP private network - type: string - dnsServer: - default: 1.1.1.1 - description: Optional DNS server for the DHCP service - type: string - id: - description: Optional id of an existing DHCPServer - type: string - name: - description: Optional name of the DHCP service. Only alphanumeric - characters and dashes are allowed. - type: string - snat: - default: true - description: Optional flag indicating whether SNAT will be enabled for the DHCP - service - type: boolean - type: object - ignition: - description: Ignition defines options related to the bootstrapping - systems where Ignition is used. - properties: - version: - default: "2.3" - description: Version defines which version of Ignition will be - used to generate bootstrap data. - enum: - - "2.3" - - "2.4" - - "3.0" - - "3.1" - - "3.2" - - "3.3" - - "3.4" - type: string - type: object - loadBalancers: - description: |- - loadBalancers is optional configuration for attaching load balancers to control plane or data plane nodes. - when omitted, the system will create a default public load balancer with the name CLUSTER_NAME-loadbalancer. - when specified, a VPC load balancer will be created and ControlPlaneEndpoint will be set with the associated hostname of the public load balancer. - when LoadBalancers[].ID is set, it is expected that a load balancer with that ID exists, or else the system will give an error. - when LoadBalancers[].Name is set, the system will first check for a load balancer with that Name; if it does not exist, the system will create a new load balancer. - For each load balancer a default backend pool and front-end listener will be configured with port 6443. - items: - description: VPCLoadBalancerSpec defines the desired state of a - VPC load balancer. - properties: - additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer.
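
As a rough sketch of how the cosInstance, dhcpServer, and ignition fields described above fit together (every value below is a placeholder, not taken from this diff):

  spec:
    dhcpServer:
      name: example-dhcp        # alphanumeric characters and dashes only
      cidr: 10.0.0.0/24         # optional private network CIDR
      snat: true                # schema default
    ignition:
      version: "3.4"            # one of the enumerated Ignition versions
    cosInstance:                # required by the webhook when ignition is set
      name: example-cos
      bucketName: example-bucket
      bucketRegion: us-south
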
- items: - description: |- - AdditionalListenerSpec defines the desired state of an - additional listener on an VPC load balancer. - properties: - defaultPoolName: - description: defaultPoolName defines the name of a VPC - Load Balancer Backend Pool to use for the VPC Load Balancer - Listener. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - port: - description: Port sets the port for the additional listener. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - protocol: - description: |- - protocol defines the protocol to use for the VPC Load Balancer Listener. - Will default to TCP protocol if not specified. - enum: - - http - - https - - tcp - - udp - type: string - selector: - description: |- - The selector is used to find IBMPowerVSMachines with matching labels. - If the label matches, the machine is then added to the load balancer listener configuration. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - x-kubernetes-list-type: map - backendPools: - description: backendPools defines the load balancer's backend - pools. - items: - description: VPCLoadBalancerBackendPoolSpec defines the desired - configuration of a VPC Load Balancer Backend Pool. - properties: - algorithm: - description: algorithm defines the load balancing algorithm - to use. - enum: - - least_connections - - round_robin - - weighted_round_robin - type: string - healthMonitor: - description: healthMonitor defines the backend pool's - health monitor. - properties: - delay: - description: delay defines the seconds to wait between - health checks. - format: int64 - maximum: 60 - minimum: 2 - type: integer - port: - description: port defines the port to perform health - monitoring on. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - retries: - description: retries defines the max retries for health - check. - format: int64 - maximum: 10 - minimum: 1 - type: integer - timeout: - description: timeout defines the seconds to wait for - a health check response. 
- format: int64 - maximum: 59 - minimum: 1 - type: integer - type: - description: type defines the protocol used for health - checks. - enum: - - http - - https - - tcp - type: string - urlPath: - description: urlPath defines the URL to use for health - monitoring. - pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ - type: string - required: - - delay - - retries - - timeout - - type - type: object - name: - description: name defines the name of the Backend Pool. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - protocol: - description: protocol defines the protocol to use for - the Backend Pool. - enum: - - http - - https - - tcp - - udp - type: string - required: - - algorithm - - healthMonitor - - protocol - type: object - type: array - id: - description: id of the loadbalancer - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: Name sets the name of the VPC load balancer. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - public: - default: true - description: public indicates whether the load balancer is public or - private - type: boolean - securityGroups: - description: |- - securityGroups defines the Security Groups to attach to the load balancer. - Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - subnets: - description: |- - subnets defines the VPC Subnets to attach to the load balancer. - Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - type: object - type: array - network: - description: |- - Network is the reference to the Network to use for this cluster. - when the field is omitted, a DHCP service will be created in the Power VS workspace and its private network will be used. - the network created by the DHCP service will have the following name format - 1. if DHCPServer.Name is not set, the cluster name will be used as the DHCP server name and the network name will be DHCPSERVER_Private. - 2. if DHCPServer.Name is set, that name will be used and the network name will be DHCPSERVER_Private. - when Network.ID is set, it is expected that a network with that id exists in the PowerVS workspace, or else the system will give an error. - when Network.Name is set, the system will first check for a network with that Name in the PowerVS workspace; if it does not exist, the system will check for a DHCP network with the given Network.Name; if that also does not exist, it will create a new DHCP service and the network name will be DHCPSERVER_Private. - Network.RegEx is not yet supported and the system will ignore the value.
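
A hedged sketch of a loadBalancers entry exercising the backendPools and healthMonitor schema above; names are placeholders, and the referenced subnet is assumed to already exist, as the description requires:

  spec:
    loadBalancers:
    - name: example-lb
      public: true                    # schema default
      additionalListeners:
      - port: 22
        protocol: tcp
        defaultPoolName: example-pool
      backendPools:
      - name: example-pool
        algorithm: round_robin
        protocol: tcp
        healthMonitor:                # delay, retries, timeout, and type are required
          delay: 5
          retries: 2
          timeout: 2
          type: tcp
      subnets:
      - name: example-subnet          # must already exist when the load balancer is reconciled
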
- properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - resourceGroup: - description: |- - resourceGroup name under which the resources will be created. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - 1. it is expected to set ResourceGroup.Name; not setting it will result in a webhook error. - ResourceGroup.ID and ResourceGroup.Regex are not yet supported and the system will ignore the values. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstance: - description: |- - serviceInstance is the reference to the Power VS server workspace on which the server instance(VM) will be created. - Power VS server workspace is a container for all Power VS instances at a specific geographic region. - serviceInstance can be created via IBM Cloud catalog or CLI. - supported serviceInstance identifiers in PowerVSResource are Name and ID, which can be obtained from the IBM Cloud UI or IBM Cloud cli. - More detail about Power VS service instances: - https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server - when omitted, the system will dynamically create a service instance with the name CLUSTER_NAME-serviceInstance. - when ServiceInstance.ID is set, it is expected that a service instance with that id exists in the PowerVS workspace, or else the system will give an error. - when ServiceInstance.Name is set, the system will first check for a service instance with that Name in the PowerVS workspace; if it does not exist, the system will create a new instance. - if more than one service instance exists with the ServiceInstance.Name in the given Zone, installation fails with an error. Use ServiceInstance.ID in those situations to use the specific service instance. - ServiceInstance.Regex is not yet supported and the system will ignore the value. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstanceID: - description: |- - ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed. - Deprecated: use ServiceInstance instead - type: string - transitGateway: - description: |- - transitGateway contains information about the IBM Cloud TransitGateway. - IBM Cloud TransitGateway helps in establishing network connectivity between IBM Cloud Power VS and VPC infrastructure; - more information about TransitGateway can be found here https://www.ibm.com/products/transit-gateway. - when TransitGateway.ID is set, it is expected that a TransitGateway with that ID exists, or else the system will give an error.
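
For example, a spec fragment pinning the resource group and workspace by name (placeholder values; per the descriptions above, the webhook requires ResourceGroup.Name when the create-infra annotation is set):

  spec:
    resourceGroup:
      name: example-resource-group
    serviceInstance:
      name: example-workspace       # use id instead to select one of several same-named workspaces
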
- when TransitGateway.Name is set, the system will first check for a TransitGateway with that Name; if it does not exist, the system will create a new TransitGateway. - properties: - globalRouting: - description: |- - globalRouting indicates whether to enable global routing while creating the transit gateway. - set this field to true only when PowerVS and VPC are from different regions; if they are the same, it is suggested to use local routing by setting the field to false. - when the field is omitted, the system will decide whether to enable globalRouting based on the PowerVS region (region associated with IBMPowerVSCluster.Spec.Zone) and the VPC region (IBMPowerVSCluster.Spec.VPC.Region). - type: boolean - id: - description: id of resource. - type: string - name: - description: name of resource. - maxLength: 63 - minLength: 1 - pattern: ^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$ - type: string - type: object - vpc: - description: |- - vpc contains information about IBM Cloud VPC resources. - when omitted, the system will dynamically create a VPC with the name CLUSTER_NAME-vpc. - when VPC.ID is set, it is expected that a VPC with that ID exists, or else the system will give an error. - when VPC.Name is set, the system will first check for a VPC with that Name; if it does not exist, the system will create a new VPC. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - 1. it is expected to set VPC.Region; not setting it will result in a webhook error. - properties: - id: - description: id of resource. - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: name of resource. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - region: - description: |- - region of IBM Cloud VPC. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - it is expected to set the region; not setting it will result in a webhook error. - type: string - type: object - vpcSecurityGroups: - description: VPCSecurityGroups to attach to the VPC resource - items: - description: VPCSecurityGroup defines a VPC Security Group that - should exist or be created within the specified VPC, with the - specified Security Group Rules. - properties: - id: - description: id of the Security Group. - type: string - name: - description: name of the Security Group. - type: string - rules: - description: rules are the Security Group Rules for the Security - Group. - items: - description: VPCSecurityGroupRule defines a VPC Security Group - Rule for a specified Security Group. - properties: - action: - description: action defines whether to allow or deny traffic - defined by the Security Group Rule. - enum: - - allow - - deny - type: string - destination: - description: |- - destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule. - Only used when direction is VPCSecurityGroupRuleDirectionOutbound. - properties: - icmpCode: - description: |- - icmpCode is the ICMP code for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - icmpType: - description: |- - icmpType is the ICMP type for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - portRange: - description: portRange is a range of ports allowed - for the Rule's remote. - properties: - maximumPort: - description: maximumPort is the inclusive upper - range of ports.
- format: int64 - maximum: 65535 - minimum: 1 - type: integer - minimumPort: - description: minimumPort is the inclusive lower - range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - type: object - x-kubernetes-validations: - - message: maximum port must be greater than or equal - to minimum port - rule: self.maximumPort >= self.minimumPort - protocol: - description: protocol defines the traffic protocol - used for the Security Group Rule. - enum: - - all - - icmp - - tcp - - udp - type: string - remotes: - description: |- - remotes is a set of VPCSecurityGroupRuleRemotes that define the traffic allowed by the Rule's remote. - Specifying multiple VPCSecurityGroupRuleRemotes creates a unique Security Group Rule with the shared Protocol, PortRange, etc. - This allows for easier management of Security Group Rules for sets of CIDRs, IPs, etc. - items: - description: |- - VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. - The type of remote defines the additional remote details which are used for defining the remote. - properties: - address: - description: |2- - address is the address to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. - type: string - cidrSubnetName: - description: |- - cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. - type: string - remoteType: - description: remoteType defines the type of - filter to define for the remote's destination/source. - enum: - - any - - cidr - - address - - sg - type: string - securityGroupName: - description: |- - securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG - type: string - required: - - remoteType - type: object - x-kubernetes-validations: - - message: cidrSubnetName, address, and securityGroupName - are not valid for VPCSecurityGroupRuleRemoteTypeAny - remoteType - rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only cidrSubnetName is valid for VPCSecurityGroupRuleRemoteTypeCIDR - remoteType - rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP - remoteType - rule: 'self.remoteType == ''address'' ? (has(self.address) - && !has(self.cidrSubnetName) && !has(self.securityGroupName)) - : true' - - message: only securityGroupName is valid for VPCSecurityGroupRuleRemoteTypeSG - remoteType - rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName) - && !has(self.cidrSubnetName) && !has(self.address)) - : true' - type: array - required: - - protocol - - remotes - type: object - x-kubernetes-validations: - - message: icmpCode and icmpType are only supported for - VPCSecurityGroupRuleProtocolIcmp protocol - rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) - && !has(self.icmpType)) : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll - protocol - rule: 'self.protocol == ''all'' ? !has(self.portRange) - : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol == ''icmp'' ?
!has(self.portRange) - : true' - direction: - description: direction defines whether the traffic is - inbound or outbound for the Security Group Rule. - enum: - - inbound - - outbound - type: string - securityGroupID: - description: securityGroupID is the ID of the Security - Group for the Security Group Rule. - type: string - source: - description: |- - source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule. - Only used when direction is VPCSecurityGroupRuleDirectionInbound. - properties: - icmpCode: - description: |- - icmpCode is the ICMP code for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - icmpType: - description: |- - icmpType is the ICMP type for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - portRange: - description: portRange is a range of ports allowed - for the Rule's remote. - properties: - maximumPort: - description: maximumPort is the inclusive upper - range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - minimumPort: - description: minimumPort is the inclusive lower - range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - type: object - x-kubernetes-validations: - - message: maximum port must be greater than or equal - to minimum port - rule: self.maximumPort >= self.minimumPort - protocol: - description: protocol defines the traffic protocol - used for the Security Group Rule. - enum: - - all - - icmp - - tcp - - udp - type: string - remotes: - description: |- - remotes is a set of VPCSecurityGroupRuleRemotes that define the traffic allowed by the Rule's remote. - Specifying multiple VPCSecurityGroupRuleRemotes creates a unique Security Group Rule with the shared Protocol, PortRange, etc. - This allows for easier management of Security Group Rules for sets of CIDRs, IPs, etc. - items: - description: |- - VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. - The type of remote defines the additional remote details which are used for defining the remote. - properties: - address: - description: |2- - address is the address to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. - type: string - cidrSubnetName: - description: |- - cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. - type: string - remoteType: - description: remoteType defines the type of - filter to define for the remote's destination/source. - enum: - - any - - cidr - - address - - sg - type: string - securityGroupName: - description: |- - securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG - type: string - required: - - remoteType - type: object - x-kubernetes-validations: - - message: cidrSubnetName, address, and securityGroupName - are not valid for VPCSecurityGroupRuleRemoteTypeAny - remoteType - rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only cidrSubnetName is valid for VPCSecurityGroupRuleRemoteTypeCIDR - remoteType - rule: 'self.remoteType == ''cidr'' ?
(has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP - remoteType - rule: 'self.remoteType == ''address'' ? (has(self.address) - && !has(self.cidrSubnetName) && !has(self.securityGroupName)) - : true' - - message: only securityGroupName is valid for VPCSecurityGroupRuleRemoteTypeSG - remoteType - rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName) - && !has(self.cidrSubnetName) && !has(self.address)) - : true' - type: array - required: - - protocol - - remotes - type: object - x-kubernetes-validations: - - message: icmpCode and icmpType are only supported for - VPCSecurityGroupRuleProtocolIcmp protocol - rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) - && !has(self.icmpType)) : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll - protocol - rule: 'self.protocol == ''all'' ? !has(self.portRange) - : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol == ''icmp'' ? !has(self.portRange) - : true' - required: - - action - - direction - type: object - x-kubernetes-validations: - - message: both destination and source cannot be provided - rule: (has(self.destination) && !has(self.source)) || (!has(self.destination) - && has(self.source)) - - message: source must be set for VPCSecurityGroupRuleDirectionInbound - direction - rule: 'self.direction == ''inbound'' ? has(self.source) - : true' - - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound - direction - rule: 'self.direction == ''inbound'' ? !has(self.destination) - : true' - - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound - direction - rule: 'self.direction == ''outbound'' ? has(self.destination) - : true' - - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound - direction - rule: 'self.direction == ''outbound'' ? !has(self.source) - : true' - type: array - tags: - description: tags are tags to add to the Security Group. - items: - type: string - type: array - type: object - x-kubernetes-validations: - - message: either an id or name must be specified - rule: has(self.id) || has(self.name) - type: array - vpcSubnets: - description: |- - vpcSubnets contains information about IBM Cloud VPC Subnet resources. - when omitted, the system will create the subnets in all the zones corresponding to VPC.Region, with the name CLUSTER_NAME-vpcsubnet-ZONE_NAME. - possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server. - when VPCSubnets[].ID is set, it is expected that a subnet with that ID exists, or else the system will give an error. - when VPCSubnets[].Zone is not set, a random zone is picked from the available zones of VPC.Region. - when VPCSubnets[].Name is not set, the system will set the name as CLUSTER_NAME-vpcsubnet-INDEX. - if a subnet with the name VPCSubnets[].Name is not found, the system will create a new subnet in VPCSubnets[].Zone. - items: - description: Subnet describes a subnet.
- properties: - cidr: - type: string - id: - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - zone: - type: string - type: object - type: array - zone: - description: |- - zone is the name of Power VS zone where the cluster will be created - possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - 1. it is expected to set the zone, not setting will result in webhook error. - 2. the zone should have PER capabilities, or else system will give error. - type: string - required: - - network - - serviceInstanceID - type: object - status: - description: IBMPowerVSClusterStatus defines the observed state of IBMPowerVSCluster. - properties: - conditions: - description: Conditions defines current service state of the IBMPowerVSCluster. - items: - description: Condition defines an observation of a Cluster API resource - operational state. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 - type: string - reason: - description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 - minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - cosInstance: - description: cosInstance is reference to IBM Cloud COS Instance resource. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - dhcpServer: - description: dhcpServer is the reference to the Power VS DHCP server. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - loadBalancers: - additionalProperties: - description: VPCLoadBalancerStatus defines the status VPC load balancer. 
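
A sketch of the vpcSecurityGroups and vpcSubnets fields described above, with a single inbound rule that satisfies the CEL validations (inbound rules set source, not destination; a remoteType of any carries no other remote fields). All names, zones, and CIDRs are placeholders:

  spec:
    zone: dal10                     # placeholder; must be a PER-capable zone
    vpcSubnets:
    - name: example-subnet          # defaults to CLUSTER_NAME-vpcsubnet-INDEX when unset
      zone: us-south-1
      cidr: 10.240.0.0/24
    vpcSecurityGroups:
    - name: example-sg              # either an id or name must be specified
      rules:
      - action: allow
        direction: inbound
        source:
          protocol: tcp
          portRange:
            minimumPort: 6443
            maximumPort: 6443       # must be >= minimumPort
          remotes:
          - remoteType: any
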
- properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - hostname: - description: hostname is the hostname of the load balancer. - type: string - id: - description: id of the VPC load balancer. - type: string - state: - description: State is the status of the load balancer. - type: string - type: object - description: loadBalancers is a reference to IBM Cloud VPC load balancers. - type: object - network: - description: network is the reference to the Power VS network to - use for this cluster. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - ready: - default: false - description: ready is true when the provider resource is ready. - type: boolean - resourceGroupID: - description: resourceGroupID is the reference to the Power VS resource - group under which the resources will be created. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - serviceInstance: - description: serviceInstance is the reference to the Power VS service - on which the server instance(VM) will be created. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - transitGateway: - description: transitGateway is a reference to IBM Cloud TransitGateway. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - powerVSConnection: - description: powerVSConnection defines the powervs connection - status in transit gateway. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - vpcConnection: - description: vpcConnection defines the vpc connection status in - transit gateway. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - type: object - v1beta2: - description: v1beta2 groups all the fields that will be added or modified - in IBMPowerVSCluster's status with the V1Beta2 version. - properties: - conditions: - description: conditions represents the observations of an IBMPowerVSCluster's - current state. - items: - description: Condition contains details for one aspect of the - current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
- format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - vpc: - description: vpc is a reference to IBM Cloud VPC resources. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - vpcSecurityGroups: - additionalProperties: - description: VPCSecurityGroupStatus defines a vpc security group - resource status with its id and the respective rules' ids. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - ruleIDs: - description: ruleIDs contains the ids of the rules created under the - security group - items: - type: string - type: array - type: object - description: vpcSecurityGroups is a reference to IBM Cloud VPC security - groups. - type: object - vpcSubnet: - additionalProperties: - description: ResourceReference identifies a resource with id. - properties: - controllerCreated: - default: false - description: controllerCreated indicates whether the resource - is created by the controller. - type: boolean - id: - description: id represents the id of the resource. - type: string - type: object - description: vpcSubnet is a reference to IBM Cloud VPC subnets.
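
Conditions in the v1beta2 status follow the stricter condition shape above (enumerated status, required reason and message, observedGeneration). A sketch of an entry a controller might record; the type and reason values are illustrative only:

  status:
    v1beta2:
      conditions:
      - type: Ready                 # illustrative condition type
        status: "True"              # one of "True", "False", Unknown
        reason: ClusterReady        # CamelCase, must be non-empty
        message: All cluster resources are provisioned.
        lastTransitionTime: "2025-01-01T00:00:00Z"
        observedGeneration: 1
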
- type: object - required: - - ready - type: object - type: object - served: true - storage: true - subresources: - status: {} - status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null - --- - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.18.0 - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - service.beta.openshift.io/inject-cabundle: "true" - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 - clusterctl.cluster.x-k8s.io: "" - name: ibmpowervsmachines.infrastructure.cluster.x-k8s.io - spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: Cg== - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /convert - conversionReviewVersions: - - v1 - - v1beta1 - group: infrastructure.cluster.x-k8s.io - names: - kind: IBMPowerVSMachine - listKind: IBMPowerVSMachineList - plural: ibmpowervsmachines - singular: ibmpowervsmachine - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Cluster to which this IBMPowerVSMachine belongs - jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name - name: Cluster - type: string - - description: Machine object to which this IBMPowerVSMachine belongs - jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name - name: Machine - priority: 1 - type: string - - description: Time duration since creation of IBMPowerVSMachine - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Cluster infrastructure is ready for IBM PowerVS instances - jsonPath: .status.ready - name: Ready - type: string - - description: Instance Internal Addresses - jsonPath: .status.addresses[?(@.type=="InternalIP")].address - name: Internal-IP - priority: 1 - type: string - - description: Instance External Addresses - jsonPath: .status.addresses[?(@.type=="ExternalIP")].address - name: External-IP - priority: 1 - type: string - - description: PowerVS instance state - jsonPath: .status.instanceState - name: State - type: string - - description: PowerVS instance health - jsonPath: .status.health - name: Health - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: IBMPowerVSMachine is the Schema for the ibmpowervsmachines API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSMachineSpec defines the desired state of IBMPowerVSMachine. 
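
For orientation before the field-by-field schema that follows, a minimal v1beta1 IBMPowerVSMachine satisfying the required network and serviceInstanceID fields; every value is a placeholder:

apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: IBMPowerVSMachine
metadata:
  name: example-machine
spec:
  serviceInstanceID: example-workspace-id
  network:
    name: example-network           # id, name, or regex may be used
  image:
    name: example-image
  processors: "0.5"                 # string form matching ^\d+(\.)?(\d)?(\d)?$
  memory: "32"                      # amount of memory in GB, as a string
  procType: shared
  sysType: s922
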
- properties: - image: - description: Image is the reference to the Image from which to create - the machine instance. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - imageRef: - description: |- - ImageRef is an optional reference to a provider-specific resource that holds - the details for provisioning the Image for a Cluster. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - memory: - description: Memory is Amount of memory allocated (in GB) - type: string - network: - description: Network is the reference to the Network to use for this - instance. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - procType: - description: 'ProcType is the processor type, e.g: dedicated, shared, - capped' - type: string - processors: - description: Processors is Number of processors allocated. - pattern: ^\d+(\.)?(\d)?(\d)?$ - type: string - providerID: - description: ProviderID is the unique identifier as specified by the - cloud provider. - type: string - serviceInstanceID: - description: ServiceInstanceID is the id of the power cloud instance - where the vsi instance will get deployed. - minLength: 1 - type: string - sshKey: - description: SSHKey is the name of the SSH key pair provided to the - vsi for authenticating users. - type: string - sysType: - description: SysType is the System type used to host the vsi. - type: string - required: - - network - - serviceInstanceID - type: object - status: - description: IBMPowerVSMachineStatus defines the observed state of IBMPowerVSMachine. - properties: - addresses: - description: Addresses contains the vsi associated addresses. - items: - description: NodeAddress contains information for the node's address. - properties: - address: - description: The node address. - type: string - type: - description: Node address type, one of Hostname, ExternalIP - or InternalIP. - type: string - required: - - address - - type - type: object - type: array - conditions: - description: Conditions defines current service state of the IBMPowerVSMachine. - items: - description: Condition defines an observation of a Cluster API resource - operational state. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. 
- format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 - type: string - reason: - description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 - minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - failureMessage: - description: |- - FailureMessage will be set in the event that there is a terminal problem - reconciling the Machine and will contain a more verbose string suitable - for logging and human consumption. - - This field should not be set for transient errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - type: string - failureReason: - description: |- - FailureReason will be set in the event that there is a terminal problem - reconciling the Machine and will contain a succinct value suitable - for machine interpretation. - - This field should not be set for transient errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - type: string - fault: - description: Fault reports any fault messages for the vsi. - type: string - health: - description: Health is the health of the vsi. - type: string - instanceID: - type: string - instanceState: - description: InstanceState is the status of the vsi. - type: string - ready: - description: Ready is true when the provider resource is ready.
- type: boolean - region: - description: Region specifies the Power VS Service instance region. - type: string - zone: - description: Zone specifies the Power VS Service instance zone. - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - description: Cluster to which this IBMPowerVSMachine belongs - jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name - name: Cluster - type: string - - description: Machine object to which this IBMPowerVSMachine belongs - jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name - name: Machine - priority: 1 - type: string - - description: Time duration since creation of IBMPowerVSMachine - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Cluster infrastructure is ready for IBM PowerVS instances - jsonPath: .status.ready - name: Ready - type: string - - description: Instance Internal Addresses - jsonPath: .status.addresses[?(@.type=="InternalIP")].address - name: Internal-IP - priority: 1 - type: string - - description: Instance External Addresses - jsonPath: .status.addresses[?(@.type=="ExternalIP")].address - name: External-IP - priority: 1 - type: string - - description: PowerVS instance state - jsonPath: .status.instanceState - name: State - type: string - - description: PowerVS instance health - jsonPath: .status.health - name: Health - type: string - name: v1beta2 - schema: - openAPIV3Schema: - description: IBMPowerVSMachine is the Schema for the ibmpowervsmachines API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSMachineSpec defines the desired state of IBMPowerVSMachine. - properties: - image: - description: |- - Image is the reference to the image which is used to create the instance. - supported image identifiers in IBMPowerVSResourceReference are Name and ID, which can be obtained from the IBM Cloud UI or IBM Cloud cli. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - imageRef: - description: |- - ImageRef is an optional reference to a provider-specific resource that holds - the details for provisioning the Image for a Cluster. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - memoryGiB: - description: |- - memoryGiB is the size of a virtual machine's memory, in GiB. - maximum value for the MemoryGiB depends on the selected SystemType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud - The minimum memory is 2 GiB. - When omitted, this means the user has no opinion and the platform is left to choose a reasonable - default, which is subject to change over time. The current default is 2. - format: int32 - type: integer - network: - description: |- - Network is the reference to the Network to use for this instance. - supported network identifier in IBMPowerVSResourceReference are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - processorType: - description: |- - processorType is the VM instance processor type. - It must be set to one of the following values: Dedicated, Capped or Shared. - Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. - Shared: Shared among other clients. - Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. - if the processorType is selected as Dedicated, then processors value cannot be fractional. - When omitted, this means that the user has no opinion and the platform is left to choose a - reasonable default, which is subject to change over time. The current default is Shared. - enum: - - Dedicated - - Shared - - Capped - - "" - type: string - processors: - anyOf: - - type: integer - - type: string - description: |- - processors is the number of virtual processors in a virtual machine. - when the processorType is selected as Dedicated the processors value cannot be fractional. - maximum value for the Processors depends on the selected SystemType, - and minimum value for Processors depends on the selected ProcessorType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud. - when ProcessorType is set as Shared or Capped, The minimum processors is 0.25. - when ProcessorType is set as Dedicated, The minimum processors is 1. - When omitted, this means that the user has no opinion and the platform is left to choose a - reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. - when ProcessorType selected as Dedicated, the default is set to 1. - when ProcessorType selected as Shared or Capped, the default is set to 0.25. - x-kubernetes-int-or-string: true - providerID: - description: ProviderID is the unique identifier as specified by the - cloud provider. - type: string - serviceInstance: - description: |- - serviceInstance is the reference to the Power VS workspace on which the server instance(VM) will be created. - Power VS workspace is a container for all Power VS instances at a specific geographic region. 
- serviceInstance can be created via IBM Cloud catalog or CLI. - supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. - More detail about Power VS service instance. - https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server - when omitted system will dynamically create the service instance - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstanceID: - description: |- - ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed. - Deprecated: use ServiceInstance instead - type: string - sshKey: - description: SSHKey is the name of the SSH key pair provided to the - vsi for authenticating users. - type: string - systemType: - description: |- - systemType is the System type used to host the instance. - systemType determines the number of cores and memory that is available. - Few of the supported SystemTypes are s922,e980,s1022,e1050,e1080. - When omitted, this means that the user has no opinion and the platform is left to choose a - reasonable default, which is subject to change over time. The current default is s922 which is generally available. - enum: - - s922 - - e980 - - s1022 - - e1050 - - e1080 - - "" - type: string - required: - - network - - serviceInstanceID - type: object - status: - description: IBMPowerVSMachineStatus defines the observed state of IBMPowerVSMachine. - properties: - addresses: - description: Addresses contains the vsi associated addresses. - items: - description: NodeAddress contains information for the node's address. - properties: - address: - description: The node address. - type: string - type: - description: Node address type, one of Hostname, ExternalIP - or InternalIP. - type: string - required: - - address - - type - type: object - type: array - conditions: - description: Conditions defines current service state of the IBMPowerVSMachine. - items: - description: Condition defines an observation of a Cluster API resource - operational state. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 - type: string - reason: - description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 - minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 - type: string - status: - description: status of the condition, one of True, False, Unknown. 
- type: string - type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - failureMessage: - description: |- - FailureMessage will be set in the event that there is a terminal problem - reconciling the Machine and will contain a more verbose string suitable - for logging and human consumption. - - This field should not be set for transient errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - - Deprecated: This field is deprecated and is going to be removed in the next apiVersion. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. - type: string - failureReason: - description: |- - FailureReason will be set in the event that there is a terminal problem - reconciling the Machine and will contain a succinct value suitable - for machine interpretation. - - This field should not be set for transient errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - - Deprecated: This field is deprecated and is going to be removed in the next apiVersion. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. - type: string - fault: - description: Fault reports any fault messages for the vsi. - type: string - health: - description: Health is the health of the vsi. - type: string - instanceID: - type: string - instanceState: - description: InstanceState is the status of the vsi. - type: string - ready: - description: Ready is true when the provider resource is ready. - type: boolean - region: - description: Region specifies the Power VS Service instance region. - type: string - v1beta2: - description: v1beta2 groups all the fields that will be added or modified - in IBMPowerVSMachine's status with the V1Beta2 version. - properties: - conditions: - description: |- - conditions represents the observations of an IBMPowerVSMachine's current state. 
- Known condition types are Ready, InstanceReady and Paused. - items: - description: Condition contains details for one aspect of the - current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - zone: - description: Zone specifies the Power VS Service instance zone. 
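- # Example (hypothetical, not part of the generated output): a minimal IBMPowerVSMachine
- # matching the v1beta2 spec schema above. All names and IDs are placeholders; only
- # network and serviceInstanceID are required, the rest fall back to the documented defaults:
- #   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
- #   kind: IBMPowerVSMachine
- #   metadata:
- #     name: example-powervs-machine
- #   spec:
- #     serviceInstanceID: "example-workspace-id"
- #     network:
- #       name: example-dhcp-network
- #     image:
- #       name: example-image
- #     systemType: s922
- #     processorType: Shared
- #     processors: "0.25"
- #     memoryGiB: 4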
- type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null - --- - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.18.0 - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - service.beta.openshift.io/inject-cabundle: "true" - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 - clusterctl.cluster.x-k8s.io: "" - name: ibmpowervsmachinetemplates.infrastructure.cluster.x-k8s.io - spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: Cg== - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /convert - conversionReviewVersions: - - v1 - - v1beta1 - group: infrastructure.cluster.x-k8s.io - names: - kind: IBMPowerVSMachineTemplate - listKind: IBMPowerVSMachineTemplateList - plural: ibmpowervsmachinetemplates - singular: ibmpowervsmachinetemplate - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: IBMPowerVSMachineTemplate is the Schema for the ibmpowervsmachinetemplates - API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSMachineTemplateSpec defines the desired state of - IBMPowerVSMachineTemplate. - properties: - template: - description: IBMPowerVSMachineTemplateResource holds the IBMPowerVSMachine - spec. - properties: - spec: - description: IBMPowerVSMachineSpec defines the desired state of - IBMPowerVSMachine. - properties: - image: - description: Image is the reference to the Image from which - to create the machine instance. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - imageRef: - description: |- - ImageRef is an optional reference to a provider-specific resource that holds - the details for provisioning the Image for a Cluster. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - memory: - description: Memory is the amount of memory allocated (in GB) - type: string - network: - description: Network is the reference to the Network to use - for this instance. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - procType: - description: 'ProcType is the processor type, e.g.: dedicated, - shared, capped' - type: string - processors: - description: Processors is the number of processors allocated. - pattern: ^\d+(\.)?(\d)?(\d)?$ - type: string - providerID: - description: ProviderID is the unique identifier as specified - by the cloud provider. - type: string - serviceInstanceID: - description: ServiceInstanceID is the id of the power cloud - instance where the vsi instance will get deployed. - minLength: 1 - type: string - sshKey: - description: SSHKey is the name of the SSH key pair provided - to the vsi for authenticating users. - type: string - sysType: - description: SysType is the System type used to host the vsi. - type: string - required: - - network - - serviceInstanceID - type: object - required: - - spec - type: object - required: - - template - type: object - status: - description: IBMPowerVSMachineTemplateStatus defines the observed state - of IBMPowerVSMachineTemplate. - properties: - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Capacity defines the resource capacity for this machine. - This value is used for autoscaling from zero operations as defined in: - https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md - type: object - type: object - type: object - served: true - storage: false - subresources: - status: {} - - name: v1beta2 - schema: - openAPIV3Schema: - description: IBMPowerVSMachineTemplate is the Schema for the ibmpowervsmachinetemplates - API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSMachineTemplateSpec defines the desired state of - IBMPowerVSMachineTemplate. - properties: - template: - description: IBMPowerVSMachineTemplateResource holds the IBMPowerVSMachine - spec. - properties: - spec: - description: IBMPowerVSMachineSpec defines the desired state of - IBMPowerVSMachine. 
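- # Example (hypothetical, not part of the generated output): a minimal v1beta2
- # IBMPowerVSMachineTemplate wrapping the machine spec described below; only
- # template.spec.network and template.spec.serviceInstanceID are required, and
- # all values shown are placeholders:
- #   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
- #   kind: IBMPowerVSMachineTemplate
- #   metadata:
- #     name: example-machine-template
- #   spec:
- #     template:
- #       spec:
- #         serviceInstanceID: "example-workspace-id"
- #         network:
- #           name: example-dhcp-network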
- properties: - image: - description: |- - Image is the reference to the image which is used to create the instance. - supported image identifier in IBMPowerVSResourceReference are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - imageRef: - description: |- - ImageRef is an optional reference to a provider-specific resource that holds - the details for provisioning the Image for a Cluster. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - memoryGiB: - description: |- - memoryGiB is the size of a virtual machine's memory, in GiB. - maximum value for the MemoryGiB depends on the selected SystemType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud - The minimum memory is 2 GiB. - When omitted, this means the user has no opinion and the platform is left to choose a reasonable - default, which is subject to change over time. The current default is 2. - format: int32 - type: integer - network: - description: |- - Network is the reference to the Network to use for this instance. - supported network identifier in IBMPowerVSResourceReference are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - processorType: - description: |- - processorType is the VM instance processor type. - It must be set to one of the following values: Dedicated, Capped or Shared. - Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. - Shared: Shared among other clients. - Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. - if the processorType is selected as Dedicated, then processors value cannot be fractional. - When omitted, this means that the user has no opinion and the platform is left to choose a - reasonable default, which is subject to change over time. The current default is Shared. - enum: - - Dedicated - - Shared - - Capped - - "" - type: string - processors: - anyOf: - - type: integer - - type: string - description: |- - processors is the number of virtual processors in a virtual machine. - when the processorType is selected as Dedicated the processors value cannot be fractional. 
- maximum value for the Processors depends on the selected SystemType, - and minimum value for Processors depends on the selected ProcessorType, which can be found here: https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-pricing-virtual-server-on-cloud. - when ProcessorType is set as Shared or Capped, The minimum processors is 0.25. - when ProcessorType is set as Dedicated, The minimum processors is 1. - When omitted, this means that the user has no opinion and the platform is left to choose a - reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. - when ProcessorType selected as Dedicated, the default is set to 1. - when ProcessorType selected as Shared or Capped, the default is set to 0.25. - x-kubernetes-int-or-string: true - providerID: - description: ProviderID is the unique identifier as specified - by the cloud provider. - type: string - serviceInstance: - description: |- - serviceInstance is the reference to the Power VS workspace on which the server instance(VM) will be created. - Power VS workspace is a container for all Power VS instances at a specific geographic region. - serviceInstance can be created via IBM Cloud catalog or CLI. - supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. - More detail about Power VS service instance. - https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server - when omitted system will dynamically create the service instance - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstanceID: - description: |- - ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed. - Deprecated: use ServiceInstance instead - type: string - sshKey: - description: SSHKey is the name of the SSH key pair provided - to the vsi for authenticating users. - type: string - systemType: - description: |- - systemType is the System type used to host the instance. - systemType determines the number of cores and memory that is available. - Few of the supported SystemTypes are s922,e980,s1022,e1050,e1080. - When omitted, this means that the user has no opinion and the platform is left to choose a - reasonable default, which is subject to change over time. The current default is s922 which is generally available. - enum: - - s922 - - e980 - - s1022 - - e1050 - - e1080 - - "" - type: string - required: - - network - - serviceInstanceID - type: object - required: - - spec - type: object - required: - - template - type: object - status: - description: IBMPowerVSMachineTemplateStatus defines the observed state - of IBMPowerVSMachineTemplate. - properties: - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Capacity defines the resource capacity for this machine. 
- This value is used for autoscaling from zero operations as defined in: - https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md - type: object - type: object - type: object - served: true - storage: true - subresources: - status: {} - status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null - --- - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.18.0 - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - service.beta.openshift.io/inject-cabundle: "true" - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 - clusterctl.cluster.x-k8s.io: "" - name: ibmpowervsimages.infrastructure.cluster.x-k8s.io - spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: Cg== - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /convert - conversionReviewVersions: - - v1 - - v1beta1 - group: infrastructure.cluster.x-k8s.io - names: - kind: IBMPowerVSImage - listKind: IBMPowerVSImageList - plural: ibmpowervsimages - singular: ibmpowervsimage - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: PowerVS image state - jsonPath: .status.imageState - name: State - type: string - - description: Image is ready for IBM PowerVS instances - jsonPath: .status.ready - name: Ready - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: IBMPowerVSImage is the Schema for the ibmpowervsimages API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSImageSpec defines the desired state of IBMPowerVSImage. - properties: - bucket: - description: Cloud Object Storage bucket name; bucket-name[/optional/folder] - type: string - clusterName: - description: ClusterName is the name of the Cluster this object belongs - to. - minLength: 1 - type: string - deletePolicy: - default: delete - description: DeletePolicy defines the policy used to identify images - to be preserved beyond the lifecycle of associated cluster. - enum: - - delete - - retain - type: string - object: - description: Cloud Object Storage image filename. - type: string - region: - description: Cloud Object Storage region. - type: string - serviceInstanceID: - description: ServiceInstanceID is the id of the power cloud instance - where the image will get imported. 
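- # Example (hypothetical, not part of the generated output): a minimal IBMPowerVSImage
- # import request matching the v1beta1 schema above; bucket, clusterName, object,
- # region, and serviceInstanceID are required, and all values are placeholders:
- #   apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- #   kind: IBMPowerVSImage
- #   metadata:
- #     name: example-image
- #   spec:
- #     clusterName: example-cluster
- #     bucket: example-cos-bucket
- #     object: example-image.ova.gz
- #     region: us-south
- #     serviceInstanceID: "example-workspace-id"
- #     storageType: tier1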
- type: string - storageType: - default: tier1 - description: Type of storage, storage pool with the most available - space will be selected. - enum: - - tier0 - - tier1 - - tier3 - type: string - required: - - bucket - - clusterName - - object - - region - - serviceInstanceID - type: object - status: - description: IBMPowerVSImageStatus defines the observed state of IBMPowerVSImage. - properties: - conditions: - description: Conditions defines current service state of the IBMPowerVSImage. - items: - description: Condition defines an observation of a Cluster API resource - operational state. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 - type: string - reason: - description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 - minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - imageID: - description: ImageID is the id of the imported image. - type: string - imageState: - description: ImageState is the status of the imported image. - type: string - jobID: - description: JobID is the job ID of an import operation. - type: string - ready: - description: Ready is true when the provider resource is ready. - type: boolean - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - description: PowerVS image state - jsonPath: .status.imageState - name: State - type: string - - description: Image is ready for IBM PowerVS instances - jsonPath: .status.ready - name: Ready - type: string - name: v1beta2 - schema: - openAPIV3Schema: - description: IBMPowerVSImage is the Schema for the ibmpowervsimages API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSImageSpec defines the desired state of IBMPowerVSImage. - properties: - bucket: - description: Cloud Object Storage bucket name; bucket-name[/optional/folder] - type: string - clusterName: - description: ClusterName is the name of the Cluster this object belongs - to. - minLength: 1 - type: string - deletePolicy: - default: delete - description: DeletePolicy defines the policy used to identify images - to be preserved beyond the lifecycle of associated cluster. - enum: - - delete - - retain - type: string - object: - description: Cloud Object Storage image filename. - type: string - region: - description: Cloud Object Storage region. - type: string - serviceInstance: - description: |- - serviceInstance is the reference to the Power VS workspace on which the server instance(VM) will be created. - Power VS workspace is a container for all Power VS instances at a specific geographic region. - serviceInstance can be created via IBM Cloud catalog or CLI. - supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. - More detail about Power VS service instance. - https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server - when omitted system will dynamically create the service instance - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstanceID: - description: |- - ServiceInstanceID is the id of the power cloud instance where the image will get imported. - Deprecated: use ServiceInstance instead - type: string - storageType: - default: tier1 - description: Type of storage, storage pool with the most available - space will be selected. - enum: - - tier0 - - tier1 - - tier3 - type: string - required: - - bucket - - clusterName - - object - - region - - serviceInstanceID - type: object - status: - description: IBMPowerVSImageStatus defines the observed state of IBMPowerVSImage. - properties: - conditions: - description: Conditions defines current service state of the IBMPowerVSImage. - items: - description: Condition defines an observation of a Cluster API resource - operational state. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 - type: string - reason: - description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. 
- maxLength: 256 - minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - imageID: - description: ImageID is the id of the imported image. - type: string - imageState: - description: ImageState is the status of the imported image. - type: string - jobID: - description: JobID is the job ID of an import operation. - type: string - ready: - description: Ready is true when the provider resource is ready. - type: boolean - v1beta2: - description: v1beta2 groups all the fields that will be added or modified - in IBMPowerVSImage's status with the V1Beta2 version. - properties: - conditions: - description: conditions represents the observations of an IBMPowerVSImage's - current state. - items: - description: Condition contains details for one aspect of the - current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - type: object - type: object - served: true - storage: true - subresources: - status: {} - status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null - --- - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.18.0 - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 - clusterctl.cluster.x-k8s.io: "" - name: ibmpowervsclustertemplates.infrastructure.cluster.x-k8s.io - spec: - group: infrastructure.cluster.x-k8s.io - names: - categories: - - cluster-api - kind: IBMPowerVSClusterTemplate - listKind: IBMPowerVSClusterTemplateList - plural: ibmpowervsclustertemplates - shortNames: - - ibmpowervsct - singular: ibmpowervsclustertemplate - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Time duration since creation of IBMPowerVSClusterTemplate - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: IBMPowerVSClusterTemplate is the schema for IBM Power VS Kubernetes - Cluster Templates. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSClusterTemplateSpec defines the desired state of - IBMPowerVSClusterTemplate. - properties: - template: - description: IBMPowerVSClusterTemplateResource describes the data - needed to create an IBMPowerVSCluster from a template. - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. 
- More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - labels is a map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - type: object - spec: - description: IBMPowerVSClusterSpec defines the desired state of - IBMPowerVSCluster. - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint - used to communicate with the control plane. - properties: - host: - description: host is the hostname on which the API server - is serving. - maxLength: 512 - type: string - port: - description: port is the port on which the API server - is serving. - format: int32 - type: integer - required: - - host - - port - type: object - network: - description: Network is the reference to the Network to use - for this cluster. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstanceID: - description: ServiceInstanceID is the id of the power cloud - instance where the vsi instance will get deployed. - minLength: 1 - type: string - required: - - network - - serviceInstanceID - type: object - required: - - spec - type: object - required: - - template - type: object - type: object - served: true - storage: false - subresources: {} - - additionalPrinterColumns: - - description: Time duration since creation of IBMPowerVSClusterTemplate - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta2 - schema: - openAPIV3Schema: - description: IBMPowerVSClusterTemplate is the schema for IBM Power VS Kubernetes - Cluster Templates. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMPowerVSClusterTemplateSpec defines the desired state of - IBMPowerVSClusterTemplate. - properties: - template: - description: IBMPowerVSClusterTemplateResource describes the data - needed to create an IBMPowerVSCluster from a template. - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. 
They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - labels is a map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - type: object - spec: - description: IBMPowerVSClusterSpec defines the desired state of - IBMPowerVSCluster. - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint - used to communicate with the control plane. - properties: - host: - description: host is the hostname on which the API server - is serving. - maxLength: 512 - type: string - port: - description: port is the port on which the API server - is serving. - format: int32 - type: integer - required: - - host - - port - type: object - cosInstance: - description: |- - cosInstance contains options to configure a supporting IBM Cloud COS bucket for this - cluster - currently used for nodes requiring Ignition - (https://coreos.github.io/ignition/) for bootstrapping (requires - BootstrapFormatIgnition feature flag to be enabled). - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource and Ignition is set, then - 1. CosInstance.Name should be set; not setting it will result in a webhook error. - 2. CosInstance.BucketName should be set; not setting it will result in a webhook error. - 3. CosInstance.BucketRegion should be set; not setting it will result in a webhook error. - properties: - bucketName: - description: bucketName is IBM cloud COS bucket name - type: string - bucketRegion: - description: bucketRegion is IBM cloud COS bucket region - type: string - name: - description: |- - name defines name of IBM cloud COS instance to be created. - when IBMPowerVSCluster.Ignition is set - maxLength: 63 - minLength: 3 - pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$ - type: string - type: object - dhcpServer: - description: |- - dhcpServer contains the configuration to be used while creating a new DHCP server in PowerVS workspace. - when the field is omitted, CLUSTER_NAME will be used as DHCPServer.Name and a DHCP server will be created. - it will automatically create a network with the name DHCPSERVER_<DHCPServer.Name>_Private in the PowerVS workspace. - properties: - cidr: - description: Optional cidr for DHCP private network - type: string - dnsServer: - default: 1.1.1.1 - description: Optional DNS Server for DHCP service - type: string - id: - description: Optional id of the existing DHCPServer - type: string - name: - description: Optional name of DHCP Service. Only alphanumeric - characters and dashes are allowed. - type: string - snat: - default: true - description: Optional indicates if SNAT will be enabled - for DHCP service - type: boolean - type: object - ignition: - description: Ignition defines options related to the bootstrapping - systems where Ignition is used. - properties: - version: - default: "2.3" - description: Version defines which version of Ignition - will be used to generate bootstrap data. - enum: - - "2.3" - - "2.4" - - "3.0" - - "3.1" - - "3.2" - - "3.3" - - "3.4" - type: string - type: object - loadBalancers: - description: |- - loadBalancers is optional configuration for configuring loadbalancers to control plane or data plane nodes. 
- when omitted, the system will create a default public loadbalancer with the name CLUSTER_NAME-loadbalancer. - when specified, a VPC loadbalancer will be created and ControlPlaneEndpoint will be set to the hostname associated with the public loadbalancer. - when LoadBalancers[].ID is set, it is expected that a loadbalancer with that ID exists, or else the system will give an error. - when LoadBalancers[].Name is set, the system will first check for a loadbalancer with that Name; if none exists, the system will create a new loadbalancer. - For each loadbalancer, a default backend pool and front-end listener will be configured with port 6443. - items: - description: VPCLoadBalancerSpec defines the desired state - of a VPC load balancer. - properties: - additionalListeners: - description: AdditionalListeners sets the additional - listeners for the control plane load balancer. - items: - description: |- - AdditionalListenerSpec defines the desired state of an - additional listener on a VPC load balancer. - properties: - defaultPoolName: - description: defaultPoolName defines the name - of a VPC Load Balancer Backend Pool to use for - the VPC Load Balancer Listener. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - port: - description: Port sets the port for the additional - listener. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - protocol: - description: |- - protocol defines the protocol to use for the VPC Load Balancer Listener. - Will default to TCP protocol if not specified. - enum: - - http - - https - - tcp - - udp - type: string - selector: - description: |- - The selector is used to find IBMPowerVSMachines with matching labels. - If the label matches, the machine is then added to the load balancer listener configuration. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - x-kubernetes-list-type: map - backendPools: - description: backendPools defines the load balancer's - backend pools. 
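- # Example (hypothetical, not part of the generated output): one backend pool entry
- # satisfying the schema that follows; algorithm, protocol, and healthMonitor (with
- # delay, retries, timeout, and type) are required, and all values are placeholders:
- #   backendPools:
- #   - name: example-api-pool
- #     algorithm: round_robin
- #     protocol: tcp
- #     healthMonitor:
- #       delay: 5
- #       retries: 2
- #       timeout: 2
- #       type: tcp
- #       port: 6443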
- items: - description: VPCLoadBalancerBackendPoolSpec defines - the desired configuration of a VPC Load Balancer - Backend Pool. - properties: - algorithm: - description: algorithm defines the load balancing - algorithm to use. - enum: - - least_connections - - round_robin - - weighted_round_robin - type: string - healthMonitor: - description: healthMonitor defines the backend - pool's health monitor. - properties: - delay: - description: delay defines the seconds to - wait between health checks. - format: int64 - maximum: 60 - minimum: 2 - type: integer - port: - description: port defines the port to perform - health monitoring on. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - retries: - description: retries defines the max retries - for health check. - format: int64 - maximum: 10 - minimum: 1 - type: integer - timeout: - description: timeout defines the seconds to - wait for a health check response. - format: int64 - maximum: 59 - minimum: 1 - type: integer - type: - description: type defines the protocol used - for health checks. - enum: - - http - - https - - tcp - type: string - urlPath: - description: urlPath defines the URL to use - for health monitoring. - pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ - type: string - required: - - delay - - retries - - timeout - - type - type: object - name: - description: name defines the name of the Backend - Pool. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - protocol: - description: protocol defines the protocol to - use for the Backend Pool. - enum: - - http - - https - - tcp - - udp - type: string - required: - - algorithm - - healthMonitor - - protocol - type: object - type: array - id: - description: id of the loadbalancer - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: Name sets the name of the VPC load balancer. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - public: - default: true - description: public indicates whether the load balancer is - public or private - type: boolean - securityGroups: - description: |- - securityGroups defines the Security Groups to attach to the load balancer. - Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - subnets: - description: |- - subnets defines the VPC Subnets to attach to the load balancer. - Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. 
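- # Example (hypothetical, not part of the generated output): a subnet reference that
- # passes the "an id or name must be provided" validation closing this items schema
- # below; either field alone is sufficient, and the name is a placeholder:
- #   subnets:
- #   - name: example-vpc-subnet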
- minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - type: object - type: array - network: - description: |- - Network is the reference to the Network to use for this cluster. - when the field is omitted, a DHCP service will be created in the Power VS workspace and its private network will be used. - the network created by the DHCP service will have the following name format: - 1. when DHCPServer.Name is not set, the name will be DHCPSERVER_<CLUSTER_NAME>_Private. - 2. when DHCPServer.Name is set, the name will be DHCPSERVER_<DHCPServer.Name>_Private. - when Network.ID is set, it is expected that a network with that id exists in the PowerVS workspace, or else the system will give an error. - when Network.Name is set, the system will first check for a network with that Name in the PowerVS workspace; if none exists, the system will check for a DHCP network with the given Network.Name; if that also does not exist, it will create a new DHCP service whose network name follows the format above. - Network.RegEx is not yet supported and the system will ignore the value. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - resourceGroup: - description: |- - resourceGroup name under which the resources will be created. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - 1. it is expected to set ResourceGroup.Name; not setting it will result in a webhook error. - ResourceGroup.ID and ResourceGroup.Regex are not yet supported and the system will ignore the values. - properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstance: - description: |- - serviceInstance is the reference to the Power VS server workspace on which the server instance(VM) will be created. - Power VS server workspace is a container for all Power VS instances at a specific geographic region. - serviceInstance can be created via IBM Cloud catalog or CLI. - supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. - More detail about Power VS service instance. - https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server - when omitted, the system will dynamically create a service instance with the name CLUSTER_NAME-serviceInstance. - when ServiceInstance.ID is set, it is expected that a service instance with that id exists in the PowerVS workspace, or else the system will give an error. - when ServiceInstance.Name is set, the system will first check for a service instance with that Name in the PowerVS workspace; if none exists, the system will create a new instance. - if more than one service instance exists with the ServiceInstance.Name in the given Zone, installation fails with an error; use ServiceInstance.ID in those situations to select the specific service instance. - ServiceInstance.Regex is not yet supported and the system will ignore the value. 
- properties: - id: - description: ID of resource - minLength: 1 - type: string - name: - description: Name of resource - minLength: 1 - type: string - regex: - description: |- - Regular expression to match resource, - In case of multiple resources matches the provided regular expression the first matched resource will be selected - minLength: 1 - type: string - type: object - serviceInstanceID: - description: |- - ServiceInstanceID is the id of the power cloud instance where the vsi instance will get deployed. - Deprecated: use ServiceInstance instead - type: string - transitGateway: - description: |- - transitGateway contains information about IBM Cloud TransitGateway - IBM Cloud TransitGateway helps in establishing network connectivity between IBM Cloud Power VS and VPC infrastructure - more information about TransitGateway can be found here https://www.ibm.com/products/transit-gateway. - when TransitGateway.ID is set, its expected that there exist a TransitGateway with ID or else system will give error. - when TransitGateway.Name is set, system will first check for TransitGateway with Name, if not exist system will create new TransitGateway. - properties: - globalRouting: - description: |- - globalRouting indicates whether to set global routing true or not while creating the transit gateway. - set this field to true only when PowerVS and VPC are from different regions, if they are same it's suggested to use local routing by setting the field to false. - when the field is omitted, based on PowerVS region (region associated with IBMPowerVSCluster.Spec.Zone) and VPC region(IBMPowerVSCluster.Spec.VPC.Region) system will decide whether to enable globalRouting or not. - type: boolean - id: - description: id of resource. - type: string - name: - description: name of resource. - maxLength: 63 - minLength: 1 - pattern: ^([a-zA-Z]|[a-zA-Z][-_a-zA-Z0-9]*[a-zA-Z0-9])$ - type: string - type: object - vpc: - description: |- - vpc contains information about IBM Cloud VPC resources. - when omitted system will dynamically create the VPC with name CLUSTER_NAME-vpc. - when VPC.ID is set, its expected that there exist a VPC with ID or else system will give error. - when VPC.Name is set, system will first check for VPC with Name, if not exist system will create new VPC. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - 1. it is expected to set the VPC.Region, not setting will result in webhook error. - properties: - id: - description: id of resource. - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: name of resource. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - region: - description: |- - region of IBM Cloud VPC. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - it is expected to set the region, not setting will result in webhook error. - type: string - type: object - vpcSecurityGroups: - description: VPCSecurityGroups to attach it to the VPC resource - items: - description: VPCSecurityGroup defines a VPC Security Group - that should exist or be created within the specified VPC, - with the specified Security Group Rules. - properties: - id: - description: id of the Security Group. - type: string - name: - description: name of the Security Group. - type: string - rules: - description: rules are the Security Group Rules for - the Security Group. 
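A hypothetical fragment combining the transit gateway and VPC references described above (names invented); globalRouting is left unset so the controller derives it from the PowerVS and VPC regions:

    spec:
      transitGateway:
        name: my-transit-gateway   # created if no transit gateway with this name exists
      vpc:
        name: my-vpc
        region: eu-gb              # required when the create-infra annotation is set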
- items: - description: VPCSecurityGroupRule defines a VPC Security - Group Rule for a specified Security Group. - properties: - action: - description: action defines whether to allow or - deny traffic defined by the Security Group Rule. - enum: - - allow - - deny - type: string - destination: - description: |- - destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule. - Only used when direction is VPCSecurityGroupRuleDirectionOutbound. - properties: - icmpCode: - description: |- - icmpCode is the ICMP code for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - icmpType: - description: |- - icmpType is the ICMP type for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - portRange: - description: portRange is a range of ports - allowed for the Rule's remote. - properties: - maximumPort: - description: maximumPort is the inclusive - upper range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - minimumPort: - description: minimumPort is the inclusive - lower range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - type: object - x-kubernetes-validations: - - message: maximum port must be greater than - or equal to minimum port - rule: self.maximumPort >= self.minimumPort - protocol: - description: protocol defines the traffic - protocol used for the Security Group Rule. - enum: - - all - - icmp - - tcp - - udp - type: string - remotes: - description: |- - remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote. - Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc. - This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc. - items: - description: |- - VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. - The type of remote defines the additional remote details where are used for defining the remote. - properties: - address: - description: |2- - address is the address to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. - type: string - cidrSubnetName: - description: |- - cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. - type: string - remoteType: - description: remoteType defines the - type of filter to define for the remote's - destination/source. - enum: - - any - - cidr - - address - - sg - type: string - securityGroupName: - description: |- - securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG - type: string - required: - - remoteType - type: object - x-kubernetes-validations: - - message: cidrSubnetName, addresss, and - securityGroupName are not valid for - VPCSecurityGroupRuleRemoteTypeAny remoteType - rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only cidrSubnetName is valid - for VPCSecurityGroupRuleRemoteTypeCIDR - remoteType - rule: 'self.remoteType == ''cidr'' ? 
(has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP - remoteType - rule: 'self.remoteType == ''address'' - ? (has(self.address) && !has(self.cidrSubnetName) - && !has(self.securityGroupName)) : true' - - message: only securityGroupName is valid - for VPCSecurityGroupRuleRemoteTypeSG - remoteType - rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName) - && !has(self.cidrSubnetName) && !has(self.address)) - : true' - type: array - required: - - protocol - - remotes - type: object - x-kubernetes-validations: - - message: icmpCode and icmpType are only supported - for VPCSecurityGroupRuleProtocolIcmp protocol - rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) - && !has(self.icmpType)) : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll - protocol - rule: 'self.protocol == ''all'' ? !has(self.portRange) - : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol == ''icmp'' ? !has(self.portRange) - : true' - direction: - description: direction defines whether the traffic - is inbound or outbound for the Security Group - Rule. - enum: - - inbound - - outbound - type: string - securityGroupID: - description: securityGroupID is the ID of the - Security Group for the Security Group Rule. - type: string - source: - description: |- - source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule. - Only used when direction is VPCSecurityGroupRuleDirectionInbound. - properties: - icmpCode: - description: |- - icmpCode is the ICMP code for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - icmpType: - description: |- - icmpType is the ICMP type for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - portRange: - description: portRange is a range of ports - allowed for the Rule's remote. - properties: - maximumPort: - description: maximumPort is the inclusive - upper range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - minimumPort: - description: minimumPort is the inclusive - lower range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - type: object - x-kubernetes-validations: - - message: maximum port must be greater than - or equal to minimum port - rule: self.maximumPort >= self.minimumPort - protocol: - description: protocol defines the traffic - protocol used for the Security Group Rule. - enum: - - all - - icmp - - tcp - - udp - type: string - remotes: - description: |- - remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote. - Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc. - This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc. - items: - description: |- - VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. - The type of remote defines the additional remote details where are used for defining the remote. - properties: - address: - description: |2- - address is the address to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. 
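Pulling the cross-field CEL validations above together, a security group rule passes admission only when the direction matches the source/destination block and the remote fields match the chosen remoteType. A hypothetical inbound rule (names invented):

    rules:
      - action: allow
        direction: inbound              # inbound rules must set source, not destination
        source:
          protocol: tcp
          portRange:
            minimumPort: 443
            maximumPort: 443            # maximumPort >= minimumPort
          remotes:
            - remoteType: cidr
              cidrSubnetName: my-subnet # only cidrSubnetName is valid for remoteType cidr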
- type: string - cidrSubnetName: - description: |- - cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. - type: string - remoteType: - description: remoteType defines the - type of filter to define for the remote's - destination/source. - enum: - - any - - cidr - - address - - sg - type: string - securityGroupName: - description: |- - securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG - type: string - required: - - remoteType - type: object - x-kubernetes-validations: - - message: cidrSubnetName, addresss, and - securityGroupName are not valid for - VPCSecurityGroupRuleRemoteTypeAny remoteType - rule: 'self.remoteType == ''any'' ? (!has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only cidrSubnetName is valid - for VPCSecurityGroupRuleRemoteTypeCIDR - remoteType - rule: 'self.remoteType == ''cidr'' ? (has(self.cidrSubnetName) - && !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only address is valid for VPCSecurityGroupRuleRemoteTypeIP - remoteType - rule: 'self.remoteType == ''address'' - ? (has(self.address) && !has(self.cidrSubnetName) - && !has(self.securityGroupName)) : true' - - message: only securityGroupName is valid - for VPCSecurityGroupRuleRemoteTypeSG - remoteType - rule: 'self.remoteType == ''sg'' ? (has(self.securityGroupName) - && !has(self.cidrSubnetName) && !has(self.address)) - : true' - type: array - required: - - protocol - - remotes - type: object - x-kubernetes-validations: - - message: icmpCode and icmpType are only supported - for VPCSecurityGroupRuleProtocolIcmp protocol - rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) - && !has(self.icmpType)) : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll - protocol - rule: 'self.protocol == ''all'' ? !has(self.portRange) - : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol == ''icmp'' ? !has(self.portRange) - : true' - required: - - action - - direction - type: object - x-kubernetes-validations: - - message: both destination and source cannot be provided - rule: (has(self.destination) && !has(self.source)) - || (!has(self.destination) && has(self.source)) - - message: source must be set for VPCSecurityGroupRuleDirectionInbound - direction - rule: 'self.direction == ''inbound'' ? has(self.source) - : true' - - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound - direction - rule: 'self.direction == ''inbound'' ? !has(self.destination) - : true' - - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound - direction - rule: 'self.direction == ''outbound'' ? has(self.destination) - : true' - - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound - direction - rule: 'self.direction == ''outbound'' ? !has(self.source) - : true' - type: array - tags: - description: tags are tags to add to the Security Group. - items: - type: string - type: array - type: object - x-kubernetes-validations: - - message: either an id or name must be specified - rule: has(self.id) || has(self.name) - type: array - vpcSubnets: - description: |- - vpcSubnets contains information about IBM Cloud VPC Subnet resources. 
- when omitted system will create the subnets in all the zone corresponding to VPC.Region, with name CLUSTER_NAME-vpcsubnet-ZONE_NAME. - possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server. - when VPCSubnets[].ID is set, its expected that there exist a subnet with ID or else system will give error. - when VPCSubnets[].Zone is not set, a random zone is picked from available zones of VPC.Region. - when VPCSubnets[].Name is not set, system will set name as CLUSTER_NAME-vpcsubnet-INDEX. - if subnet with name VPCSubnets[].Name not found, system will create new subnet in VPCSubnets[].Zone. - items: - description: Subnet describes a subnet. - properties: - cidr: - type: string - id: - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - zone: - type: string - type: object - type: array - zone: - description: |- - zone is the name of Power VS zone where the cluster will be created - possible values can be found here https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server. - when powervs.cluster.x-k8s.io/create-infra=true annotation is set on IBMPowerVSCluster resource, - 1. it is expected to set the zone, not setting will result in webhook error. - 2. the zone should have PER capabilities, or else system will give error. - type: string - required: - - network - - serviceInstanceID - type: object - required: - - spec - type: object - required: - - template - type: object - type: object - served: true - storage: true - subresources: {} - status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null - --- - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.18.0 - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - cluster.x-k8s.io/v1beta1: v1beta1_v1beta2 - clusterctl.cluster.x-k8s.io: "" - name: ibmvpcclustertemplates.infrastructure.cluster.x-k8s.io - spec: - group: infrastructure.cluster.x-k8s.io - names: - categories: - - cluster-api - kind: IBMVPCClusterTemplate - listKind: IBMVPCClusterTemplateList - plural: ibmvpcclustertemplates - shortNames: - - ibmvpcct - singular: ibmvpcclustertemplate - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Time duration since creation of IBMVPCClusterTemplate - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta2 - schema: - openAPIV3Schema: - description: IBMVPCClusterTemplate is the Schema for the ibmvpcclustertemplates - API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. 
- In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IBMVPCClusterTemplateSpec defines the desired state of IBMVPCClusterTemplate. - properties: - template: - description: IBMVPCClusterTemplateResource describes the data needed - to create an IBMVPCCluster from a template. - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - labels is a map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - type: object - spec: - description: IBMVPCClusterSpec defines the desired state of IBMVPCCluster. - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint - used to communicate with the control plane. - properties: - host: - description: host is the hostname on which the API server - is serving. - maxLength: 512 - type: string - port: - description: port is the port on which the API server - is serving. - format: int32 - type: integer - required: - - host - - port - type: object - controlPlaneLoadBalancer: - description: |- - ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. - Use this for legacy support, use Network.LoadBalancers for the extended VPC support. - properties: - additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer. - items: - description: |- - AdditionalListenerSpec defines the desired state of an - additional listener on an VPC load balancer. - properties: - defaultPoolName: - description: defaultPoolName defines the name of - a VPC Load Balancer Backend Pool to use for the - VPC Load Balancer Listener. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - port: - description: Port sets the port for the additional - listener. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - protocol: - description: |- - protocol defines the protocol to use for the VPC Load Balancer Listener. - Will default to TCP protocol if not specified. - enum: - - http - - https - - tcp - - udp - type: string - selector: - description: |- - The selector is used to find IBMPowerVSMachines with matching labels. - If the label matches, the machine is then added to the load balancer listener configuration. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - x-kubernetes-list-type: map - backendPools: - description: backendPools defines the load balancer's - backend pools. - items: - description: VPCLoadBalancerBackendPoolSpec defines - the desired configuration of a VPC Load Balancer Backend - Pool. - properties: - algorithm: - description: algorithm defines the load balancing - algorithm to use. - enum: - - least_connections - - round_robin - - weighted_round_robin - type: string - healthMonitor: - description: healthMonitor defines the backend pool's - health monitor. - properties: - delay: - description: delay defines the seconds to wait - between health checks. - format: int64 - maximum: 60 - minimum: 2 - type: integer - port: - description: port defines the port to perform - health monitoring on. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - retries: - description: retries defines the max retries - for health check. - format: int64 - maximum: 10 - minimum: 1 - type: integer - timeout: - description: timeout defines the seconds to - wait for a health check response. - format: int64 - maximum: 59 - minimum: 1 - type: integer - type: - description: type defines the protocol used - for health checks. - enum: - - http - - https - - tcp - type: string - urlPath: - description: urlPath defines the URL to use - for health monitoring. - pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ - type: string - required: - - delay - - retries - - timeout - - type - type: object - name: - description: name defines the name of the Backend - Pool. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - protocol: - description: protocol defines the protocol to use - for the Backend Pool. - enum: - - http - - https - - tcp - - udp - type: string - required: - - algorithm - - healthMonitor - - protocol - type: object - type: array - id: - description: id of the loadbalancer - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: Name sets the name of the VPC load balancer. 
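As a sketch of how the listener and backend pool schemas above fit together (all names and ports invented), a listener forwards to a named pool and the selector adds matching machines to it:

    controlPlaneLoadBalancer:
      name: example-lb
      additionalListeners:
        - port: 22
          protocol: tcp
          defaultPoolName: example-pool
          selector:
            matchLabels:
              listener: ssh        # machines carrying this label join the pool
      backendPools:
        - name: example-pool
          algorithm: round_robin
          protocol: tcp
          healthMonitor:
            delay: 5               # seconds between checks (2-60)
            retries: 2
            timeout: 2             # seconds to wait for a response (1-59)
            type: tcp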
- maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - public: - default: true - description: public indicates that load balancer is public - or private - type: boolean - securityGroups: - description: |- - securityGroups defines the Security Groups to attach to the load balancer. - Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - subnets: - description: |- - subnets defines the VPC Subnets to attach to the load balancer. - Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - type: object - image: - description: image represents the Image details used for the - cluster. - properties: - cosBucket: - description: cosBucket is the name of the IBM Cloud COS - Bucket containing the source of the image, if necessary. - type: string - cosBucketRegion: - description: cosBucketRegion is the COS region the bucket - is in. - type: string - cosInstance: - description: cosInstance is the name of the IBM Cloud - COS Instance containing the source of the image, if - necessary. - type: string - cosObject: - description: cosObject is the name of an IBM Cloud COS - Object used as the source of the image, if necessary. - type: string - crn: - description: crn is the IBM Cloud CRN of the existing - VPC Custom Image. - type: string - name: - description: name is the name of the desired VPC Custom - Image. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - operatingSystem: - description: operatingSystem is the Custom Image's Operating - System name. - type: string - resourceGroup: - description: resourceGroup is the Resource Group to create - the Custom Image in. - properties: - id: - description: id defines the IBM Cloud Resource ID. - type: string - name: - description: name defines the IBM Cloud Resource Name. - type: string - required: - - id - type: object - type: object - x-kubernetes-validations: - - message: if any of cosInstance, cosBucket, or cosObject - are specified, all must be specified - rule: (!has(self.cosInstance) && !has(self.cosBucket) && - !has(self.cosObject)) || (has(self.cosInstance) && has(self.cosBucket) - && has(self.cosObject)) - - message: an existing image name or crn must be provided, - or to create a new image the cos resources must be provided, - with or without a name - rule: has(self.name) || has(self.crn) || (has(self.cosInstance) - && has(self.cosBucket) && has(self.cosObject)) - network: - description: network represents the VPC network to use for - the cluster.
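The two image validations above allow either referencing an existing Custom Image (name or crn) or importing one from COS, in which case all three COS fields must be set together. A hypothetical COS-backed image (names invented):

    image:
      name: example-image               # optional; the cos fields below drive the import
      cosInstance: example-cos-instance
      cosBucket: example-bucket         # cosInstance, cosBucket, and cosObject must be set together
      cosObject: example-image.qcow2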
- properties: - controlPlaneSubnets: - description: controlPlaneSubnets is a set of Subnet's - which define the Control Plane subnets. - items: - description: Subnet describes a subnet. - properties: - cidr: - type: string - id: - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - zone: - type: string - type: object - type: array - loadBalancers: - description: loadBalancers is a set of VPC Load Balancer - definitions to use for the cluster. - items: - description: VPCLoadBalancerSpec defines the desired - state of an VPC load balancer. - properties: - additionalListeners: - description: AdditionalListeners sets the additional - listeners for the control plane load balancer. - items: - description: |- - AdditionalListenerSpec defines the desired state of an - additional listener on an VPC load balancer. - properties: - defaultPoolName: - description: defaultPoolName defines the name - of a VPC Load Balancer Backend Pool to use - for the VPC Load Balancer Listener. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - port: - description: Port sets the port for the additional - listener. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - protocol: - description: |- - protocol defines the protocol to use for the VPC Load Balancer Listener. - Will default to TCP protocol if not specified. - enum: - - http - - https - - tcp - - udp - type: string - selector: - description: |- - The selector is used to find IBMPowerVSMachines with matching labels. - If the label matches, the machine is then added to the load balancer listener configuration. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - x-kubernetes-list-type: map - backendPools: - description: backendPools defines the load balancer's - backend pools. - items: - description: VPCLoadBalancerBackendPoolSpec defines - the desired configuration of a VPC Load Balancer - Backend Pool. 
- properties: - algorithm: - description: algorithm defines the load balancing - algorithm to use. - enum: - - least_connections - - round_robin - - weighted_round_robin - type: string - healthMonitor: - description: healthMonitor defines the backend - pool's health monitor. - properties: - delay: - description: delay defines the seconds - to wait between health checks. - format: int64 - maximum: 60 - minimum: 2 - type: integer - port: - description: port defines the port to - perform health monitoring on. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - retries: - description: retries defines the max retries - for health check. - format: int64 - maximum: 10 - minimum: 1 - type: integer - timeout: - description: timeout defines the seconds - to wait for a health check response. - format: int64 - maximum: 59 - minimum: 1 - type: integer - type: - description: type defines the protocol - used for health checks. - enum: - - http - - https - - tcp - type: string - urlPath: - description: urlPath defines the URL to - use for health monitoring. - pattern: ^\/(([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})+(\/([a-zA-Z0-9-._~!$&'()*+,;=:@]|%[a-fA-F0-9]{2})*)*)?(\\?([a-zA-Z0-9-._~!$&'()*+,;=:@\/?]|%[a-fA-F0-9]{2})*)?$ - type: string - required: - - delay - - retries - - timeout - - type - type: object - name: - description: name defines the name of the - Backend Pool. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - protocol: - description: protocol defines the protocol - to use for the Backend Pool. - enum: - - http - - https - - tcp - - udp - type: string - required: - - algorithm - - healthMonitor - - protocol - type: object - type: array - id: - description: id of the loadbalancer - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - description: Name sets the name of the VPC load - balancer. - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - public: - default: true - description: public indicates that load balancer - is public or private - type: boolean - securityGroups: - description: |- - securityGroups defines the Security Groups to attach to the load balancer. - Security Groups defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - subnets: - description: |- - subnets defines the VPC Subnets to attach to the load balancer. - Subnets defined here are expected to already exist when the load balancer is reconciled (these do not get created when reconciling the load balancer). - items: - description: VPCResource represents a VPC resource. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - type: array - type: object - type: array - resourceGroup: - description: |- - resourceGroup is the Resource Group containing all of the network resources.
- This can be different than the Resource Group containing the remaining cluster resources. - properties: - id: - description: id defines the IBM Cloud Resource ID. - type: string - name: - description: name defines the IBM Cloud Resource Name. - type: string - required: - - id - type: object - securityGroups: - description: securityGroups is a set of VPCSecurityGroup's - which define the VPC Security Groups that manage traffic - within and out of the VPC. - items: - description: VPCSecurityGroup defines a VPC Security - Group that should exist or be created within the specified - VPC, with the specified Security Group Rules. - properties: - id: - description: id of the Security Group. - type: string - name: - description: name of the Security Group. - type: string - rules: - description: rules are the Security Group Rules - for the Security Group. - items: - description: VPCSecurityGroupRule defines a VPC - Security Group Rule for a specified Security - Group. - properties: - action: - description: action defines whether to allow - or deny traffic defined by the Security - Group Rule. - enum: - - allow - - deny - type: string - destination: - description: |- - destination is a VPCSecurityGroupRulePrototype which defines the destination of outbound traffic for the Security Group Rule. - Only used when direction is VPCSecurityGroupRuleDirectionOutbound. - properties: - icmpCode: - description: |- - icmpCode is the ICMP code for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - icmpType: - description: |- - icmpType is the ICMP type for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - portRange: - description: portRange is a range of ports - allowed for the Rule's remote. - properties: - maximumPort: - description: maximumPort is the inclusive - upper range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - minimumPort: - description: minimumPort is the inclusive - lower range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - type: object - x-kubernetes-validations: - - message: maximum port must be greater - than or equal to minimum port - rule: self.maximumPort >= self.minimumPort - protocol: - description: protocol defines the traffic - protocol used for the Security Group - Rule. - enum: - - all - - icmp - - tcp - - udp - type: string - remotes: - description: |- - remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote. - Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc. - This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc. - items: - description: |- - VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. - The type of remote defines the additional remote details where are used for defining the remote. - properties: - address: - description: |2- - address is the address to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. - type: string - cidrSubnetName: - description: |- - cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. - type: string - remoteType: - description: remoteType defines - the type of filter to define for - the remote's destination/source. 
- enum: - - any - - cidr - - address - - sg - type: string - securityGroupName: - description: |- - securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG - type: string - required: - - remoteType - type: object - x-kubernetes-validations: - - message: cidrSubnetName, addresss, - and securityGroupName are not valid - for VPCSecurityGroupRuleRemoteTypeAny - remoteType - rule: 'self.remoteType == ''any'' - ? (!has(self.cidrSubnetName) && - !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only cidrSubnetName is valid - for VPCSecurityGroupRuleRemoteTypeCIDR - remoteType - rule: 'self.remoteType == ''cidr'' - ? (has(self.cidrSubnetName) && !has(self.address) - && !has(self.securityGroupName)) - : true' - - message: only address is valid for - VPCSecurityGroupRuleRemoteTypeIP - remoteType - rule: 'self.remoteType == ''address'' - ? (has(self.address) && !has(self.cidrSubnetName) - && !has(self.securityGroupName)) - : true' - - message: only securityGroupName is - valid for VPCSecurityGroupRuleRemoteTypeSG - remoteType - rule: 'self.remoteType == ''sg'' ? - (has(self.securityGroupName) && - !has(self.cidrSubnetName) && !has(self.address)) - : true' - type: array - required: - - protocol - - remotes - type: object - x-kubernetes-validations: - - message: icmpCode and icmpType are only - supported for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) - && !has(self.icmpType)) : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll - protocol - rule: 'self.protocol == ''all'' ? !has(self.portRange) - : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol == ''icmp'' ? !has(self.portRange) - : true' - direction: - description: direction defines whether the - traffic is inbound or outbound for the Security - Group Rule. - enum: - - inbound - - outbound - type: string - securityGroupID: - description: securityGroupID is the ID of - the Security Group for the Security Group - Rule. - type: string - source: - description: |- - source is a VPCSecurityGroupRulePrototype which defines the source of inbound traffic for the Security Group Rule. - Only used when direction is VPCSecurityGroupRuleDirectionInbound. - properties: - icmpCode: - description: |- - icmpCode is the ICMP code for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - icmpType: - description: |- - icmpType is the ICMP type for the Rule. - Only used when Protocol is VPCSecurityGroupRuleProtocolIcmp. - format: int64 - type: integer - portRange: - description: portRange is a range of ports - allowed for the Rule's remote. - properties: - maximumPort: - description: maximumPort is the inclusive - upper range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - minimumPort: - description: minimumPort is the inclusive - lower range of ports. - format: int64 - maximum: 65535 - minimum: 1 - type: integer - type: object - x-kubernetes-validations: - - message: maximum port must be greater - than or equal to minimum port - rule: self.maximumPort >= self.minimumPort - protocol: - description: protocol defines the traffic - protocol used for the Security Group - Rule. 
- enum: - - all - - icmp - - tcp - - udp - type: string - remotes: - description: |- - remotes is a set of VPCSecurityGroupRuleRemote's that define the traffic allowed by the Rule's remote. - Specifying multiple VPCSecurityGroupRuleRemote's creates a unique Security Group Rule with the shared Protocol, PortRange, etc. - This allows for easier management of Security Group Rule's for sets of CIDR's, IP's, etc. - items: - description: |- - VPCSecurityGroupRuleRemote defines a VPC Security Group Rule's remote details. - The type of remote defines the additional remote details where are used for defining the remote. - properties: - address: - description: |2- - address is the address to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeAddress. - type: string - cidrSubnetName: - description: |- - cidrSubnetName is the name of the VPC Subnet to retrieve the CIDR from, to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeCIDR. - type: string - remoteType: - description: remoteType defines - the type of filter to define for - the remote's destination/source. - enum: - - any - - cidr - - address - - sg - type: string - securityGroupName: - description: |- - securityGroupName is the name of the VPC Security Group to use for the remote's destination/source. - Only used when remoteType is VPCSecurityGroupRuleRemoteTypeSG - type: string - required: - - remoteType - type: object - x-kubernetes-validations: - - message: cidrSubnetName, addresss, - and securityGroupName are not valid - for VPCSecurityGroupRuleRemoteTypeAny - remoteType - rule: 'self.remoteType == ''any'' - ? (!has(self.cidrSubnetName) && - !has(self.address) && !has(self.securityGroupName)) - : true' - - message: only cidrSubnetName is valid - for VPCSecurityGroupRuleRemoteTypeCIDR - remoteType - rule: 'self.remoteType == ''cidr'' - ? (has(self.cidrSubnetName) && !has(self.address) - && !has(self.securityGroupName)) - : true' - - message: only address is valid for - VPCSecurityGroupRuleRemoteTypeIP - remoteType - rule: 'self.remoteType == ''address'' - ? (has(self.address) && !has(self.cidrSubnetName) - && !has(self.securityGroupName)) - : true' - - message: only securityGroupName is - valid for VPCSecurityGroupRuleRemoteTypeSG - remoteType - rule: 'self.remoteType == ''sg'' ? - (has(self.securityGroupName) && - !has(self.cidrSubnetName) && !has(self.address)) - : true' - type: array - required: - - protocol - - remotes - type: object - x-kubernetes-validations: - - message: icmpCode and icmpType are only - supported for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol != ''icmp'' ? (!has(self.icmpCode) - && !has(self.icmpType)) : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolAll - protocol - rule: 'self.protocol == ''all'' ? !has(self.portRange) - : true' - - message: portRange is not valid for VPCSecurityGroupRuleProtocolIcmp - protocol - rule: 'self.protocol == ''icmp'' ? !has(self.portRange) - : true' - required: - - action - - direction - type: object - x-kubernetes-validations: - - message: both destination and source cannot - be provided - rule: (has(self.destination) && !has(self.source)) - || (!has(self.destination) && has(self.source)) - - message: source must be set for VPCSecurityGroupRuleDirectionInbound - direction - rule: 'self.direction == ''inbound'' ? 
has(self.source) - : true' - - message: destination is not valid for VPCSecurityGroupRuleDirectionInbound - direction - rule: 'self.direction == ''inbound'' ? !has(self.destination) - : true' - - message: destination must be set for VPCSecurityGroupRuleDirectionOutbound - direction - rule: 'self.direction == ''outbound'' ? has(self.destination) - : true' - - message: source is not valid for VPCSecurityGroupRuleDirectionOutbound - direction - rule: 'self.direction == ''outbound'' ? !has(self.source) - : true' - type: array - tags: - description: tags are tags to add to the Security - Group. - items: - type: string - type: array - type: object - x-kubernetes-validations: - - message: either an id or name must be specified - rule: has(self.id) || has(self.name) - type: array - vpc: - description: vpc defines the IBM Cloud VPC for extended - VPC Infrastructure support. - properties: - id: - description: id of the resource. - minLength: 1 - type: string - name: - description: name of the resource. - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: an id or name must be provided - rule: has(self.id) || has(self.name) - workerSubnets: - description: workerSubnets is a set of Subnet's which - define the Worker subnets. - items: - description: Subnet describes a subnet. - properties: - cidr: - type: string - id: - maxLength: 64 - minLength: 1 - pattern: ^[-0-9a-z_]+$ - type: string - name: - maxLength: 63 - minLength: 1 - pattern: ^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$ - type: string - zone: - type: string - type: object - type: array - type: object - region: - description: The IBM Cloud Region the cluster lives in. - type: string - resourceGroup: - description: The VPC resources should be created under the - resource group. - type: string - vpc: - description: The Name of VPC. - type: string - zone: - description: The Name of availability zone. 
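Tying the template schema together, a minimal hypothetical IBMVPCClusterTemplate (values invented) that satisfies the required region and resourceGroup fields:

    apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
    kind: IBMVPCClusterTemplate
    metadata:
      name: example-template
    spec:
      template:
        spec:
          region: eu-gb
          resourceGroup: example-rg
          vpc: example-vpc
          zone: eu-gb-1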
- type: string - required: - - region - - resourceGroup - type: object - required: - - spec - type: object - type: object - type: object - served: true - storage: true - subresources: {} - status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - annotations: - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-manager-role - rules: - - apiGroups: - - "" - resources: - - events - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - - apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create - - apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create - - apiGroups: - - cluster.x-k8s.io - resources: - - clusters - - clusters/status - - machines - - machines/status - verbs: - - get - - list - - watch - - apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - ibmpowervsclusters - - ibmpowervsimages - - ibmpowervsmachines - - ibmvpcclusters - - ibmvpcmachines - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - ibmpowervsclusters/status - - ibmpowervsimages/status - - ibmpowervsmachines/status - - ibmpowervsmachinetemplates/status - - ibmvpcclusters/status - - ibmvpcmachines/status - - ibmvpcmachinetemplates/status - verbs: - - get - - patch - - update - - apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - ibmpowervsmachinetemplates - - ibmvpcmachinetemplates - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - annotations: - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-manager-rolebinding - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: capi-ibmcloud-manager-role - subjects: - - kind: ServiceAccount - name: capi-ibmcloud-manager - namespace: openshift-cluster-api - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - annotations: - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-leader-elect-role - namespace: openshift-cluster-api - rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - - apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch - - 
apiGroups: - - "" - resources: - - events - verbs: - - create - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - annotations: - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-leader-elect-rolebinding - namespace: openshift-cluster-api - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: capi-ibmcloud-leader-elect-role - subjects: - - kind: ServiceAccount - name: capi-ibmcloud-manager - namespace: openshift-cluster-api - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - annotations: - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-manager - namespace: openshift-cluster-api - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - control-plane: controller-manager - name: capi-ibmcloud-controller-manager - namespace: openshift-cluster-api - spec: - replicas: 1 - selector: - matchLabels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - control-plane: controller-manager - strategy: {} - template: - metadata: - annotations: - target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - control-plane: controller-manager - spec: - containers: - - args: - - --leader-elect - - --provider-id-fmt=${PROVIDER_ID_FORMAT:=v2} - - --diagnostics-address=${CAPIBM_DIAGNOSTICS_ADDRESS:=:8443} - - --insecure-diagnostics=${CAPIBM_INSECURE_DIAGNOSTICS:=false} - - --service-endpoint=${SERVICE_ENDPOINT:=none} - - --v=${LOGLEVEL:=0} - command: - - /manager - env: - - name: IBM_CREDENTIALS_FILE - value: /home/.ibmcloud/ibm-credentials.env - image: to.be/replaced:v99 - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: healthz - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - - containerPort: 9440 - name: healthz - protocol: TCP - - containerPort: 8443 - name: metrics - protocol: TCP - readinessProbe: - httpGet: - path: /readyz - port: healthz - resources: - requests: - cpu: 10m - memory: 50Mi - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - - mountPath: /home/.ibmcloud - name: credentials - priorityClassName: system-cluster-critical - serviceAccountName: capi-ibmcloud-manager - terminationGracePeriodSeconds: 10 - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: webhook-server-cert - - name: credentials - secret: - secretName: capi-ibmcloud-manager-bootstrap-credentials - status: {} - --- - apiVersion: 
admissionregistration.k8s.io/v1 - kind: MutatingWebhookConfiguration - metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-mutating-webhook-configuration - webhooks: - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervscluster - failurePolicy: Fail - name: mibmpowervscluster.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsclusters - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsclustertemplate - failurePolicy: Fail - name: mibmpowervsclustertemplate.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsclustertemplates - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsimage - failurePolicy: Fail - name: mibmpowervsimage.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsimages - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachine - failurePolicy: Fail - name: mibmpowervsmachine.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsmachines - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachinetemplate - failurePolicy: Fail - name: mibmpowervsmachinetemplate.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsmachinetemplates - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpccluster - failurePolicy: Fail - name: mibmvpccluster.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmvpcclusters - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachine - failurePolicy: Fail - name: mibmvpcmachine.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmvpcmachines - sideEffects: None - - 
admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachinetemplate - failurePolicy: Fail - name: mibmvpcmachinetemplate.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmvpcmachinetemplates - sideEffects: None - --- - apiVersion: admissionregistration.k8s.io/v1 - kind: ValidatingWebhookConfiguration - metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-validating-webhook-configuration - webhooks: - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervscluster - failurePolicy: Fail - name: vibmpowervscluster.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsclusters - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsclustertemplate - failurePolicy: Fail - name: vibmpowervsclustertemplate.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsclustertemplates - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsimage - failurePolicy: Fail - name: vibmpowervsimage.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsimages - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachine - failurePolicy: Fail - name: vibmpowervsmachine.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsmachines - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmpowervsmachinetemplate - failurePolicy: Fail - name: vibmpowervsmachinetemplate.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmpowervsmachinetemplates - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpccluster - failurePolicy: Fail - name: vibmvpccluster.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - 
resources: - - ibmvpcclusters - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachine - failurePolicy: Fail - name: vibmvpcmachine.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmvpcmachines - sideEffects: None - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-ibmvpcmachinetemplate - failurePolicy: Fail - name: vibmvpcmachinetemplate.kb.io - rules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - ibmvpcmachinetemplates - sideEffects: None - --- - apiVersion: v1 - kind: Service - metadata: - annotations: - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - service.beta.openshift.io/serving-cert-secret-name: webhook-server-cert - labels: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - clusterctl.cluster.x-k8s.io: "" - name: capi-ibmcloud-webhook-service - namespace: openshift-cluster-api - spec: - ports: - - port: 443 - targetPort: 9443 - selector: - cluster.x-k8s.io/provider: infrastructure-ibmcloud - control-plane: controller-manager - --- - apiVersion: admissionregistration.k8s.io/v1 - kind: ValidatingAdmissionPolicy - metadata: - name: openshift-cluster-api-protect-powervscluster - spec: - failurePolicy: Fail - matchConstraints: - resourceRules: - - apiGroups: - - infrastructure.cluster.x-k8s.io - apiVersions: - - '*' - operations: - - DELETE - resources: - - powervsclusters - paramKind: - apiVersion: config.openshift.io/v1 - kind: Infrastructure - validations: - - expression: '!(oldObject.metadata.name == params.status.infrastructureName)' - message: InfraCluster resources with metadata.name corresponding to the cluster - infrastructureName cannot be deleted. - --- - apiVersion: admissionregistration.k8s.io/v1 - kind: ValidatingAdmissionPolicyBinding - metadata: - name: openshift-cluster-api-protect-powervscluster - spec: - matchResources: - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: openshift-cluster-api - paramRef: - name: cluster - parameterNotFoundAction: Deny - policyName: openshift-cluster-api-protect-powervscluster - validationActions: - - Deny - metadata: | - # maps release series of major.minor to cluster-api contract version - # the contract version may change between minor or major versions, but *not* - # between patch versions. 
- # - # update this file only when a new major or minor version is released - apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 - kind: Metadata - releaseSeries: - - major: 0 - minor: 2 - contract: v1beta1 - - major: 0 - minor: 3 - contract: v1beta1 - - major: 0 - minor: 4 - contract: v1beta1 - - major: 0 - minor: 5 - contract: v1beta1 - - major: 0 - minor: 6 - contract: v1beta1 - - major: 0 - minor: 7 - contract: v1beta1 - - major: 0 - minor: 8 - contract: v1beta1 - - major: 0 - minor: 9 - contract: v1beta1 - - major: 0 - minor: 10 - contract: v1beta1 - - major: 0 - minor: 11 - contract: v1beta1 - - major: 0 - minor: 12 - contract: v1beta1 -kind: ConfigMap -metadata: - annotations: - exclude.release.openshift.io/internal-openshift-hosted: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade,TechPreviewNoUpgrade - creationTimestamp: null - labels: - provider.cluster.x-k8s.io/name: ibmcloud - provider.cluster.x-k8s.io/type: infrastructure - provider.cluster.x-k8s.io/version: v0.12.0 - name: ibmcloud - namespace: openshift-cluster-api diff --git a/openshift/tools/go.mod b/openshift/tools/go.mod index 5808942faf..07415a1d10 100644 --- a/openshift/tools/go.mod +++ b/openshift/tools/go.mod @@ -4,96 +4,72 @@ go 1.24.0 toolchain go1.24.3 -require github.com/openshift/cluster-capi-operator/manifests-gen v0.0.0-20251031094146-ef85dc990f42 +require github.com/openshift/cluster-capi-operator/manifests-gen v0.0.0-20260212142421-599f7fc1eb9f require ( - github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect - github.com/adrg/xdg v0.5.3 // indirect - github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cert-manager/cert-manager v1.18.2 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudflare/circl v1.6.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/distribution/reference v0.6.0 // indirect - github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect + github.com/docker/cli v29.2.1+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect - github.com/go-viper/mapstructure/v2 v2.3.0 // indirect - github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.9 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-github/v53 v53.2.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + 
github.com/google/go-containerregistry v0.20.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.18.1 // indirect github.com/mailru/easyjson v0.9.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/openshift/api v0.0.0-20260205045418-81371d13d1fc // indirect + github.com/openshift/cluster-capi-operator v0.0.0-20260212142421-599f7fc1eb9f // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.64.0 // indirect - github.com/prometheus/procfs v0.16.1 // indirect - github.com/sagikazarmark/locafero v0.7.0 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/spf13/viper v1.20.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect - go.uber.org/automaxprocs v1.6.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.26.0 // indirect - golang.org/x/time v0.11.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.14.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.33.3 // indirect - k8s.io/apiextensions-apiserver v0.33.3 // indirect - k8s.io/apimachinery v0.33.3 // indirect - k8s.io/client-go v0.33.3 // indirect - k8s.io/cluster-bootstrap v0.32.3 // indirect - k8s.io/component-base v0.33.3 // indirect + k8s.io/api v0.34.1 // indirect + k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/apimachinery v0.34.1 // indirect + k8s.io/client-go v0.34.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - 
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect - sigs.k8s.io/cluster-api v1.10.4 // indirect - sigs.k8s.io/controller-runtime v0.20.4 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect + sigs.k8s.io/controller-runtime v0.22.4 // indirect sigs.k8s.io/gateway-api v1.1.0 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/kustomize/api v0.19.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/openshift/tools/go.sum b/openshift/tools/go.sum index 1a89d6801e..01415af53e 100644 --- a/openshift/tools/go.sum +++ b/openshift/tools/go.sum @@ -1,41 +1,31 @@ -github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= -github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= -github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= -github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= -github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cert-manager/cert-manager v1.18.2 h1:H2P75ycGcTMauV3gvpkDqLdS3RSXonWF2S49QGA1PZE= github.com/cert-manager/cert-manager v1.18.2/go.mod h1:icDJx4kG9BCNpGjBvrmsFd99d+lXUvWdkkcrSSQdIiw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/distribution/reference v0.6.0 
h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= -github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= +github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg= +github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= -github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= @@ -50,32 +40,17 @@ github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZ github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= -github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= -github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= -github.com/google/btree v1.1.3/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-github/v53 v53.2.0 h1:wvz3FyF53v4BK+AsnvCmeNhf8AkTaeh2SoYu/XUvTtI= -github.com/google/go-github/v53 v53.2.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -84,149 +59,132 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= -github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/openshift/cluster-capi-operator/manifests-gen v0.0.0-20251031094146-ef85dc990f42 h1:Ygor6slYft8fSpSScTQYVcWLgitSCzxDtYaaRIGwl3w= -github.com/openshift/cluster-capi-operator/manifests-gen v0.0.0-20251031094146-ef85dc990f42/go.mod h1:PeE/dvzcNuO1qmTDrYlSxZqtbuCeVJPX7y8YmIufqhM= -github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= -github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/openshift/api v0.0.0-20260205045418-81371d13d1fc 
h1:Crx99bQsvRhRMOMLssfVChaOItelVQhsv3esIFpcszc= +github.com/openshift/api v0.0.0-20260205045418-81371d13d1fc/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/cluster-capi-operator v0.0.0-20260212142421-599f7fc1eb9f h1:ZE6AqnnRcJufyY0wyzY2SGFZ+IU2jWJX4LmwRLDrQjU= +github.com/openshift/cluster-capi-operator v0.0.0-20260212142421-599f7fc1eb9f/go.mod h1:2R1aARTzVcwlsyIpfNMIS38HmcanJEABru8WAtlaKLg= +github.com/openshift/cluster-capi-operator/manifests-gen v0.0.0-20260212142421-599f7fc1eb9f h1:/tFERwd77WDws/rR/Iua1gNlurfalbpfdnusRjtGr18= +github.com/openshift/cluster-capi-operator/manifests-gen v0.0.0-20260212142421-599f7fc1eb9f/go.mod h1:6XQ1uexHjw9fmXbG34mHcIEK6jFZRXU+F5gbo7lt0SE= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= -github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= -github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= -github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= -github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= -gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod 
h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -237,40 +195,35 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= -k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= -k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= -k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= -k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= -k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= -k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s= -k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344= -k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= -k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/cluster-api v1.10.4 h1:5mdyWLGbbwOowWrjqM/J9N600QnxTohu5J1/1YR6g7c= -sigs.k8s.io/cluster-api v1.10.4/go.mod 
h1:68GJs286ZChsncp+TxYNj/vhy2NWokiPtH4+SA0afs0= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= -sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= -sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= -sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/openshift/tools/vendor/github.com/MakeNowJust/heredoc/LICENSE b/openshift/tools/vendor/github.com/MakeNowJust/heredoc/LICENSE deleted file mode 100644 index 6d0eb9d5d6..0000000000 --- a/openshift/tools/vendor/github.com/MakeNowJust/heredoc/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2019 TSUYUSATO Kitsune - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the 
Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/openshift/tools/vendor/github.com/MakeNowJust/heredoc/README.md b/openshift/tools/vendor/github.com/MakeNowJust/heredoc/README.md deleted file mode 100644 index e9924d2974..0000000000 --- a/openshift/tools/vendor/github.com/MakeNowJust/heredoc/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# heredoc - -[![Build Status](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![GoDoc](https://godoc.org/github.com/MakeNowJusti/heredoc?status.svg)](https://godoc.org/github.com/MakeNowJust/heredoc) - -## About - -Package heredoc provides the here-document with keeping indent. - -## Install - -```console -$ go get github.com/MakeNowJust/heredoc -``` - -## Import - -```go -// usual -import "github.com/MakeNowJust/heredoc" -``` - -## Example - -```go -package main - -import ( - "fmt" - "github.com/MakeNowJust/heredoc" -) - -func main() { - fmt.Println(heredoc.Doc(` - Lorem ipsum dolor sit amet, consectetur adipisicing elit, - sed do eiusmod tempor incididunt ut labore et dolore magna - aliqua. Ut enim ad minim veniam, ... - `)) - // Output: - // Lorem ipsum dolor sit amet, consectetur adipisicing elit, - // sed do eiusmod tempor incididunt ut labore et dolore magna - // aliqua. Ut enim ad minim veniam, ... - // -} -``` - -## API Document - - - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc) - -## License - -This software is released under the MIT License, see LICENSE. diff --git a/openshift/tools/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/openshift/tools/vendor/github.com/MakeNowJust/heredoc/heredoc.go deleted file mode 100644 index 1fc0469555..0000000000 --- a/openshift/tools/vendor/github.com/MakeNowJust/heredoc/heredoc.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) 2014-2019 TSUYUSATO Kitsune -// This software is released under the MIT License. -// http://opensource.org/licenses/mit-license.php - -// Package heredoc provides creation of here-documents from raw strings. -// -// Golang supports raw-string syntax. -// -// doc := ` -// Foo -// Bar -// ` -// -// But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to -// -// "\n\tFoo\n\tBar\n" -// -// I dont't want this! -// -// However this problem is solved by package heredoc. -// -// doc := heredoc.Doc(` -// Foo -// Bar -// `) -// -// Is equivalent to -// -// "Foo\nBar\n" -package heredoc - -import ( - "fmt" - "strings" - "unicode" -) - -const maxInt = int(^uint(0) >> 1) - -// Doc returns un-indented string as here-document. 
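For reference, a minimal usage sketch of the package being deleted here. It assumes the upstream module github.com/MakeNowJust/heredoc stays fetchable once this vendored copy is removed; the expected output follows directly from the Doc/getMinIndent/removeIndentation logic shown in this file.

```go
package main

import (
	"fmt"

	"github.com/MakeNowJust/heredoc"
)

func main() {
	// Doc strips the smallest shared indentation (two tabs here) while
	// keeping the relative two-space indent of the second line.
	fmt.Print(heredoc.Doc(`
		Foo
		  Bar
	`))
	// Output:
	// Foo
	//   Bar

	// Docf un-indents first, then formats like fmt.Sprintf. The values
	// here are illustrative, borrowed from labels elsewhere in this diff.
	fmt.Print(heredoc.Docf(`
		provider: %s
		version:  %s
	`, "ibmcloud", "v0.12.0"))
}
```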
-func Doc(raw string) string { - skipFirstLine := false - if len(raw) > 0 && raw[0] == '\n' { - raw = raw[1:] - } else { - skipFirstLine = true - } - - lines := strings.Split(raw, "\n") - - minIndentSize := getMinIndent(lines, skipFirstLine) - lines = removeIndentation(lines, minIndentSize, skipFirstLine) - - return strings.Join(lines, "\n") -} - -// getMinIndent calculates the minimum indentation in lines, excluding empty lines. -func getMinIndent(lines []string, skipFirstLine bool) int { - minIndentSize := maxInt - - for i, line := range lines { - if i == 0 && skipFirstLine { - continue - } - - indentSize := 0 - for _, r := range []rune(line) { - if unicode.IsSpace(r) { - indentSize += 1 - } else { - break - } - } - - if len(line) == indentSize { - if i == len(lines)-1 && indentSize < minIndentSize { - lines[i] = "" - } - } else if indentSize < minIndentSize { - minIndentSize = indentSize - } - } - return minIndentSize -} - -// removeIndentation removes n characters from the front of each line in lines. -// Skips first line if skipFirstLine is true, skips empty lines. -func removeIndentation(lines []string, n int, skipFirstLine bool) []string { - for i, line := range lines { - if i == 0 && skipFirstLine { - continue - } - - if len(lines[i]) >= n { - lines[i] = line[n:] - } - } - return lines -} - -// Docf returns unindented and formatted string as here-document. -// Formatting is done as for fmt.Printf(). -func Docf(raw string, args ...interface{}) string { - return fmt.Sprintf(Doc(raw), args...) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/AUTHORS deleted file mode 100644 index 2b00ddba0d..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976f..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/LICENSE b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/PATENTS b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/PATENTS deleted file mode 100644 index 733099041f..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go deleted file mode 100644 index 3ed3f43573..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go +++ /dev/null @@ -1,381 +0,0 @@ -package bitcurves - -// Copyright 2010 The Go Authors. All rights reserved. -// Copyright 2011 ThePiachu. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bitelliptic implements several Koblitz elliptic curves over prime -// fields. - -// This package operates, internally, on Jacobian coordinates. For a given -// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) -// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole -// calculation can be performed within the transform (as in ScalarMult and -// ScalarBaseMult). But even for Add and Double, it's faster to apply and -// reverse the transform than to operate in affine coordinates. 
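The convention stated in the comment above (affine x = X/Z² and y = Y/Z³) is easy to verify concretely. Below is a small, self-contained sketch of the reverse transform, i.e. what the deleted affineFromJacobian further down implements, over a toy prime field chosen only so the numbers stay readable; it is an illustration, not part of the removed code.

```go
package main

import (
	"fmt"
	"math/big"
)

// affine recovers (x, y) from Jacobian (X, Y, Z) over GF(p):
// x = X * Z^-2 mod p, y = Y * Z^-3 mod p.
func affine(X, Y, Z, p *big.Int) (*big.Int, *big.Int) {
	zinv := new(big.Int).ModInverse(Z, p)  // Z^-1 mod p
	zinv2 := new(big.Int).Mul(zinv, zinv)  // Z^-2
	x := new(big.Int).Mul(X, zinv2)
	x.Mod(x, p)
	zinv3 := new(big.Int).Mul(zinv2, zinv) // Z^-3
	y := new(big.Int).Mul(Y, zinv3)
	y.Mod(y, p)
	return x, y
}

func main() {
	p := big.NewInt(23) // toy field, not a real curve parameter
	// The affine point (3, 10) scaled by Z = 2: X = 3*2², Y = 10*2³ (mod 23).
	x, y := affine(big.NewInt(12), big.NewInt(11), big.NewInt(2), p)
	fmt.Println(x, y) // prints: 3 10
}
```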
- -import ( - "crypto/elliptic" - "io" - "math/big" - "sync" -) - -// A BitCurve represents a Koblitz Curve with a=0. -// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html -type BitCurve struct { - Name string - P *big.Int // the order of the underlying field - N *big.Int // the order of the base point - B *big.Int // the constant of the BitCurve equation - Gx, Gy *big.Int // (x,y) of the base point - BitSize int // the size of the underlying field -} - -// Params returns the parameters of the given BitCurve (see BitCurve struct) -func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) { - cp = new(elliptic.CurveParams) - cp.Name = bitCurve.Name - cp.P = bitCurve.P - cp.N = bitCurve.N - cp.Gx = bitCurve.Gx - cp.Gy = bitCurve.Gy - cp.BitSize = bitCurve.BitSize - return cp -} - -// IsOnCurve returns true if the given (x,y) lies on the BitCurve. -func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { - // y² = x³ + b - y2 := new(big.Int).Mul(y, y) //y² - y2.Mod(y2, bitCurve.P) //y²%P - - x3 := new(big.Int).Mul(x, x) //x² - x3.Mul(x3, x) //x³ - - x3.Add(x3, bitCurve.B) //x³+B - x3.Mod(x3, bitCurve.P) //(x³+B)%P - - return x3.Cmp(y2) == 0 -} - -// affineFromJacobian reverses the Jacobian transform. See the comment at the -// top of the file. -func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { - if z.Cmp(big.NewInt(0)) == 0 { - panic("bitcurve: Can't convert to affine with Jacobian Z = 0") - } - // x = YZ^2 mod P - zinv := new(big.Int).ModInverse(z, bitCurve.P) - zinvsq := new(big.Int).Mul(zinv, zinv) - - xOut = new(big.Int).Mul(x, zinvsq) - xOut.Mod(xOut, bitCurve.P) - // y = YZ^3 mod P - zinvsq.Mul(zinvsq, zinv) - yOut = new(big.Int).Mul(y, zinvsq) - yOut.Mod(yOut, bitCurve.P) - return xOut, yOut -} - -// Add returns the sum of (x1,y1) and (x2,y2) -func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { - z := new(big.Int).SetInt64(1) - x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z) - return bitCurve.affineFromJacobian(x, y, z) -} - -// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and -// (x2, y2, z2) and returns their sum, also in Jacobian form. 
-func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { - // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl - z1z1 := new(big.Int).Mul(z1, z1) - z1z1.Mod(z1z1, bitCurve.P) - z2z2 := new(big.Int).Mul(z2, z2) - z2z2.Mod(z2z2, bitCurve.P) - - u1 := new(big.Int).Mul(x1, z2z2) - u1.Mod(u1, bitCurve.P) - u2 := new(big.Int).Mul(x2, z1z1) - u2.Mod(u2, bitCurve.P) - h := new(big.Int).Sub(u2, u1) - if h.Sign() == -1 { - h.Add(h, bitCurve.P) - } - i := new(big.Int).Lsh(h, 1) - i.Mul(i, i) - j := new(big.Int).Mul(h, i) - - s1 := new(big.Int).Mul(y1, z2) - s1.Mul(s1, z2z2) - s1.Mod(s1, bitCurve.P) - s2 := new(big.Int).Mul(y2, z1) - s2.Mul(s2, z1z1) - s2.Mod(s2, bitCurve.P) - r := new(big.Int).Sub(s2, s1) - if r.Sign() == -1 { - r.Add(r, bitCurve.P) - } - r.Lsh(r, 1) - v := new(big.Int).Mul(u1, i) - - x3 := new(big.Int).Set(r) - x3.Mul(x3, x3) - x3.Sub(x3, j) - x3.Sub(x3, v) - x3.Sub(x3, v) - x3.Mod(x3, bitCurve.P) - - y3 := new(big.Int).Set(r) - v.Sub(v, x3) - y3.Mul(y3, v) - s1.Mul(s1, j) - s1.Lsh(s1, 1) - y3.Sub(y3, s1) - y3.Mod(y3, bitCurve.P) - - z3 := new(big.Int).Add(z1, z2) - z3.Mul(z3, z3) - z3.Sub(z3, z1z1) - if z3.Sign() == -1 { - z3.Add(z3, bitCurve.P) - } - z3.Sub(z3, z2z2) - if z3.Sign() == -1 { - z3.Add(z3, bitCurve.P) - } - z3.Mul(z3, h) - z3.Mod(z3, bitCurve.P) - - return x3, y3, z3 -} - -// Double returns 2*(x,y) -func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { - z1 := new(big.Int).SetInt64(1) - return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) -} - -// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and -// returns its double, also in Jacobian form. -func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { - // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l - - a := new(big.Int).Mul(x, x) //X1² - b := new(big.Int).Mul(y, y) //Y1² - c := new(big.Int).Mul(b, b) //B² - - d := new(big.Int).Add(x, b) //X1+B - d.Mul(d, d) //(X1+B)² - d.Sub(d, a) //(X1+B)²-A - d.Sub(d, c) //(X1+B)²-A-C - d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C) - - e := new(big.Int).Mul(big.NewInt(3), a) //3*A - f := new(big.Int).Mul(e, e) //E² - - x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D - x3.Sub(f, x3) //F-2*D - x3.Mod(x3, bitCurve.P) - - y3 := new(big.Int).Sub(d, x3) //D-X3 - y3.Mul(e, y3) //E*(D-X3) - y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C - y3.Mod(y3, bitCurve.P) - - z3 := new(big.Int).Mul(y, z) //Y1*Z1 - z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 - z3.Mod(z3, bitCurve.P) - - return x3, y3, z3 -} - -//TODO: double check if it is okay -// ScalarMult returns k*(Bx,By) where k is a number in big-endian form. -func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { - // We have a slight problem in that the identity of the group (the - // point at infinity) cannot be represented in (x, y) form on a finite - // machine. Thus the standard add/double algorithm has to be tweaked - // slightly: our initial state is not the identity, but x, and we - // ignore the first true bit in |k|. If we don't find any true bits in - // |k|, then we return nil, nil, because we cannot return the identity - // element. 
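// Worked example of the loop below (illustrative only), assuming k = []byte{0x05}
// and writing P for the input point (Bx, By):
//
//	bits 7..3 are 0: seenFirstTrue is still false, so nothing happens;
//	bit 2 is 1:      the first true bit, so the state stays at P with no add;
//	bit 1 is 0:      one doubling, and the state becomes 2P;
//	bit 0 is 1:      double then add, and the state becomes 4P + P = 5P.
//
// The result is 5·P, matching k = 5, without ever representing the identity.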
- - Bz := new(big.Int).SetInt64(1) - x := Bx - y := By - z := Bz - - seenFirstTrue := false - for _, byte := range k { - for bitNum := 0; bitNum < 8; bitNum++ { - if seenFirstTrue { - x, y, z = bitCurve.doubleJacobian(x, y, z) - } - if byte&0x80 == 0x80 { - if !seenFirstTrue { - seenFirstTrue = true - } else { - x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z) - } - } - byte <<= 1 - } - } - - if !seenFirstTrue { - return nil, nil - } - - return bitCurve.affineFromJacobian(x, y, z) -} - -// ScalarBaseMult returns k*G, where G is the base point of the group and k is -// an integer in big-endian form. -func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { - return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) -} - -var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} - -//TODO: double check if it is okay -// GenerateKey returns a public/private key pair. The private key is generated -// using the given reader, which must return random data. -func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { - byteLen := (bitCurve.BitSize + 7) >> 3 - priv = make([]byte, byteLen) - - for x == nil { - _, err = io.ReadFull(rand, priv) - if err != nil { - return - } - // We have to mask off any excess bits in the case that the size of the - // underlying field is not a whole number of bytes. - priv[0] &= mask[bitCurve.BitSize%8] - // This is because, in tests, rand will return all zeros and we don't - // want to get the point at infinity and loop forever. - priv[1] ^= 0x42 - x, y = bitCurve.ScalarBaseMult(priv) - } - return -} - -// Marshal converts a point into the form specified in section 4.3.6 of ANSI -// X9.62. -func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { - byteLen := (bitCurve.BitSize + 7) >> 3 - - ret := make([]byte, 1+2*byteLen) - ret[0] = 4 // uncompressed point - - xBytes := x.Bytes() - copy(ret[1+byteLen-len(xBytes):], xBytes) - yBytes := y.Bytes() - copy(ret[1+2*byteLen-len(yBytes):], yBytes) - return ret -} - -// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On -// error, x = nil. 
-func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { - byteLen := (bitCurve.BitSize + 7) >> 3 - if len(data) != 1+2*byteLen { - return - } - if data[0] != 4 { // uncompressed form - return - } - x = new(big.Int).SetBytes(data[1 : 1+byteLen]) - y = new(big.Int).SetBytes(data[1+byteLen:]) - return -} - -//curve parameters taken from: -//http://www.secg.org/collateral/sec2_final.pdf - -var initonce sync.Once -var secp160k1 *BitCurve -var secp192k1 *BitCurve -var secp224k1 *BitCurve -var secp256k1 *BitCurve - -func initAll() { - initS160() - initS192() - initS224() - initS256() -} - -func initS160() { - // See SEC 2 section 2.4.1 - secp160k1 = new(BitCurve) - secp160k1.Name = "secp160k1" - secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) - secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) - secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) - secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) - secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) - secp160k1.BitSize = 160 -} - -func initS192() { - // See SEC 2 section 2.5.1 - secp192k1 = new(BitCurve) - secp192k1.Name = "secp192k1" - secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) - secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) - secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) - secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) - secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) - secp192k1.BitSize = 192 -} - -func initS224() { - // See SEC 2 section 2.6.1 - secp224k1 = new(BitCurve) - secp224k1.Name = "secp224k1" - secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) - secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) - secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) - secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) - secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) - secp224k1.BitSize = 224 -} - -func initS256() { - // See SEC 2 section 2.7.1 - secp256k1 = new(BitCurve) - secp256k1.Name = "secp256k1" - secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) - secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) - secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) - secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) - secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) - secp256k1.BitSize = 256 -} - -// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) -func S160() *BitCurve { - initonce.Do(initAll) - return secp160k1 -} - -// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) -func S192() *BitCurve { - initonce.Do(initAll) - return secp192k1 -} - -// S224 returns a BitCurve which 
implements secp224k1 (see SEC 2 section 2.6.1) -func S224() *BitCurve { - initonce.Do(initAll) - return secp224k1 -} - -// S256 returns a BitCurve which implements bitcurves (see SEC 2 section 2.7.1) -func S256() *BitCurve { - initonce.Do(initAll) - return secp256k1 -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go deleted file mode 100644 index cb6676de24..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go +++ /dev/null @@ -1,134 +0,0 @@ -// Package brainpool implements Brainpool elliptic curves. -// Implementation of rcurves is from github.com/ebfe/brainpool -// Note that these curves are implemented with naive, non-constant time operations -// and are likely not suitable for environments where timing attacks are a concern. -package brainpool - -import ( - "crypto/elliptic" - "math/big" - "sync" -) - -var ( - once sync.Once - p256t1, p384t1, p512t1 *elliptic.CurveParams - p256r1, p384r1, p512r1 *rcurve -) - -func initAll() { - initP256t1() - initP384t1() - initP512t1() - initP256r1() - initP384r1() - initP512r1() -} - -func initP256t1() { - p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} - p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) - p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) - p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) - p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) - p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) - p256t1.BitSize = 256 -} - -func initP256r1() { - twisted := p256t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP256r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) - params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) - z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) - p256r1 = newrcurve(twisted, params, z) -} - -func initP384t1() { - p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} - p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) - p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) - p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) - p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) - p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) - p384t1.BitSize = 384 -} - -func initP384r1() { - twisted := p384t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP384r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) - 
params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) - z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) - p384r1 = newrcurve(twisted, params, z) -} - -func initP512t1() { - p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} - p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) - p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) - p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) - p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) - p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) - p512t1.BitSize = 512 -} - -func initP512r1() { - twisted := p512t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP512r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) - params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) - z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) - p512r1 = newrcurve(twisted, params, z) -} - -// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) -func P256t1() elliptic.Curve { - once.Do(initAll) - return p256t1 -} - -// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) -func P256r1() elliptic.Curve { - once.Do(initAll) - return p256r1 -} - -// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) -func P384t1() elliptic.Curve { - once.Do(initAll) - return p384t1 -} - -// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) -func P384r1() elliptic.Curve { - once.Do(initAll) - return p384r1 -} - -// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) -func P512t1() elliptic.Curve { - once.Do(initAll) - return p512t1 -} - -// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) -func P512r1() elliptic.Curve { - once.Do(initAll) - return p512r1 -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go deleted file mode 100644 index 2d5355085f..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go +++ /dev/null @@ -1,83 +0,0 @@ -package brainpool - -import ( - "crypto/elliptic" - "math/big" -) - -var _ elliptic.Curve = (*rcurve)(nil) - -type rcurve struct { - twisted elliptic.Curve - params *elliptic.CurveParams - z *big.Int - zinv *big.Int 
- z2 *big.Int - z3 *big.Int - zinv2 *big.Int - zinv3 *big.Int -} - -var ( - two = big.NewInt(2) - three = big.NewInt(3) -) - -func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { - zinv := new(big.Int).ModInverse(z, params.P) - return &rcurve{ - twisted: twisted, - params: params, - z: z, - zinv: zinv, - z2: new(big.Int).Exp(z, two, params.P), - z3: new(big.Int).Exp(z, three, params.P), - zinv2: new(big.Int).Exp(zinv, two, params.P), - zinv3: new(big.Int).Exp(zinv, three, params.P), - } -} - -func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { - var tx, ty big.Int - tx.Mul(x, curve.z2) - tx.Mod(&tx, curve.params.P) - ty.Mul(y, curve.z3) - ty.Mod(&ty, curve.params.P) - return &tx, &ty -} - -func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { - var x, y big.Int - x.Mul(tx, curve.zinv2) - x.Mod(&x, curve.params.P) - y.Mul(ty, curve.zinv3) - y.Mod(&y, curve.params.P) - return &x, &y -} - -func (curve *rcurve) Params() *elliptic.CurveParams { - return curve.params -} - -func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { - return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) -} - -func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { - tx1, ty1 := curve.toTwisted(x1, y1) - tx2, ty2 := curve.toTwisted(x2, y2) - return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) -} - -func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { - return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) -} - -func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { - tx1, ty1 := curve.toTwisted(x1, y1) - return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) -} - -func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { - return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) -} \ No newline at end of file diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/eax.go deleted file mode 100644 index 6b6bc7aed0..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/eax.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -// Package eax provides an implementation of the EAX -// (encrypt-authenticate-translate) mode of operation, as described in -// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS -// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." -// In FSE'04, volume 3017 of LNCS, 2004 -package eax - -import ( - "crypto/cipher" - "crypto/subtle" - "errors" - "github.com/ProtonMail/go-crypto/internal/byteutil" -) - -const ( - defaultTagSize = 16 - defaultNonceSize = 16 -) - -type eax struct { - block cipher.Block // Only AES-{128, 192, 256} supported - tagSize int // At least 12 bytes recommended - nonceSize int -} - -func (e *eax) NonceSize() int { - return e.nonceSize -} - -func (e *eax) Overhead() int { - return e.tagSize -} - -// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and -// tag lengths. Supports {128, 192, 256}- bit key length. -func NewEAX(block cipher.Block) (cipher.AEAD, error) { - return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) -} - -// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and -// given nonce and tag lengths in bytes. Panics on zero nonceSize and -// exceedingly long tags. 
-// -// It is recommended to use at least 12 bytes as tag length (see, for instance, -// NIST SP 800-38D). -// -// Only to be used for compatibility with existing cryptosystems with -// non-standard parameters. For all other cases, prefer NewEAX. -func NewEAXWithNonceAndTagSize( - block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { - if nonceSize < 1 { - return nil, eaxError("Cannot initialize EAX with nonceSize = 0") - } - if tagSize > block.BlockSize() { - return nil, eaxError("Custom tag length exceeds blocksize") - } - return &eax{ - block: block, - tagSize: tagSize, - nonceSize: nonceSize, - }, nil -} - -func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { - if len(nonce) > e.nonceSize { - panic("crypto/eax: Nonce too long for this instance") - } - ret, out := byteutil.SliceForAppend(dst, len(plaintext) + e.tagSize) - omacNonce := e.omacT(0, nonce) - omacAdata := e.omacT(1, adata) - - // Encrypt message using CTR mode and omacNonce as IV - ctr := cipher.NewCTR(e.block, omacNonce) - ciphertextData := out[:len(plaintext)] - ctr.XORKeyStream(ciphertextData, plaintext) - - omacCiphertext := e.omacT(2, ciphertextData) - - tag := out[len(plaintext):] - for i := 0; i < e.tagSize; i++ { - tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] - } - return ret -} - -func (e* eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { - if len(nonce) > e.nonceSize { - panic("crypto/eax: Nonce too long for this instance") - } - if len(ciphertext) < e.tagSize { - return nil, eaxError("Ciphertext shorter than tag length") - } - sep := len(ciphertext) - e.tagSize - - // Compute tag - omacNonce := e.omacT(0, nonce) - omacAdata := e.omacT(1, adata) - omacCiphertext := e.omacT(2, ciphertext[:sep]) - - tag := make([]byte, e.tagSize) - for i := 0; i < e.tagSize; i++ { - tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] - } - - // Compare tags - if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { - return nil, eaxError("Tag authentication failed") - } - - // Decrypt ciphertext - ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) - ctr := cipher.NewCTR(e.block, omacNonce) - ctr.XORKeyStream(out, ciphertext[:sep]) - - return ret[:sep], nil -} - -// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) -func (e *eax) omacT(t byte, plaintext []byte) []byte { - blockSize := e.block.BlockSize() - byteT := make([]byte, blockSize) - byteT[blockSize-1] = t - concat := append(byteT, plaintext...) - return e.omac(concat) -} - -func (e *eax) omac(plaintext []byte) []byte { - blockSize := e.block.BlockSize() - // L ← E_K(0^n); B ← 2L; P ← 4L - L := make([]byte, blockSize) - e.block.Encrypt(L, L) - B := byteutil.GfnDouble(L) - P := byteutil.GfnDouble(B) - - // CBC with IV = 0 - cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) - padded := e.pad(plaintext, B, P) - cbcCiphertext := make([]byte, len(padded)) - cbc.CryptBlocks(cbcCiphertext, padded) - - return cbcCiphertext[len(cbcCiphertext)-blockSize:] -} - -func (e *eax) pad(plaintext, B, P []byte) []byte { - // if |M| in {n, 2n, 3n, ...} - blockSize := e.block.BlockSize() - if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { - return byteutil.RightXor(plaintext, B) - } - - // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P - ending := make([]byte, blockSize-len(plaintext)%blockSize) - ending[0] = 0x80 - padded := append(plaintext, ending...) 
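// For example (illustration only): with blockSize = 16 and a 2-byte
// plaintext AA BB, ending is the 14 bytes 80 00 ... 00, so padded becomes
// AA BB 80 00 ... 00, which is exactly the "M || 1 || 0^(n-1-(|M| % n))"
// form, before its tail block is XORed with P on the next line.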
- return byteutil.RightXor(padded, P) -} - -func eaxError(err string) error { - return errors.New("crypto/eax: " + err) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go deleted file mode 100644 index ddb53d0790..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go +++ /dev/null @@ -1,58 +0,0 @@ -package eax - -// Test vectors from -// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf -var testVectors = []struct { - msg, key, nonce, header, ciphertext string -}{ - {"", - "233952DEE4D5ED5F9B9C6D6FF80FF478", - "62EC67F9C3A4A407FCB2A8C49031A8B3", - "6BFB914FD07EAE6B", - "E037830E8389F27B025A2D6527E79D01"}, - {"F7FB", - "91945D3F4DCBEE0BF45EF52255F095A4", - "BECAF043B0A23D843194BA972C66DEBD", - "FA3BFD4806EB53FA", - "19DD5C4C9331049D0BDAB0277408F67967E5"}, - {"1A47CB4933", - "01F74AD64077F2E704C0F60ADA3DD523", - "70C3DB4F0D26368400A10ED05D2BFF5E", - "234A3463C1264AC6", - "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, - {"481C9E39B1", - "D07CF6CBB7F313BDDE66B727AFD3C5E8", - "8408DFFF3C1A2B1292DC199E46B7D617", - "33CCE2EABFF5A79D", - "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, - {"40D0C07DA5E4", - "35B6D0580005BBC12B0587124557D2C2", - "FDB6B06676EEDC5C61D74276E1F8E816", - "AEB96EAEBE2970E9", - "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, - {"4DE3B35C3FC039245BD1FB7D", - "BD8E6E11475E60B268784C38C62FEB22", - "6EAC5C93072D8E8513F750935E46DA1B", - "D4482D1CA78DCE0F", - "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, - {"8B0A79306C9CE7ED99DAE4F87F8DD61636", - "7C77D6E813BED5AC98BAA417477A2E7D", - "1A8C98DCD73D38393B2BF1569DEEFC19", - "65D2017990D62528", - "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, - {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", - "5FFF20CAFAB119CA2FC73549E20F5B0D", - "DDE59B97D722156D4D9AFF2BC7559826", - "54B9F04E6A09189A", - "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, - {"6CF36720872B8513F6EAB1A8A44438D5EF11", - "A4A4782BCFFD3EC5E7EF6D8C34A56123", - "B781FCF2F75FA5A8DE97A9CA48E522EC", - "899A175897561D7E", - "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, - {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", - "8395FCF1E95BEBD697BD010BC766AAC3", - "22E7ADD93CFC6393C57EC0B3C17D6B44", - "126735FCC320D25A", - "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go deleted file mode 100644 index 4eb19f28d9..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go +++ /dev/null @@ -1,131 +0,0 @@ -// These vectors include key length in {128, 192, 256}, tag size 128, and -// random nonce, header, and plaintext lengths. - -// This file was automatically generated. 
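The testVectors table above comes from the EAX paper cited in its header comment, and the randomVectors table that follows exercises the less common parameter lengths. A minimal harness along the following lines could replay the paper vectors through the package's own NewEAX constructor (shown earlier in this diff); the repository's real test file is not part of this diff, so the test name here is illustrative:

package eax

import (
	"bytes"
	"crypto/aes"
	"encoding/hex"
	"testing"
)

// TestPaperVectors sketches how testVectors could be checked: hex-decode each
// field, seal with AES-EAX, and compare against the expected ciphertext||tag.
func TestPaperVectors(t *testing.T) {
	for _, v := range testVectors {
		key, _ := hex.DecodeString(v.key)
		nonce, _ := hex.DecodeString(v.nonce)
		header, _ := hex.DecodeString(v.header)
		msg, _ := hex.DecodeString(v.msg)
		want, _ := hex.DecodeString(v.ciphertext)

		block, err := aes.NewCipher(key)
		if err != nil {
			t.Fatal(err)
		}
		aead, err := NewEAX(block)
		if err != nil {
			t.Fatal(err)
		}
		if got := aead.Seal(nil, nonce, msg, header); !bytes.Equal(got, want) {
			t.Errorf("key %s: got %x, want %x", v.key, got, want)
		}
	}
}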
- -package eax - -var randomVectors = []struct { - key, nonce, header, plaintext, ciphertext string -}{ - {"DFDE093F36B0356E5A81F609786982E3", - "1D8AC604419001816905BA72B14CED7E", - "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", - "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", - "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, - {"F10619EF02E5D94D7550EB84ED364A21", - "8DC0D4F2F745BBAE835CC5574B942D20", - "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", - "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", - "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, - {"429F514EFC64D98A698A9247274CFF45", - "976AA5EB072F912D126ACEBC954FEC38", - "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", - "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", - "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, - {"398138F309085F47F8457CDF53895A63", - "F8A8A7F2D28E5FFF7BBC2F24353F7A36", - "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", - "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", - "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, - {"7A4151EBD3901B42CBA45DAFB2E931BA", - "0FC88ACEE74DD538040321C330974EB8", - "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", - "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", - "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, - {"BFB147E1CD5459424F8C0271FC0E0DC5", - "EABCC126442BF373969EA3015988CC45", - "4C0880E1D71AA2C7", - "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", - "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, - {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", - "0F84B5D36CF4BC3B863313AF3B4D2E97", - "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", - "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", - "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, - {"44E6F2DC8FDC778AD007137D11410F50", - "270A237AD977F7187AA6C158A0BAB24F", - "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", - 
"8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", - "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, - {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", - "8B4BBE26CCD9859DCD84884159D6B0A4", - "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", - "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", - "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, - {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", - "92745C018AA708ECFEB1667E9F3F1B01", - "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", - "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", - "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, - {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", - "0F45CF7F0BF31CCEB85D9DA10F4D749F", - "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", - "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", - "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, - {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", - "5B11EA1D6008EBB41CF892FCA5B943D1", - "BAF4FF5C8242", - "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", - "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, - {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", - "FD82B5B31804FF47D44199B533D0CF84", - "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", - "B804", - "164BB965C05EBE0931A1A63293EDF9C38C27"}, - {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", - "343BE00DA9483F05C14F2E9EB8EA6AE8", - "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", - "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", - 
"E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, - {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", - "E280E13D7367042E3AA09A80111B6184", - "21486C9D7A9647", - "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", - "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, - {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", - "656B41CB3F9CF8C08BAD7EBFC80BD225", - "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", - "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", - "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, - {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", - "DA0F3A40983D92F2D4C01FED33C7A192", - "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", - "F37326A80E08", - "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, - {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", - "3EE1182AEBB19A02B128F28E1D5F7F99", - "D9F35ABB16D776CE", - "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", - "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, - {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", - "336F5D681E0410FAE7B607246092C6DC", - "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", - "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", - "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, - {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", - "AB91F7245DDCE3F1C747872D47BE0A8A", - "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", - "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", - "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, - {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", - "C691294EF67CD04D1B9242AF83DD1421", - "879334DAE3", - "1E17F46A98FEF5CBB40759D95354", - "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, - {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", - "4228DA0678CA3534588859E77DFF014C", - "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", - 
"23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", - "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, - {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", - "943188480C99437495958B0AE4831AA9", - "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", - "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", - "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, - {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", - "B0490F5BD68A52459556B3749ACDF40E", - "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", - "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", - "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go deleted file mode 100644 index a6bdf51232..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG -// This file contains necessary tools for the aex and ocb packages. -// -// These functions SHOULD NOT be used elsewhere, since they are optimized for -// specific input nature in the EAX and OCB modes of operation. - -package byteutil - -// GfnDouble computes 2 * input in the field of 2^n elements. -// The irreducible polynomial in the finite field for n=128 is -// x^128 + x^7 + x^2 + x + 1 (equals 0x87) -// Constant-time execution in order to avoid side-channel attacks -func GfnDouble(input []byte) []byte { - if len(input) != 16 { - panic("Doubling in GFn only implemented for n = 128") - } - // If the first bit is zero, return 2L = L << 1 - // Else return (L << 1) xor 0^120 10000111 - shifted := ShiftBytesLeft(input) - shifted[15] ^= ((input[0] >> 7) * 0x87) - return shifted -} - -// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. -func ShiftBytesLeft(x []byte) []byte { - l := len(x) - dst := make([]byte, l) - for i := 0; i < l-1; i++ { - dst[i] = (x[i] << 1) | (x[i+1] >> 7) - } - dst[l-1] = x[l-1] << 1 - return dst -} - -// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. -func ShiftNBytesLeft(dst, x []byte, n int) { - // Erase first n / 8 bytes - copy(dst, x[n/8:]) - - // Shift the remaining n % 8 bits - bits := uint(n % 8) - l := len(dst) - for i := 0; i < l-1; i++ { - dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8 - bits)) - } - dst[l-1] = dst[l-1] << bits - - // Append trailing zeroes - dst = append(dst, make([]byte, n/8)...) 
-} - -// XorBytesMut assumes equal input length, replaces X with X XOR Y -func XorBytesMut(X, Y []byte) { - for i := 0; i < len(X); i++ { - X[i] ^= Y[i] - } -} - - -// XorBytes assumes equal input length, puts X XOR Y into Z -func XorBytes(Z, X, Y []byte) { - for i := 0; i < len(X); i++ { - Z[i] = X[i] ^ Y[i] - } -} - -// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) -func RightXor(X, Y []byte) []byte { - offset := len(X) - len(Y) - xored := make([]byte, len(X)); - copy(xored, X) - for i := 0; i < len(Y); i++ { - xored[offset + i] ^= Y[i] - } - return xored -} - -// SliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func SliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} - diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go deleted file mode 100644 index 7f78cfa759..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -// Package ocb provides an implementation of the OCB (offset codebook) mode of -// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, -// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT -// AUTHENTICATED ENCRYPTION (2003). -// Security considerations (from RFC-7253): A private key MUST NOT be used to -// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a -// brute-force forging adversary succeeds after 2^{tag length} attempts). A -// single key SHOULD NOT be used to decrypt ciphertext with different tag -// lengths. Nonces need not be secret, but MUST NOT be reused. -// This package only supports underlying block ciphers with 128-bit blocks, -// such as AES-{128, 192, 256}, but may be extended to other sizes. -package ocb - -import ( - "bytes" - "crypto/cipher" - "crypto/subtle" - "errors" - "github.com/ProtonMail/go-crypto/internal/byteutil" - "math/bits" -) - -type ocb struct { - block cipher.Block - tagSize int - nonceSize int - mask mask - // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' - // internal variable can be reused for en/decrypting with nonces sharing - // all but the last 6 bits with N. The prefix of the first nonce used to - // compute the new Ktop, and the Ktop value itself, are stored in - // reusableKtop. If using incremental nonces, this saves one block cipher - // call every 63 out of 64 OCB encryptions, and stores one nonce and one - // output of the block cipher in memory only. - reusableKtop reusableKtop -} - -type mask struct { - // L_*, L_$, (L_i)_{i ∈ N} - lAst []byte - lDol []byte - L [][]byte -} - -type reusableKtop struct { - noncePrefix []byte - Ktop []byte -} - -const ( - defaultTagSize = 16 - defaultNonceSize = 15 -) - -const ( - enc = iota - dec -) - -func (o *ocb) NonceSize() int { - return o.nonceSize -} - -func (o *ocb) Overhead() int { - return o.tagSize -} - -// NewOCB returns an OCB instance with the given block cipher and default -// tag and nonce sizes. 
-func NewOCB(block cipher.Block) (cipher.AEAD, error) { - return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) -} - -// NewOCBWithNonceAndTagSize returns an OCB instance with the given block -// cipher, nonce length, and tag length. Panics on zero nonceSize and -// exceedingly long tag size. -// -// It is recommended to use at least 12 bytes as tag length. -func NewOCBWithNonceAndTagSize( - block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { - if block.BlockSize() != 16 { - return nil, ocbError("Block cipher must have 128-bit blocks") - } - if nonceSize < 1 { - return nil, ocbError("Incorrect nonce length") - } - if nonceSize >= block.BlockSize() { - return nil, ocbError("Nonce length exceeds blocksize - 1") - } - if tagSize > block.BlockSize() { - return nil, ocbError("Custom tag length exceeds blocksize") - } - return &ocb{ - block: block, - tagSize: tagSize, - nonceSize: nonceSize, - mask: initializeMaskTable(block), - reusableKtop: reusableKtop{ - noncePrefix: nil, - Ktop: nil, - }, - }, nil -} - -func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { - if len(nonce) > o.nonceSize { - panic("crypto/ocb: Incorrect nonce length given to OCB") - } - ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) - o.crypt(enc, out, nonce, adata, plaintext) - return ret -} - -func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { - if len(nonce) > o.nonceSize { - panic("Nonce too long for this instance") - } - if len(ciphertext) < o.tagSize { - return nil, ocbError("Ciphertext shorter than tag length") - } - sep := len(ciphertext) - o.tagSize - ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) - ciphertextData := ciphertext[:sep] - tag := ciphertext[sep:] - o.crypt(dec, out, nonce, adata, ciphertextData) - if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { - ret = ret[:sep] - return ret, nil - } - for i := range out { - out[i] = 0 - } - return nil, ocbError("Tag authentication failed") -} - -// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) -// function. It returns the resulting plain/ciphertext with the tag appended. -func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { - // - // Consider X as a sequence of 128-bit blocks - // - // Note: For encryption (resp. decryption), X is the plaintext (resp., the - // ciphertext without the tag). - blockSize := o.block.BlockSize() - - // - // Nonce-dependent and per-encryption variables - // - // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop - // is already computed. - truncatedNonce := make([]byte, len(nonce)) - copy(truncatedNonce, nonce) - truncatedNonce[len(truncatedNonce)-1] &= 192 - Ktop := make([]byte, blockSize) - if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { - Ktop = o.reusableKtop.Ktop - } else { - // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N - paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1) - paddedNonce = append(paddedNonce, truncatedNonce...) - paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1) - // Last 6 bits of paddedNonce are already zero. 
Encrypt into Ktop - paddedNonce[blockSize-1] &= 192 - Ktop = paddedNonce - o.block.Encrypt(Ktop, Ktop) - o.reusableKtop.noncePrefix = truncatedNonce - o.reusableKtop.Ktop = Ktop - } - - // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) - xorHalves := make([]byte, blockSize/2) - byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) - stretch := append(Ktop, xorHalves...) - bottom := int(nonce[len(nonce)-1] & 63) - offset := make([]byte, len(stretch)) - byteutil.ShiftNBytesLeft(offset, stretch, bottom) - offset = offset[:blockSize] - - // - // Process any whole blocks - // - // Note: For encryption Y is ciphertext || tag, for decryption Y is - // plaintext || tag. - checksum := make([]byte, blockSize) - m := len(X) / blockSize - for i := 0; i < m; i++ { - index := bits.TrailingZeros(uint(i + 1)) - if len(o.mask.L)-1 < index { - o.mask.extendTable(index) - } - byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) - blockX := X[i*blockSize : (i+1)*blockSize] - blockY := Y[i*blockSize : (i+1)*blockSize] - byteutil.XorBytes(blockY, blockX, offset) - switch instruction { - case enc: - o.block.Encrypt(blockY, blockY) - byteutil.XorBytesMut(blockY, offset) - byteutil.XorBytesMut(checksum, blockX) - case dec: - o.block.Decrypt(blockY, blockY) - byteutil.XorBytesMut(blockY, offset) - byteutil.XorBytesMut(checksum, blockY) - } - } - // - // Process any final partial block and compute raw tag - // - tag := make([]byte, blockSize) - if len(X)%blockSize != 0 { - byteutil.XorBytesMut(offset, o.mask.lAst) - pad := make([]byte, blockSize) - o.block.Encrypt(pad, offset) - chunkX := X[blockSize*m:] - chunkY := Y[blockSize*m : len(X)] - byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) - // P_* || bit(1) || zeroes(127) - len(P_*) - switch instruction { - case enc: - paddedY := append(chunkX, byte(128)) - paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) - byteutil.XorBytesMut(checksum, paddedY) - case dec: - paddedX := append(chunkY, byte(128)) - paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) - byteutil.XorBytesMut(checksum, paddedX) - } - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) - } else { - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m:], tag[:o.tagSize]) - } - return Y -} - -// This hash function is used to compute the tag. Per design, on empty input it -// returns a slice of zeros, of the same length as the underlying block cipher -// block size. 
-func (o *ocb) hash(adata []byte) []byte { - // - // Consider A as a sequence of 128-bit blocks - // - A := make([]byte, len(adata)) - copy(A, adata) - blockSize := o.block.BlockSize() - - // - // Process any whole blocks - // - sum := make([]byte, blockSize) - offset := make([]byte, blockSize) - m := len(A) / blockSize - for i := 0; i < m; i++ { - chunk := A[blockSize*i : blockSize*(i+1)] - index := bits.TrailingZeros(uint(i + 1)) - // If the mask table is too short - if len(o.mask.L)-1 < index { - o.mask.extendTable(index) - } - byteutil.XorBytesMut(offset, o.mask.L[index]) - byteutil.XorBytesMut(chunk, offset) - o.block.Encrypt(chunk, chunk) - byteutil.XorBytesMut(sum, chunk) - } - - // - // Process any final partial block; compute final hash value - // - if len(A)%blockSize != 0 { - byteutil.XorBytesMut(offset, o.mask.lAst) - // Pad block with 1 || 0 ^ 127 - bitlength(a) - ending := make([]byte, blockSize-len(A)%blockSize) - ending[0] = 0x80 - encrypted := append(A[blockSize*m:], ending...) - byteutil.XorBytesMut(encrypted, offset) - o.block.Encrypt(encrypted, encrypted) - byteutil.XorBytesMut(sum, encrypted) - } - return sum -} - -func initializeMaskTable(block cipher.Block) mask { - // - // Key-dependent variables - // - lAst := make([]byte, block.BlockSize()) - block.Encrypt(lAst, lAst) - lDol := byteutil.GfnDouble(lAst) - L := make([][]byte, 1) - L[0] = byteutil.GfnDouble(lDol) - - return mask{ - lAst: lAst, - lDol: lDol, - L: L, - } -} - -// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) -func (m *mask) extendTable(limit int) { - for i := len(m.L); i <= limit; i++ { - m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) - } -} - -func ocbError(err string) error { - return errors.New("crypto/ocb: " + err) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go deleted file mode 100644 index 0efaf344fd..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go +++ /dev/null @@ -1,136 +0,0 @@ -// In the test vectors provided by RFC 7253, the "bottom" -// internal variable, which defines "offset" for the first time, does not -// exceed 15. However, it can attain values up to 63. - -// These vectors include key length in {128, 192, 256}, tag size 128, and -// random nonce, header, and plaintext lengths. - -// This file was automatically generated. 
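The initializeMaskTable and extendTable functions above build the OCB offset masks as a doubling chain: L_* = E_K(0^128), L_$ = 2·L_*, L[0] = 2·L_$, and L[i] = 2·L[i-1], where doubling is byteutil.GfnDouble's shift-and-reduce in GF(2^128). A standalone sketch of that doubling step, seeded with a stand-in value rather than a real E_K(0^128):

package main

import "fmt"

// gfnDouble mirrors byteutil.GfnDouble: multiply a 128-bit block by x in
// GF(2^128) with reduction polynomial x^128 + x^7 + x^2 + x + 1 (0x87).
func gfnDouble(in [16]byte) [16]byte {
	var out [16]byte
	carry := in[0] >> 7 // top bit shifted out of the block
	for i := 0; i < 15; i++ {
		out[i] = in[i]<<1 | in[i+1]>>7
	}
	out[15] = in[15]<<1 ^ carry*0x87 // fold the carry back in, branch-free
	return out
}

func main() {
	lAst := [16]byte{0x80} // stand-in for E_K(0^128), illustration only
	lDol := gfnDouble(lAst)
	l0 := gfnDouble(lDol)
	fmt.Printf("L_$  = %x\nL[0] = %x\n", lDol, l0)
}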
- -package ocb - -var randomVectors = []struct { - key, nonce, header, plaintext, ciphertext string -}{ - - {"9438C5D599308EAF13F800D2D31EA7F0", - "C38EE4801BEBFFA1CD8635BE", - "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", - "962D227786FB8913A8BAD5DC3250", - "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, - {"BA7DE631C7D6712167C6724F5B9A2B1D", - "35263EBDA05765DC0E71F1F5", - "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", - "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", - "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, - {"2E74B25289F6FD3E578C24866E9C72A5", - "FD912F15025AF8414642BA1D1D", - "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", - "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", - "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, - {"E7EC507C802528F790AFF5303A017B17", - "4B97A7A568940A9E3CE7A99E93031E", - "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", - "9455B3EA706B74", - "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, - {"6C928AA3224736F28EE7378DE0090191", - "8936138E2E4C6A13280017A1622D", - "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", - "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", - "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, - {"ECEA315CA4B3F425B0C9957A17805EA4", - "664CDAE18403F4F9BA13015A44FC", - "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", - "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", - "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, - {"4EE616C4A58AAA380878F71A373461F6", - "91B8C9C176D9C385E9C47E52", - "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", - "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", - "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, - {"DCD475773136C830D5E3D0C5FE05B7FF", 
- "BB8E1FBB483BE7616A922C4A", - "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", - "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", - "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, - {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", - "FACF804A4BEBF998505FF9DE", - "8213B9263B2971A5BDA18DBD02208EE1", - "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", - "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, - {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", - "0B0EF53D4606B28D1398355F", - "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", - "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", - "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, - {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", - "5C83F4896D0738E1366B1836", - "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", - "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", - "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, - {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", - "36B88F63ADBB5668588181D774", - "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", - "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", - "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, - {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", - "BC099AEB637361BAC536B57618", - "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", - "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", - "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, - {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", - "B1B0AAF373B8B026EB80422051D8", - "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", - 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", - "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, - {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", - "2F2F17CECF7E5A756D10785A3CB9DB", - "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", - "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", - "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, - {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", - "BCA625674F41D1E3AB47672DC0C3", - "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", - "E525422519ECE070E82C", - "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, - {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", - "36D2EC25ADD33CDEDF495205BBC923", - "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", - "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", - "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, - {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", - "81691D5D20EC818FCFF24B33DECC", - "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", - "EB861165DAF7625F827C6B574ED703F03215", - "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, - {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", - "D2A7B94DD12CDACA909D3AD7", - "E021A78F374FC271389AB9A3E97077D755", - "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", - "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, - {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", - "65ED3D63F79F90BBFD19775E", - "336A8C0B7243582A46B221AA677647FCAE91", - "134A8B34824A290E7B", - "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, - {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", - "C391A856B9FE234E14BA1AC7BB40FF", - "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", - "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", - 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, - {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", - "B15B61C8BB39261A8F55AB178EC3", - "D0729B6B75BB", - "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", - "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, - {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", - "01D7BDB1133E9C347486C1EFA6", - "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", - "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", - "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, - {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", - "0C52B3D11D636E5910A4DD76D32C", - "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", - "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", - "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go deleted file mode 100644 index 330309ff5f..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go +++ /dev/null @@ -1,78 +0,0 @@ -package ocb - -import ( - "encoding/hex" -) - -// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is -// shared across tests. 
-var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") - -var rfc7253testVectors = []struct { - nonce, header, plaintext, ciphertext string -}{ - {"BBAA99887766554433221100", - "", - "", - "785407BFFFC8AD9EDCC5520AC9111EE6"}, - {"BBAA99887766554433221101", - "0001020304050607", - "0001020304050607", - "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, - {"BBAA99887766554433221102", - "0001020304050607", - "", - "81017F8203F081277152FADE694A0A00"}, - {"BBAA99887766554433221103", - "", - "0001020304050607", - "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, - {"BBAA99887766554433221104", - "000102030405060708090A0B0C0D0E0F", - "000102030405060708090A0B0C0D0E0F", - "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, - {"BBAA99887766554433221105", - "000102030405060708090A0B0C0D0E0F", - "", - "8CF761B6902EF764462AD86498CA6B97"}, - {"BBAA99887766554433221106", - "", - "000102030405060708090A0B0C0D0E0F", - "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, - {"BBAA99887766554433221107", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, - {"BBAA99887766554433221108", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "", - "6DC225A071FC1B9F7C69F93B0F1E10DE"}, - {"BBAA99887766554433221109", - "", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, - {"BBAA9988776655443322110A", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, - {"BBAA9988776655443322110B", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "", - "FE80690BEE8A485D11F32965BC9D2A32"}, - {"BBAA9988776655443322110C", - "", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, - {"BBAA9988776655443322110D", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, - {"BBAA9988776655443322110E", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "", - "C5CD9D1850C141E358649994EE701B68"}, - {"BBAA9988776655443322110F", - "", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go deleted file mode 100644 index 5dc158f012..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go +++ /dev/null @@ -1,24 +0,0 @@ -package ocb - -// Second set of test vectors from https://tools.ietf.org/html/rfc7253 -var rfc7253TestVectorTaglen96 = struct { - key, nonce, header, plaintext, ciphertext string -}{"0F0E0D0C0B0A09080706050403020100", - "BBAA9988776655443322110D", - 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} - -var rfc7253AlgorithmTest = []struct { - KEYLEN, TAGLEN int - OUTPUT string }{ - {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, - {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, - {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, - {128, 96, "77A3D8E73589158D25D01209"}, - {192, 96, "05D56EAD2752C86BE6932C5E"}, - {256, 96, "5458359AC23B0CBA9E6330DD"}, - {128, 64, "192C9B7BD90BA06A"}, - {192, 64, "0066BC6E0EF34E24"}, - {256, 64, "7D4EA5D445501CBE"}, - } diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go deleted file mode 100644 index 3c6251d1ce..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2014 Matthew Endsley -// All rights reserved -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted providing that the following conditions -// are met: -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -// POSSIBILITY OF SUCH DAMAGE. - -// Package keywrap is an implementation of the RFC 3394 AES key wrapping -// algorithm. This is used in OpenPGP with elliptic curve keys. -package keywrap - -import ( - "crypto/aes" - "encoding/binary" - "errors" -) - -var ( - // ErrWrapPlaintext is returned if the plaintext is not a multiple - // of 64 bits. - ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") - - // ErrUnwrapCiphertext is returned if the ciphertext is not a - // multiple of 64 bits. - ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") - - // ErrUnwrapFailed is returned if unwrapping a key fails. - ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") - - // NB: the AES NewCipher call only fails if the key is an invalid length. - - // ErrInvalidKey is returned when the AES key is invalid. - ErrInvalidKey = errors.New("keywrap: invalid AES key") -) - -// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
-func Wrap(key, plainText []byte) ([]byte, error) {
-	if len(plainText)%8 != 0 {
-		return nil, ErrWrapPlaintext
-	}
-
-	c, err := aes.NewCipher(key)
-	if err != nil {
-		return nil, ErrInvalidKey
-	}
-
-	nblocks := len(plainText) / 8
-
-	// 1) Initialize variables.
-	var block [aes.BlockSize]byte
-	// - Set A = IV, an initial value (see 2.2.3)
-	for ii := 0; ii < 8; ii++ {
-		block[ii] = 0xA6
-	}
-
-	// - For i = 1 to n
-	// - Set R[i] = P[i]
-	intermediate := make([]byte, len(plainText))
-	copy(intermediate, plainText)
-
-	// 2) Calculate intermediate values.
-	for ii := 0; ii < 6; ii++ {
-		for jj := 0; jj < nblocks; jj++ {
-			// - B = AES(K, A | R[i])
-			copy(block[8:], intermediate[jj*8:jj*8+8])
-			c.Encrypt(block[:], block[:])
-
-			// - A = MSB(64, B) ^ t where t = (n*j)+i
-			t := uint64(ii*nblocks + jj + 1)
-			val := binary.BigEndian.Uint64(block[:8]) ^ t
-			binary.BigEndian.PutUint64(block[:8], val)
-
-			// - R[i] = LSB(64, B)
-			copy(intermediate[jj*8:jj*8+8], block[8:])
-		}
-	}
-
-	// 3) Output results.
-	// - Set C[0] = A
-	// - For i = 1 to n
-	// - C[i] = R[i]
-	return append(block[:8], intermediate...), nil
-}
-
-// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm.
-func Unwrap(key, cipherText []byte) ([]byte, error) {
-	if len(cipherText)%8 != 0 {
-		return nil, ErrUnwrapCiphertext
-	}
-
-	c, err := aes.NewCipher(key)
-	if err != nil {
-		return nil, ErrInvalidKey
-	}
-
-	nblocks := len(cipherText)/8 - 1
-
-	// 1) Initialize variables.
-	var block [aes.BlockSize]byte
-	// - Set A = C[0]
-	copy(block[:8], cipherText[:8])
-
-	// - For i = 1 to n
-	// - Set R[i] = C[i]
-	intermediate := make([]byte, len(cipherText)-8)
-	copy(intermediate, cipherText[8:])
-
-	// 2) Compute intermediate values.
-	for jj := 5; jj >= 0; jj-- {
-		for ii := nblocks - 1; ii >= 0; ii-- {
-			// - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+i
-			// - A = MSB(64, B)
-			t := uint64(jj*nblocks + ii + 1)
-			val := binary.BigEndian.Uint64(block[:8]) ^ t
-			binary.BigEndian.PutUint64(block[:8], val)
-
-			copy(block[8:], intermediate[ii*8:ii*8+8])
-			c.Decrypt(block[:], block[:])
-
-			// - R[i] = LSB(B, 64)
-			copy(intermediate[ii*8:ii*8+8], block[8:])
-		}
-	}
-
-	// 3) Output results.
-	// - If A is an appropriate initial value (see 2.2.3),
-	for ii := 0; ii < 8; ii++ {
-		if block[ii] != 0xA6 {
-			return nil, ErrUnwrapFailed
-		}
-	}
-
-	// - For i = 1 to n
-	// - P[i] = R[i]
-	return intermediate, nil
-}
diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
deleted file mode 100644
index 3b357e5851..0000000000
--- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
-// very similar to PEM except that it has an additional CRC checksum.
-package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor"
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/base64"
-	"github.com/ProtonMail/go-crypto/openpgp/errors"
-	"io"
-)
-
-// A Block represents an OpenPGP armored structure.
-//
-// The encoded form is:
-//	-----BEGIN Type-----
-//	Headers
-//
-//	base64-encoded Bytes
-//	'=' base64 encoded checksum
-//	-----END Type-----
-// where Headers is a possibly empty sequence of Key: Value lines.
-// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. -type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if bytes.HasPrefix(line, armorEnd) { - l.eof = true - return 0, io.EOF - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - l.crcSet = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go deleted file mode 100644 index 6f07582c37..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go deleted file mode 100644 index a94f6150c4..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "hash" - "io" -) - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { - start := 0 - for i, c := range buf { - switch *s { - case 0: - if c == '\r' { - *s = 1 - } else if c == '\n' { - cw.Write(buf[start:i]) - cw.Write(newline) - start = i + 1 - } - case 1: - *s = 0 - } - } - - cw.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - return writeCanonical(cth.h, buf, &cth.s) -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go deleted file mode 100644 index b09e2a7359..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ecdh implements ECDH encryption, suitable for OpenPGP, -// as specified in RFC 6637, section 8. -package ecdh - -import ( - "bytes" - "errors" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" -) - -type KDF struct { - Hash algorithm.Hash - Cipher algorithm.Cipher -} - -type PublicKey struct { - curve ecc.ECDHCurve - Point []byte - KDF -} - -type PrivateKey struct { - PublicKey - D []byte -} - -func NewPublicKey(curve ecc.ECDHCurve, kdfHash algorithm.Hash, kdfCipher algorithm.Cipher) *PublicKey { - return &PublicKey{ - curve: curve, - KDF: KDF{ - Hash: kdfHash, - Cipher: kdfCipher, - }, - } -} - -func NewPrivateKey(key PublicKey) *PrivateKey { - return &PrivateKey{ - PublicKey: key, - } -} - -func (pk *PublicKey) GetCurve() ecc.ECDHCurve { - return pk.curve -} - -func (pk *PublicKey) MarshalPoint() []byte { - return pk.curve.MarshalBytePoint(pk.Point) -} - -func (pk *PublicKey) UnmarshalPoint(p []byte) error { - pk.Point = pk.curve.UnmarshalBytePoint(p) - if pk.Point == nil { - return errors.New("ecdh: failed to parse EC point") - } - return nil -} - -func (sk *PrivateKey) MarshalByteSecret() []byte { - return sk.curve.MarshalByteSecret(sk.D) -} - -func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { - sk.D = sk.curve.UnmarshalByteSecret(d) - - if sk.D == nil { - return errors.New("ecdh: failed to parse scalar") - } - return nil -} - -func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) { - priv = new(PrivateKey) - priv.PublicKey.curve = c - priv.PublicKey.KDF = kdf - priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand) - return -} - -func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { - if len(msg) > 40 { - return nil, nil, errors.New("ecdh: message too long") - } - // the sender MAY use 21, 13, and 5 bytes of padding for AES-128, - // AES-192, and AES-256, respectively, to provide the same number of - // octets, 40 total, as an input to the key wrapping method. - padding := make([]byte, 40-len(msg)) - for i := range padding { - padding[i] = byte(40 - len(msg)) - } - m := append(msg, padding...) - - ephemeral, zb, err := pub.curve.Encaps(random, pub.Point) - if err != nil { - return nil, nil, err - } - - vsG = pub.curve.MarshalBytePoint(ephemeral) - - z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) - if err != nil { - return nil, nil, err - } - - if c, err = keywrap.Wrap(z, m); err != nil { - return nil, nil, err - } - - return vsG, c, nil - -} - -func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) { - var m []byte - zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D) - - // Try buildKey three times to workaround an old bug, see comments in buildKey. - for i := 0; i < 3; i++ { - var z []byte - // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );" - z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2) - if err != nil { - return nil, err - } - - // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]" - m, err = keywrap.Unwrap(z, c) - if err == nil { - break - } - } - - // Only return an error after we've tried all (required) variants of buildKey. 
- if err != nil { - return nil, err - } - - // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding" - // The last byte should be the length of the padding, as per PKCS5; strip it off. - return m[:len(m)-int(m[len(m)-1])], nil -} - -func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { - // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03 - // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap - // || "Anonymous Sender " || recipient_fingerprint; - param := new(bytes.Buffer) - if _, err := param.Write(curveOID); err != nil { - return nil, err - } - algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()} - if _, err := param.Write(algKDF); err != nil { - return nil, err - } - if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { - return nil, err - } - // For v5 keys, the 20 leftmost octets of the fingerprint are used. - if _, err := param.Write(fingerprint[:20]); err != nil { - return nil, err - } - if param.Len() - len(curveOID) != 45 { - return nil, errors.New("ecdh: malformed KDF Param") - } - - // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); - h := pub.KDF.Hash.New() - if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil { - return nil, err - } - zbLen := len(zb) - i := 0 - j := zbLen - 1 - if stripLeading { - // Work around old go crypto bug where the leading zeros are missing. - for ; i < zbLen && zb[i] == 0; i++ {} - } - if stripTrailing { - // Work around old OpenPGP.js bug where insignificant trailing zeros in - // this little-endian number are missing. - // (See https://github.com/openpgpjs/openpgpjs/pull/853.) - for ; j >= 0 && zb[j] == 0; j-- {} - } - if _, err := h.Write(zb[i:j+1]); err != nil { - return nil, err - } - if _, err := h.Write(param.Bytes()); err != nil { - return nil, err - } - mb := h.Sum(nil) - - return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB. - -} - -func Validate(priv *PrivateKey) error { - return priv.curve.ValidateECDH(priv.Point, priv.D) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go deleted file mode 100644 index 6682a21a60..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go +++ /dev/null @@ -1,80 +0,0 @@ -// Package ecdsa implements ECDSA signature, suitable for OpenPGP, -// as specified in RFC 6637, section 5. 
-package ecdsa - -import ( - "errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "io" - "math/big" -) - -type PublicKey struct { - X, Y *big.Int - curve ecc.ECDSACurve -} - -type PrivateKey struct { - PublicKey - D *big.Int -} - -func NewPublicKey(curve ecc.ECDSACurve) *PublicKey { - return &PublicKey{ - curve: curve, - } -} - -func NewPrivateKey(key PublicKey) *PrivateKey { - return &PrivateKey{ - PublicKey: key, - } -} - -func (pk *PublicKey) GetCurve() ecc.ECDSACurve { - return pk.curve -} - -func (pk *PublicKey) MarshalPoint() []byte { - return pk.curve.MarshalIntegerPoint(pk.X, pk.Y) -} - -func (pk *PublicKey) UnmarshalPoint(p []byte) error { - pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p) - if pk.X == nil { - return errors.New("ecdsa: failed to parse EC point") - } - return nil -} - -func (sk *PrivateKey) MarshalIntegerSecret() []byte { - return sk.curve.MarshalIntegerSecret(sk.D) -} - -func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error { - sk.D = sk.curve.UnmarshalIntegerSecret(d) - - if sk.D == nil { - return errors.New("ecdsa: failed to parse scalar") - } - return nil -} - -func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) { - priv = new(PrivateKey) - priv.PublicKey.curve = c - priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand) - return -} - -func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) { - return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash) -} - -func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { - return pub.curve.Verify(pub.X, pub.Y, hash, r, s) -} - -func Validate(priv *PrivateKey) error { - return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes()) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go deleted file mode 100644 index 12866c12df..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go +++ /dev/null @@ -1,91 +0,0 @@ -// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in -// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 -package eddsa - -import ( - "errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "io" -) - -type PublicKey struct { - X []byte - curve ecc.EdDSACurve -} - -type PrivateKey struct { - PublicKey - D []byte -} - -func NewPublicKey(curve ecc.EdDSACurve) *PublicKey { - return &PublicKey{ - curve: curve, - } -} - -func NewPrivateKey(key PublicKey) *PrivateKey { - return &PrivateKey{ - PublicKey: key, - } -} - -func (pk *PublicKey) GetCurve() ecc.EdDSACurve { - return pk.curve -} - -func (pk *PublicKey) MarshalPoint() []byte { - return pk.curve.MarshalBytePoint(pk.X) -} - -func (pk *PublicKey) UnmarshalPoint(x []byte) error { - pk.X = pk.curve.UnmarshalBytePoint(x) - - if pk.X == nil { - return errors.New("eddsa: failed to parse EC point") - } - return nil -} - -func (sk *PrivateKey) MarshalByteSecret() []byte { - return sk.curve.MarshalByteSecret(sk.D) -} - -func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { - sk.D = sk.curve.UnmarshalByteSecret(d) - - if sk.D == nil { - return errors.New("eddsa: failed to parse scalar") - } - return nil -} - -func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) { - priv = new(PrivateKey) - priv.PublicKey.curve = c - priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand) - return -} - -func 
Sign(priv *PrivateKey, message []byte) (r, s []byte, err error) { - sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message) - if err != nil { - return nil, nil, err - } - - r, s = priv.PublicKey.curve.MarshalSignature(sig) - return -} - -func Verify(pub *PublicKey, message, r, s []byte) bool { - sig := pub.curve.UnmarshalSignature(r, s) - if sig == nil { - return false - } - - return pub.curve.Verify(pub.X, message, sig) -} - -func Validate(priv *PrivateKey) error { - return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 6a07d8ff27..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. 
See ``Chosen Ciphertext Attacks
-// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
-// Bleichenbacher, Advances in Cryptology (Crypto '98).
-func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
-	s := new(big.Int).Exp(c1, priv.X, priv.P)
-	if s.ModInverse(s, priv.P) == nil {
-		return nil, errors.New("elgamal: invalid private key")
-	}
-	s.Mul(s, c2)
-	s.Mod(s, priv.P)
-	em := s.Bytes()
-
-	firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
-
-	// The remainder of the plaintext must be a string of non-zero random
-	// octets, followed by a 0, followed by the message.
-	//   lookingForIndex: 1 iff we are still looking for the zero.
-	//   index: the offset of the first zero byte.
-	var lookingForIndex, index int
-	lookingForIndex = 1
-
-	for i := 1; i < len(em); i++ {
-		equals0 := subtle.ConstantTimeByteEq(em[i], 0)
-		index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
-		lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
-	}
-
-	if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
-		return nil, errors.New("elgamal: decryption error")
-	}
-	return em[index+1:], nil
-}
-
-// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
-	_, err = io.ReadFull(rand, s)
-	if err != nil {
-		return
-	}
-
-	for i := 0; i < len(s); i++ {
-		for s[i] == 0 {
-			_, err = io.ReadFull(rand, s[i:i+1])
-			if err != nil {
-				return
-			}
-		}
-	}
-
-	return
-}
diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
deleted file mode 100644
index 17e2bcfed2..0000000000
--- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package errors contains common error types for the OpenPGP packages.
-package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors"
-
-import (
-	"strconv"
-)
-
-// A StructuralError is returned when OpenPGP data is found to be syntactically
-// invalid.
-type StructuralError string
-
-func (s StructuralError) Error() string {
-	return "openpgp: invalid data: " + string(s)
-}
-
-// UnsupportedError indicates that, although the OpenPGP data is valid, it
-// makes use of currently unimplemented features.
-type UnsupportedError string
-
-func (s UnsupportedError) Error() string {
-	return "openpgp: unsupported feature: " + string(s)
-}
-
-// InvalidArgumentError indicates that the caller is in error and passed an
-// incorrect value.
-type InvalidArgumentError string
-
-func (i InvalidArgumentError) Error() string {
-	return "openpgp: invalid argument: " + string(i)
-}
-
-// SignatureError indicates that a syntactically valid signature failed to
-// validate.
-type SignatureError string
-
-func (b SignatureError) Error() string {
-	return "openpgp: invalid signature: " + string(b)
-}
-
-var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch")
-var ErrMDCMissing error = SignatureError("MDC packet not found")
-
-type signatureExpiredError int
-
-func (se signatureExpiredError) Error() string {
-	return "openpgp: signature expired"
-}
-
-var ErrSignatureExpired error = signatureExpiredError(0)
-
-type keyExpiredError int
-
-func (ke keyExpiredError) Error() string {
-	return "openpgp: key expired"
-}
-
-var ErrKeyExpired error = keyExpiredError(0)
-
-type keyIncorrectError int
-
-func (ki keyIncorrectError) Error() string {
-	return "openpgp: incorrect key"
-}
-
-var ErrKeyIncorrect error = keyIncorrectError(0)
-
-// KeyInvalidError indicates that the public key parameters are invalid
-// as they do not match the private ones
-type KeyInvalidError string
-
-func (e KeyInvalidError) Error() string {
-	return "openpgp: invalid key: " + string(e)
-}
-
-type unknownIssuerError int
-
-func (unknownIssuerError) Error() string {
-	return "openpgp: signature made by unknown entity"
-}
-
-var ErrUnknownIssuer error = unknownIssuerError(0)
-
-type keyRevokedError int
-
-func (keyRevokedError) Error() string {
-	return "openpgp: signature made by revoked key"
-}
-
-var ErrKeyRevoked error = keyRevokedError(0)
-
-type UnknownPacketTypeError uint8
-
-func (upte UnknownPacketTypeError) Error() string {
-	return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
-}
-
-// AEADError indicates that there is a problem when initializing or using an
-// AEAD instance, configuration struct, nonces or index values.
-type AEADError string
-
-func (ae AEADError) Error() string {
-	return "openpgp: aead error: " + string(ae)
-}
-
-// ErrDummyPrivateKey results when operations are attempted on a private key
-// that is just a dummy key. See
-// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
-type ErrDummyPrivateKey string
-
-func (dke ErrDummyPrivateKey) Error() string {
-	return "openpgp: s2k GNU dummy key: " + string(dke)
-}
diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
deleted file mode 100644
index d067065186..0000000000
--- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package algorithm
-
-import (
-	"crypto/cipher"
-	"github.com/ProtonMail/go-crypto/eax"
-	"github.com/ProtonMail/go-crypto/ocb"
-)
-
-// AEADMode defines the Authenticated Encryption with Associated Data mode of
-// operation.
-type AEADMode uint8
-
-// Supported modes of operation (see RFC4880bis [EAX] and RFC7253)
-const (
-	AEADModeEAX = AEADMode(1)
-	AEADModeOCB = AEADMode(2)
-	AEADModeGCM = AEADMode(3)
-)
-
-// TagLength returns the length in bytes of authentication tags.
-func (mode AEADMode) TagLength() int {
-	switch mode {
-	case AEADModeEAX:
-		return 16
-	case AEADModeOCB:
-		return 16
-	case AEADModeGCM:
-		return 16
-	default:
-		return 0
-	}
-}
-
-// NonceLength returns the length in bytes of nonces.
-func (mode AEADMode) NonceLength() int { - switch mode { - case AEADModeEAX: - return 16 - case AEADModeOCB: - return 15 - case AEADModeGCM: - return 12 - default: - return 0 - } -} - -// New returns a fresh instance of the given mode -func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { - var err error - switch mode { - case AEADModeEAX: - alg, err = eax.NewEAX(block) - case AEADModeOCB: - alg, err = ocb.NewOCB(block) - case AEADModeGCM: - alg, err = cipher.NewGCM(block) - } - if err != nil { - panic(err.Error()) - } - return alg -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go deleted file mode 100644 index 5760cff80e..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package algorithm - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - - "golang.org/x/crypto/cast5" -) - -// Cipher is an official symmetric key cipher algorithm. See RFC 4880, -// section 9.2. -type Cipher interface { - // Id returns the algorithm ID, as a byte, of the cipher. - Id() uint8 - // KeySize returns the key size, in bytes, of the cipher. - KeySize() int - // BlockSize returns the block size, in bytes, of the cipher. - BlockSize() int - // New returns a fresh instance of the given cipher. - New(key []byte) cipher.Block -} - -// The following constants mirror the OpenPGP standard (RFC 4880). -const ( - TripleDES = CipherFunction(2) - CAST5 = CipherFunction(3) - AES128 = CipherFunction(7) - AES192 = CipherFunction(8) - AES256 = CipherFunction(9) -) - -// CipherById represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -var CipherById = map[uint8]Cipher{ - TripleDES.Id(): TripleDES, - CAST5.Id(): CAST5, - AES128.Id(): AES128, - AES192.Id(): AES192, - AES256.Id(): AES256, -} - -type CipherFunction uint8 - -// ID returns the algorithm Id, as a byte, of cipher. -func (sk CipherFunction) Id() uint8 { - return uint8(sk) -} - -var keySizeByID = map[uint8]int{ - TripleDES.Id(): 24, - CAST5.Id(): cast5.KeySize, - AES128.Id(): 16, - AES192.Id(): 24, - AES256.Id(): 32, -} - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case TripleDES: - return 24 - case CAST5: - return cast5.KeySize - case AES128: - return 16 - case AES192: - return 24 - case AES256: - return 32 - } - return 0 -} - -// BlockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) BlockSize() int { - switch cipher { - case TripleDES: - return des.BlockSize - case CAST5: - return 8 - case AES128, AES192, AES256: - return 16 - } - return 0 -} - -// New returns a fresh instance of the given cipher. 
-func (cipher CipherFunction) New(key []byte) (block cipher.Block) { - var err error - switch cipher { - case TripleDES: - block, err = des.NewTripleDESCipher(key) - case CAST5: - block, err = cast5.NewCipher(key) - case AES128, AES192, AES256: - block, err = aes.NewCipher(key) - } - if err != nil { - panic(err.Error()) - } - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go deleted file mode 100644 index 82e43d6740..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package algorithm - -import ( - "crypto" - "fmt" - "hash" -) - -// Hash is an official hash function algorithm. See RFC 4880, section 9.4. -type Hash interface { - // Id returns the algorithm ID, as a byte, of Hash. - Id() uint8 - // Available reports whether the given hash function is linked into the binary. - Available() bool - // HashFunc simply returns the value of h so that Hash implements SignerOpts. - HashFunc() crypto.Hash - // New returns a new hash.Hash calculating the given hash function. New - // panics if the hash function is not linked into the binary. - New() hash.Hash - // Size returns the length, in bytes, of a digest resulting from the given - // hash function. It doesn't require that the hash function in question be - // linked into the program. - Size() int - // String is the name of the hash function corresponding to the given - // OpenPGP hash id. - String() string -} - -// The following vars mirror the crypto/Hash supported hash functions. -var ( - SHA1 Hash = cryptoHash{2, crypto.SHA1} - SHA256 Hash = cryptoHash{8, crypto.SHA256} - SHA384 Hash = cryptoHash{9, crypto.SHA384} - SHA512 Hash = cryptoHash{10, crypto.SHA512} - SHA224 Hash = cryptoHash{11, crypto.SHA224} - SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256} - SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512} -) - -// HashById represents the different hash functions specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 -var ( - HashById = map[uint8]Hash{ - SHA256.Id(): SHA256, - SHA384.Id(): SHA384, - SHA512.Id(): SHA512, - SHA224.Id(): SHA224, - SHA3_256.Id(): SHA3_256, - SHA3_512.Id(): SHA3_512, - } -) - -// cryptoHash contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. -type cryptoHash struct { - id uint8 - crypto.Hash -} - -// Id returns the algorithm ID, as a byte, of cryptoHash. -func (h cryptoHash) Id() uint8 { - return h.id -} - -var hashNames = map[uint8]string{ - SHA256.Id(): "SHA256", - SHA384.Id(): "SHA384", - SHA512.Id(): "SHA512", - SHA224.Id(): "SHA224", - SHA3_256.Id(): "SHA3-256", - SHA3_512.Id(): "SHA3-512", -} - -func (h cryptoHash) String() string { - s, ok := hashNames[h.id] - if !ok { - panic(fmt.Sprintf("Unsupported hash function %d", h.id)) - } - return s -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - if hash, ok := HashById[id]; ok { - return hash.HashFunc(), true - } - return 0, false -} - -// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP -// hash id, allowing sha1. 
-func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) { - if hash, ok := HashById[id]; ok { - return hash.HashFunc(), true - } - - if id == SHA1.Id() { - return SHA1.HashFunc(), true - } - - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. -func HashIdToString(id byte) (name string, ok bool) { - if hash, ok := HashById[id]; ok { - return hash.String(), true - } - return "", false -} - -// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for id, hash := range HashById { - if hash.HashFunc() == h { - return id, true - } - } - - return 0, false -} - -// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash, -// allowing instances of SHA1 -func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) { - for id, hash := range HashById { - if hash.HashFunc() == h { - return id, true - } - } - - if h == SHA1.HashFunc() { - return SHA1.Id(), true - } - - return 0, false -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go deleted file mode 100644 index 266635ec57..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go +++ /dev/null @@ -1,171 +0,0 @@ -// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. -package ecc - -import ( - "crypto/subtle" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - x25519lib "github.com/cloudflare/circl/dh/x25519" -) - -type curve25519 struct {} - -func NewCurve25519() *curve25519 { - return &curve25519{} -} - -func (c *curve25519) GetCurveName() string { - return "curve25519" -} - -// MarshalBytePoint encodes the public point from native format, adding the prefix. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 -func (c *curve25519) MarshalBytePoint(point [] byte) []byte { - return append([]byte{0x40}, point...) -} - -// UnmarshalBytePoint decodes the public point to native format, removing the prefix. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 -func (c *curve25519) UnmarshalBytePoint(point []byte) []byte { - if len(point) != x25519lib.Size + 1 { - return nil - } - - // Remove prefix - return point[1:] -} - -// MarshalByteSecret encodes the secret scalar from native format. -// Note that the EC secret scalar differs from the definition of public keys in -// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is -// more uniform with how big integers are represented in OpenPGP, and (2) the -// leading zeros are truncated. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 -// Note that leading zero bytes are stripped later when encoding as an MPI. -func (c *curve25519) MarshalByteSecret(secret []byte) []byte { - d := make([]byte, x25519lib.Size) - copyReversed(d, secret) - - // The following ensures that the private key is a number of the form - // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of - // the curve. - // - // This masking is done internally in the underlying lib and so is unnecessary - // for security, but OpenPGP implementations require that private keys be - // pre-masked. 
- d[0] &= 127
- d[0] |= 64
- d[31] &= 248
-
- return d
-}
-
-// UnmarshalByteSecret decodes the secret scalar from native format.
-// Note that the EC secret scalar differs from the definition of public keys in
-// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is
-// more uniform with how big integers are represented in OpenPGP, and (2) the
-// leading zeros are truncated.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1
-func (c *curve25519) UnmarshalByteSecret(d []byte) []byte {
- if len(d) > x25519lib.Size {
- return nil
- }
-
- // Ensure truncated leading bytes are re-added
- secret := make([]byte, x25519lib.Size)
- copyReversed(secret, d)
-
- return secret
-}
-
-// generateKeyPairBytes Generates a private-public key-pair.
-// 'priv' is a private key; a little-endian scalar belonging to the set
-// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the
-// curve. 'pub' is simply 'priv' * G where G is the base point.
-// See https://cr.yp.to/ecdh.html and RFC7748, sec 5.
-func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) {
- _, err = io.ReadFull(rand, priv[:])
- if err != nil {
- return
- }
-
- x25519lib.KeyGen(&pub, &priv)
- return
-}
-
-func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}"
- // ephemeralPrivate corresponds to `v`.
- // ephemeralPublic corresponds to `V`.
- ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return nil, nil, err
- }
-
- // RFC6637 §8: "Obtain the authenticated recipient public key R"
- // pubKey corresponds to `R`.
- var pubKey x25519lib.Key
- copy(pubKey[:], point)
-
- // RFC6637 §8: "Compute the shared point S = vR"
- // "VB = convert point V to the octet string"
- // sharedPoint corresponds to `VB`.
- var sharedPoint x25519lib.Key
- x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey)
-
- return ephemeralPublic[:], sharedPoint[:], nil
-}
-
-func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) {
- var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key
- // RFC6637 §8: "The decryption is the inverse of the method given."
- // All quoted descriptions in comments below describe encryption, and
- // the reverse is performed.
- // vsG corresponds to `VB` in RFC6637 §8 .
-
- // RFC6637 §8: "VB = convert point V to the octet string"
- copy(ephemeralPublic[:], vsG)
-
- // decodedPrivate corresponds to `r` in RFC6637 §8 .
- copy(decodedPrivate[:], secret)
-
- // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating
- // S = rV = rvG, where (r,R) is the recipient's key pair."
- // sharedPoint corresponds to `S`.
- x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic) - - return sharedPoint[:], nil -} - -func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) { - var pk, sk x25519lib.Key - copy(sk[:], secret) - x25519lib.KeyGen(&pk, &sk) - - if subtle.ConstantTimeCompare(point, pk[:]) == 0 { - return errors.KeyInvalidError("ecc: invalid curve25519 public point") - } - - return nil -} - -func copyReversed(out []byte, in []byte) { - l := len(in) - for i := 0; i < l; i++ { - out[i] = in[l-i-1] - } -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go deleted file mode 100644 index df2878c957..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go +++ /dev/null @@ -1,140 +0,0 @@ -// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. -package ecc - -import ( - "bytes" - "crypto/elliptic" - "github.com/ProtonMail/go-crypto/bitcurves" - "github.com/ProtonMail/go-crypto/brainpool" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" -) - -type CurveInfo struct { - GenName string - Oid *encoding.OID - Curve Curve -} - -var Curves = []CurveInfo{ - { - // NIST P-256 - GenName: "P256", - Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), - Curve: NewGenericCurve(elliptic.P256()), - }, - { - // NIST P-384 - GenName: "P384", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), - Curve: NewGenericCurve(elliptic.P384()), - }, - { - // NIST P-521 - GenName: "P521", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), - Curve: NewGenericCurve(elliptic.P521()), - }, - { - // SecP256k1 - GenName: "SecP256k1", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), - Curve: NewGenericCurve(bitcurves.S256()), - }, - { - // Curve25519 - GenName: "Curve25519", - Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), - Curve: NewCurve25519(), - }, - { - // X448 - GenName: "Curve448", - Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), - Curve: NewX448(), - }, - { - // Ed25519 - GenName: "Curve25519", - Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), - Curve: NewEd25519(), - }, - { - // Ed448 - GenName: "Curve448", - Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}), - Curve: NewEd448(), - }, - { - // BrainpoolP256r1 - GenName: "BrainpoolP256", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), - Curve: NewGenericCurve(brainpool.P256r1()), - }, - { - // BrainpoolP384r1 - GenName: "BrainpoolP384", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), - Curve: NewGenericCurve(brainpool.P384r1()), - }, - { - // BrainpoolP512r1 - GenName: "BrainpoolP512", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), - Curve: NewGenericCurve(brainpool.P512r1()), - }, -} - -func FindByCurve(curve Curve) *CurveInfo { - for _, curveInfo := range Curves { - if curveInfo.Curve.GetCurveName() == curve.GetCurveName() { - return &curveInfo - } - } - return nil -} - -func FindByOid(oid encoding.Field) *CurveInfo { - var rawBytes = oid.Bytes() - for _, curveInfo := range Curves { - if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { - return &curveInfo - } - } - return nil -} - -func FindEdDSAByGenName(curveGenName string) EdDSACurve { - for _, curveInfo := range Curves { - if 
curveInfo.GenName == curveGenName { - curve, ok := curveInfo.Curve.(EdDSACurve) - if ok { - return curve - } - } - } - return nil -} - -func FindECDSAByGenName(curveGenName string) ECDSACurve { - for _, curveInfo := range Curves { - if curveInfo.GenName == curveGenName { - curve, ok := curveInfo.Curve.(ECDSACurve) - if ok { - return curve - } - } - } - return nil -} - -func FindECDHByGenName(curveGenName string) ECDHCurve { - for _, curveInfo := range Curves { - if curveInfo.GenName == curveGenName { - curve, ok := curveInfo.Curve.(ECDHCurve) - if ok { - return curve - } - } - } - return nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go deleted file mode 100644 index c47072b49e..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. -package ecc - -import ( - "io" - "math/big" -) - -type Curve interface { - GetCurveName() string -} - -type ECDSACurve interface { - Curve - MarshalIntegerPoint(x, y *big.Int) []byte - UnmarshalIntegerPoint([]byte) (x, y *big.Int) - MarshalIntegerSecret(d *big.Int) []byte - UnmarshalIntegerSecret(d []byte) *big.Int - GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) - Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) - Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool - ValidateECDSA(x, y *big.Int, secret []byte) error -} - -type EdDSACurve interface { - Curve - MarshalBytePoint(x []byte) []byte - UnmarshalBytePoint([]byte) (x []byte) - MarshalByteSecret(d []byte) []byte - UnmarshalByteSecret(d []byte) []byte - MarshalSignature(sig []byte) (r, s []byte) - UnmarshalSignature(r, s []byte) (sig []byte) - GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) - Sign(publicKey, privateKey, message []byte) (sig []byte, err error) - Verify(publicKey, message, sig []byte) bool - ValidateEdDSA(publicKey, privateKey []byte) (err error) -} -type ECDHCurve interface { - Curve - MarshalBytePoint([]byte) (encoded []byte) - UnmarshalBytePoint(encoded []byte) ([]byte) - MarshalByteSecret(d []byte) []byte - UnmarshalByteSecret(d []byte) []byte - GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) - Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) - Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) - ValidateECDH(public []byte, secret []byte) error -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go deleted file mode 100644 index 29f6cba9d8..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. -package ecc - -import ( - "crypto/subtle" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - ed25519lib "github.com/cloudflare/circl/sign/ed25519" -) - -const ed25519Size = 32 -type ed25519 struct {} - -func NewEd25519() *ed25519 { - return &ed25519{} -} - -func (c *ed25519) GetCurveName() string { - return "ed25519" -} - -// MarshalBytePoint encodes the public point from native format, adding the prefix. 
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed25519) MarshalBytePoint(x []byte) []byte { - return append([]byte{0x40}, x...) -} - -// UnmarshalBytePoint decodes a point from prefixed format to native. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) { - if len(point) != ed25519lib.PublicKeySize + 1 { - return nil - } - - // Return unprefixed - return point[1:] -} - -// MarshalByteSecret encodes a scalar in native format. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed25519) MarshalByteSecret(d []byte) []byte { - return d -} - -// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) { - if len(s) > ed25519lib.SeedSize { - return nil - } - - // Handle stripped leading zeroes - d = make([]byte, ed25519lib.SeedSize) - copy(d[ed25519lib.SeedSize - len(s):], s) - return -} - -// MarshalSignature splits a signature in R and S. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 -func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) { - return sig[:ed25519Size], sig[ed25519Size:] -} - -// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 -func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) { - // Check size - if len(r) > 32 || len(s) > 32 { - return nil - } - - sig = make([]byte, ed25519lib.SignatureSize) - - // Handle stripped leading zeroes - copy(sig[ed25519Size-len(r):ed25519Size], r) - copy(sig[ed25519lib.SignatureSize-len(s):], s) - return sig -} - -func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { - pk, sk, err := ed25519lib.GenerateKey(rand) - - if err != nil { - return nil, nil, err - } - - return pk, sk[:ed25519lib.SeedSize], nil -} - -func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { - return append(privateKey, publicKey...) -} - -func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { - sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message) - return sig, nil -} - -func (c *ed25519) Verify(publicKey, message, sig []byte) bool { - return ed25519lib.Verify(publicKey, message, sig) -} - -func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) { - priv := getEd25519Sk(publicKey, privateKey) - expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed()) - if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { - return errors.KeyInvalidError("ecc: invalid ed25519 secret") - } - return nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go deleted file mode 100644 index a2df3dab87..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. 
-package ecc - -import ( - "crypto/subtle" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - ed448lib "github.com/cloudflare/circl/sign/ed448" -) - -type ed448 struct {} - -func NewEd448() *ed448 { - return &ed448{} -} - -func (c *ed448) GetCurveName() string { - return "ed448" -} - -// MarshalBytePoint encodes the public point from native format, adding the prefix. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed448) MarshalBytePoint(x []byte) []byte { - // Return prefixed - return append([]byte{0x40}, x...) -} - -// UnmarshalBytePoint decodes a point from prefixed format to native. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) { - if len(point) != ed448lib.PublicKeySize + 1 { - return nil - } - - // Strip prefix - return point[1:] -} - -// MarshalByteSecret encoded a scalar from native format to prefixed. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed448) MarshalByteSecret(d []byte) []byte { - // Return prefixed - return append([]byte{0x40}, d...) -} - -// UnmarshalByteSecret decodes a scalar from prefixed format to native. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 -func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) { - // Check prefixed size - if len(s) != ed448lib.SeedSize + 1 { - return nil - } - - // Strip prefix - return s[1:] -} - -// MarshalSignature splits a signature in R and S, where R is in prefixed native format and -// S is an MPI with value zero. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 -func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) { - return append([]byte{0x40}, sig...), []byte{} -} - -// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 -func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) { - if len(r) != ed448lib.SignatureSize + 1 { - return nil - } - - return r[1:] -} - -func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { - pk, sk, err := ed448lib.GenerateKey(rand) - - if err != nil { - return nil, nil, err - } - - return pk, sk[:ed448lib.SeedSize], nil -} - -func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { - return append(privateKey, publicKey...) -} - -func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { - // Ed448 is used with the empty string as a context string. - // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 - sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "") - - return sig, nil -} - -func (c *ed448) Verify(publicKey, message, sig []byte) bool { - // Ed448 is used with the empty string as a context string. 
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 - return ed448lib.Verify(publicKey, message, sig, "") -} - -func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) { - priv := getEd448Sk(publicKey, privateKey) - expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed()) - if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { - return errors.KeyInvalidError("ecc: invalid ed448 secret") - } - return nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go deleted file mode 100644 index e28d7c7106..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go +++ /dev/null @@ -1,149 +0,0 @@ -// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. -package ecc - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "fmt" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "io" - "math/big" -) - -type genericCurve struct { - Curve elliptic.Curve -} - -func NewGenericCurve(c elliptic.Curve) *genericCurve { - return &genericCurve{ - Curve: c, - } -} - -func (c *genericCurve) GetCurveName() string { - return c.Curve.Params().Name -} - -func (c *genericCurve) MarshalBytePoint(point []byte) []byte { - return point -} - -func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte { - return point -} - -func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte { - return elliptic.Marshal(c.Curve, x, y) -} - -func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) { - return elliptic.Unmarshal(c.Curve, point) -} - -func (c *genericCurve) MarshalByteSecret(d []byte) []byte { - return d -} - -func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte { - return d -} - -func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte { - return d.Bytes() -} - -func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int { - return new(big.Int).SetBytes(d) -} - -func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) { - secret, x, y, err := elliptic.GenerateKey(c.Curve, rand) - if err != nil { - return nil, nil, err - } - - point = elliptic.Marshal(c.Curve, x, y) - return point, secret, nil -} - -func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) { - priv, err := ecdsa.GenerateKey(c.Curve, rand) - if err != nil { - return - } - - return priv.X, priv.Y, priv.D, nil -} - -func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { - xP, yP := elliptic.Unmarshal(c.Curve, point) - if xP == nil { - panic("invalid point") - } - - d, x, y, err := elliptic.GenerateKey(c.Curve, rand) - if err != nil { - return nil, nil, err - } - - vsG := elliptic.Marshal(c.Curve, x, y) - zbBig, _ := c.Curve.ScalarMult(xP, yP, d) - - byteLen := (c.Curve.Params().BitSize + 7) >> 3 - zb := make([]byte, byteLen) - zbBytes := zbBig.Bytes() - copy(zb[byteLen-len(zbBytes):], zbBytes) - - return vsG, zb, nil -} - -func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { - x, y := elliptic.Unmarshal(c.Curve, ephemeral) - zbBig, _ := c.Curve.ScalarMult(x, y, secret) - byteLen := (c.Curve.Params().BitSize + 7) >> 3 - zb := make([]byte, byteLen) - zbBytes := zbBig.Bytes() - copy(zb[byteLen-len(zbBytes):], zbBytes) - - return zb, nil -} - -func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) 
(r, s *big.Int, err error) { - priv := &ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}} - return ecdsa.Sign(rand, priv, hash) -} - -func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool { - pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve} - return ecdsa.Verify(pub, hash, r, s) -} - -func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error { - // the public point should not be at infinity (0,0) - zero := new(big.Int) - if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 { - return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name)) - } - - // re-derive the public point Q' = (X,Y) = dG - // to compare to declared Q in public key - expectedX, expectedY := c.Curve.ScalarBaseMult(secret) - if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 { - return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name)) - } - - return nil -} - -func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error { - return c.validate(xP, yP, secret) -} - -func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error { - xP, yP := elliptic.Unmarshal(c.Curve, point) - if xP == nil { - return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name)) - } - - return c.validate(xP, yP, secret) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go deleted file mode 100644 index 4a940b4f4d..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go +++ /dev/null @@ -1,105 +0,0 @@ -// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. -package ecc - -import ( - "crypto/subtle" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - x448lib "github.com/cloudflare/circl/dh/x448" -) - -type x448 struct {} - -func NewX448() *x448 { - return &x448{} -} - -func (c *x448) GetCurveName() string { - return "x448" -} - -// MarshalBytePoint encodes the public point from native format, adding the prefix. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 -func (c *x448) MarshalBytePoint(point []byte) []byte { - return append([]byte{0x40}, point...) -} - -// UnmarshalBytePoint decodes a point from prefixed format to native. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 -func (c *x448) UnmarshalBytePoint(point []byte) []byte { - if len(point) != x448lib.Size + 1 { - return nil - } - - return point[1:] -} - -// MarshalByteSecret encoded a scalar from native format to prefixed. -// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2 -func (c *x448) MarshalByteSecret(d []byte) []byte { - return append([]byte{0x40}, d...) -} - -// UnmarshalByteSecret decodes a scalar from prefixed format to native. 
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
-func (c *x448) UnmarshalByteSecret(d []byte) []byte {
- if len(d) != x448lib.Size + 1 {
- return nil
- }
-
- // Store without prefix
- return d[1:]
-}
-
-func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) {
- if _, err = rand.Read(sk[:]); err != nil {
- return
- }
-
- x448lib.KeyGen(&pk, &sk)
- return
-}
-
-func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- var pk, ss x448lib.Key
- seed, e, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return nil, nil, err
- }
-
- copy(pk[:], point)
- x448lib.Shared(&ss, &seed, &pk)
-
- return e[:], ss[:], nil
-}
-
-func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
- var ss, sk, e x448lib.Key
-
- copy(sk[:], secret)
- copy(e[:], ephemeral)
- x448lib.Shared(&ss, &sk, &e)
-
- return ss[:], nil
-}
-
-func (c *x448) ValidateECDH(point []byte, secret []byte) error {
- var sk, pk, expectedPk x448lib.Key
-
- copy(pk[:], point)
- copy(sk[:], secret)
- x448lib.KeyGen(&expectedPk, &sk)
-
- if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 {
- return errors.KeyInvalidError("ecc: invalid x448 public point")
- }
-
- return nil
-}
diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
deleted file mode 100644
index 6c921481b7..0000000000
--- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package encoding implements openpgp packet field encodings as specified in
-// RFC 4880 and 6637.
-package encoding
-
-import "io"
-
-// Field is an encoded field of an openpgp packet.
-type Field interface {
- // Bytes returns the decoded data.
- Bytes() []byte
-
- // BitLength is the size in bits of the decoded data.
- BitLength() uint16
-
- // EncodedBytes returns the encoded data.
- EncodedBytes() []byte
-
- // EncodedLength is the size in bytes of the encoded data.
- EncodedLength() uint16
-
- // ReadFrom reads the next Field from r.
- ReadFrom(r io.Reader) (int64, error)
-}
diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
deleted file mode 100644
index 02e5e695c3..0000000000
--- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package encoding
-
-import (
- "io"
- "math/big"
- "math/bits"
-)
-
-// An MPI is used to store the contents of a big integer, along with the bit
-// length that was specified in the original input. This allows the MPI to be
-// reserialized exactly.
-type MPI struct {
- bytes []byte
- bitLength uint16
-}
-
-// NewMPI returns a MPI initialized with bytes.
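Before the constructor below, a self-contained sketch of the wire framing this type implements: a two-octet big-endian bit count followed by the magnitude, with leading zero bytes stripped first (encodeMPI is a hypothetical helper, not the vendored API):

package main

import (
	"fmt"
	"math/bits"
)

// encodeMPI mirrors the framing of the deleted MPI type: strip leading zero
// bytes, then prefix the remaining magnitude with its bit length.
func encodeMPI(b []byte) []byte {
	for len(b) != 0 && b[0] == 0 {
		b = b[1:]
	}
	var bitLen uint16
	if len(b) > 0 {
		bitLen = 8*uint16(len(b)-1) + uint16(bits.Len8(b[0]))
	}
	return append([]byte{byte(bitLen >> 8), byte(bitLen)}, b...)
}

func main() {
	// 0x01ff has 9 significant bits, so the encoding is 00 09 01 ff.
	fmt.Printf("% x\n", encodeMPI([]byte{0x00, 0x01, 0xff}))
}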
-func NewMPI(bytes []byte) *MPI { - for len(bytes) != 0 && bytes[0] == 0 { - bytes = bytes[1:] - } - if len(bytes) == 0 { - bitLength := uint16(0) - return &MPI{bytes, bitLength} - } - bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0])) - return &MPI{bytes, bitLength} -} - -// Bytes returns the decoded data. -func (m *MPI) Bytes() []byte { - return m.bytes -} - -// BitLength is the size in bits of the decoded data. -func (m *MPI) BitLength() uint16 { - return m.bitLength -} - -// EncodedBytes returns the encoded data. -func (m *MPI) EncodedBytes() []byte { - return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...) -} - -// EncodedLength is the size in bytes of the encoded data. -func (m *MPI) EncodedLength() uint16 { - return uint16(2 + len(m.bytes)) -} - -// ReadFrom reads into m the next MPI from r. -func (m *MPI) ReadFrom(r io.Reader) (int64, error) { - var buf [2]byte - n, err := io.ReadFull(r, buf[0:]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return int64(n), err - } - - m.bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - m.bytes = make([]byte, (int(m.bitLength)+7)/8) - - nn, err := io.ReadFull(r, m.bytes) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - - // remove leading zero bytes from malformed GnuPG encoded MPIs: - // https://bugs.gnupg.org/gnupg/issue1853 - // for _, b := range m.bytes { - // if b != 0 { - // break - // } - // m.bytes = m.bytes[1:] - // m.bitLength -= 8 - // } - - return int64(n) + int64(nn), err -} - -// SetBig initializes m with the bits from n. -func (m *MPI) SetBig(n *big.Int) *MPI { - m.bytes = n.Bytes() - m.bitLength = uint16(n.BitLen()) - return m -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go deleted file mode 100644 index c9df9fe232..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package encoding - -import ( - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// OID is used to store a variable-length field with a one-octet size -// prefix. See https://tools.ietf.org/html/rfc6637#section-9. -type OID struct { - bytes []byte -} - -const ( - // maxOID is the maximum number of bytes in a OID. - maxOID = 254 - // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC - // specifies are reserved. - reservedOIDLength1 = 0 - reservedOIDLength2 = 0xff -) - -// NewOID returns a OID initialized with bytes. -func NewOID(bytes []byte) *OID { - switch len(bytes) { - case reservedOIDLength1, reservedOIDLength2: - panic("encoding: NewOID argument length is reserved") - default: - if len(bytes) > maxOID { - panic("encoding: NewOID argument too large") - } - } - - return &OID{ - bytes: bytes, - } -} - -// Bytes returns the decoded data. -func (o *OID) Bytes() []byte { - return o.bytes -} - -// BitLength is the size in bits of the decoded data. -func (o *OID) BitLength() uint16 { - return uint16(len(o.bytes) * 8) -} - -// EncodedBytes returns the encoded data. -func (o *OID) EncodedBytes() []byte { - return append([]byte{byte(len(o.bytes))}, o.bytes...) -} - -// EncodedLength is the size in bytes of the encoded data. 
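A small sketch of the OID framing documented above: a one-octet length prefix where lengths 0x00 and 0xff are reserved by RFC 6637 (encodeOID is a hypothetical helper, not the vendored API):

package main

import (
	"fmt"
)

// encodeOID mirrors the deleted OID type's encoding: one length octet, then
// the body; the reserved lengths 0 and 0xff are rejected here.
func encodeOID(oid []byte) ([]byte, error) {
	if len(oid) == 0 || len(oid) >= 0xff {
		return nil, fmt.Errorf("encoding: oid length %d is reserved or too large", len(oid))
	}
	return append([]byte{byte(len(oid))}, oid...), nil
}

func main() {
	// The Ed25519 OID from the deleted curve_info.go table.
	enc, err := encodeOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", enc) // 09 2b 06 01 04 01 da 47 0f 01
}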
-func (o *OID) EncodedLength() uint16 { - return uint16(1 + len(o.bytes)) -} - -// ReadFrom reads into b the next OID from r. -func (o *OID) ReadFrom(r io.Reader) (int64, error) { - var buf [1]byte - n, err := io.ReadFull(r, buf[:]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return int64(n), err - } - - switch buf[0] { - case reservedOIDLength1, reservedOIDLength2: - return int64(n), errors.UnsupportedError("reserved for future extensions") - } - - o.bytes = make([]byte, buf[0]) - - nn, err := io.ReadFull(r, o.bytes) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - - return int64(n) + int64(nn), err -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go deleted file mode 100644 index 0e71934cd9..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - goerrors "errors" - "io" - "math/big" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/ecdsa" - "github.com/ProtonMail/go-crypto/openpgp/eddsa" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "github.com/ProtonMail/go-crypto/openpgp/packet" -) - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. -func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - keyLifetimeSecs := config.KeyLifetime() - - // Generate a primary signing key - primaryPrivRaw, err := newSigner(config) - if err != nil { - return nil, err - } - primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) - if config != nil && config.V5Keys { - primary.UpgradeToV5() - } - - e := &Entity{ - PrimaryKey: &primary.PublicKey, - PrivateKey: primary, - Identities: make(map[string]*Identity), - Subkeys: []Subkey{}, - } - - err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) - if err != nil { - return nil, err - } - - // NOTE: No key expiry here, but we will not return this subkey in EncryptionKey() - // if the primary/master key has expired. 
- err = e.addEncryptionSubkey(config, creationTime, 0) - if err != nil { - return nil, err - } - - return e, nil -} - -func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { - creationTime := config.Now() - keyLifetimeSecs := config.KeyLifetime() - return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) -} - -func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return errors.InvalidArgumentError("user id field contained invalid characters") - } - - if _, ok := t.Identities[uid.Id]; ok { - return errors.InvalidArgumentError("user id exist") - } - - primary := t.PrivateKey - - isPrimaryId := len(t.Identities) == 0 - - selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) - selfSignature.CreationTime = creationTime - selfSignature.KeyLifetimeSecs = &keyLifetimeSecs - selfSignature.IsPrimaryId = &isPrimaryId - selfSignature.FlagsValid = true - selfSignature.FlagSign = true - selfSignature.FlagCertify = true - selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 - selfSignature.SEIPDv2 = config.AEAD() != nil - - // Set the PreferredHash for the SelfSignature from the packet.Config. - // If it is not the must-implement algorithm from rfc4880bis, append that. - hash, ok := algorithm.HashToHashId(config.Hash()) - if !ok { - return errors.UnsupportedError("unsupported preferred hash function") - } - - selfSignature.PreferredHash = []uint8{hash} - if config.Hash() != crypto.SHA256 { - selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) - } - - // Likewise for DefaultCipher. - selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} - if config.Cipher() != packet.CipherAES128 { - selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) - } - - // We set CompressionNone as the preferred compression algorithm because - // of compression side channel attacks, then append the configured - // DefaultCompressionAlgo if any is set (to signal support for cases - // where the application knows that using compression is safe). - selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} - if config.Compression() != packet.CompressionNone { - selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) - } - - // And for DefaultMode. - modes := []uint8{uint8(config.AEAD().Mode())} - if config.AEAD().Mode() != packet.AEADModeOCB { - modes = append(modes, uint8(packet.AEADModeOCB)) - } - - // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) - for _, cipher := range selfSignature.PreferredSymmetric { - for _, mode := range modes { - selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) - } - } - - // User ID binding signature - err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) - if err != nil { - return err - } - t.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: selfSignature, - Signatures: []*packet.Signature{selfSignature}, - } - return nil -} - -// AddSigningSubkey adds a signing keypair as a subkey to the Entity. -// If config is nil, sensible defaults will be used. 
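For reviewers tracing what this vendored API offers: a hedged usage sketch of entity creation plus the subkey helper documented above, assuming the public import path github.com/ProtonMail/go-crypto/openpgp used throughout this vendor tree (identity strings are placeholders):

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	// A nil *packet.Config selects the library defaults, per the doc comments.
	e, err := openpgp.NewEntity("Alice Example", "test key", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	if err := e.AddSigningSubkey(nil); err != nil {
		panic(err)
	}
	fmt.Printf("primary %X now has %d subkey(s)\n", e.PrimaryKey.Fingerprint, len(e.Subkeys))
}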
-func (e *Entity) AddSigningSubkey(config *packet.Config) error { - creationTime := config.Now() - keyLifetimeSecs := config.KeyLifetime() - - subPrivRaw, err := newSigner(config) - if err != nil { - return err - } - sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) - sub.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() - } - - subkey := Subkey{ - PublicKey: &sub.PublicKey, - PrivateKey: sub, - } - subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) - subkey.Sig.CreationTime = creationTime - subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs - subkey.Sig.FlagsValid = true - subkey.Sig.FlagSign = true - subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config) - subkey.Sig.EmbeddedSignature.CreationTime = creationTime - - err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) - if err != nil { - return err - } - - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return err - } - - e.Subkeys = append(e.Subkeys, subkey) - return nil -} - -// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity. -// If config is nil, sensible defaults will be used. -func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { - creationTime := config.Now() - keyLifetimeSecs := config.KeyLifetime() - return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs) -} - -func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { - subPrivRaw, err := newDecrypter(config) - if err != nil { - return err - } - sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) - sub.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() - } - - subkey := Subkey{ - PublicKey: &sub.PublicKey, - PrivateKey: sub, - } - subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) - subkey.Sig.CreationTime = creationTime - subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs - subkey.Sig.FlagsValid = true - subkey.Sig.FlagEncryptStorage = true - subkey.Sig.FlagEncryptCommunications = true - - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return err - } - - e.Subkeys = append(e.Subkeys, subkey) - return nil -} - -// Generates a signing key -func newSigner(config *packet.Config) (signer interface{}, err error) { - switch config.PublicKeyAlgorithm() { - case packet.PubKeyAlgoRSA: - bits := config.RSAModulusBits() - if bits < 1024 { - return nil, errors.InvalidArgumentError("bits must be >= 1024") - } - if config != nil && len(config.RSAPrimes) >= 2 { - primes := config.RSAPrimes[0:2] - config.RSAPrimes = config.RSAPrimes[2:] - return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) - } - return rsa.GenerateKey(config.Random(), bits) - case packet.PubKeyAlgoEdDSA: - curve := ecc.FindEdDSAByGenName(string(config.CurveName())) - if curve == nil { - return nil, errors.InvalidArgumentError("unsupported curve") - } - - priv, err := eddsa.GenerateKey(config.Random(), curve) - if err != nil { - return nil, err - } - return priv, nil - case packet.PubKeyAlgoECDSA: - curve := ecc.FindECDSAByGenName(string(config.CurveName())) - if curve == nil { - return nil, errors.InvalidArgumentError("unsupported curve") - } - - priv, err := ecdsa.GenerateKey(config.Random(), curve) - if err != nil { - return nil, err - } - return priv, nil - default: - return nil, 
errors.InvalidArgumentError("unsupported public key algorithm") - } -} - -// Generates an encryption/decryption key -func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { - switch config.PublicKeyAlgorithm() { - case packet.PubKeyAlgoRSA: - bits := config.RSAModulusBits() - if bits < 1024 { - return nil, errors.InvalidArgumentError("bits must be >= 1024") - } - if config != nil && len(config.RSAPrimes) >= 2 { - primes := config.RSAPrimes[0:2] - config.RSAPrimes = config.RSAPrimes[2:] - return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) - } - return rsa.GenerateKey(config.Random(), bits) - case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: - fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey - case packet.PubKeyAlgoECDH: - var kdf = ecdh.KDF{ - Hash: algorithm.SHA512, - Cipher: algorithm.AES256, - } - curve := ecc.FindECDHByGenName(string(config.CurveName())) - if curve == nil { - return nil, errors.InvalidArgumentError("unsupported curve") - } - return ecdh.GenerateKey(config.Random(), curve, kdf) - default: - return nil, errors.InvalidArgumentError("unsupported public key algorithm") - } -} - -var bigOne = big.NewInt(1) - -// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the -// given bit size, using the given random source and prepopulated primes. -func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { - priv := new(rsa.PrivateKey) - priv.E = 65537 - - if nprimes < 2 { - return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") - } - - if bits < 1024 { - return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") - } - - primes := make([]*big.Int, nprimes) - -NextSetOfPrimes: - for { - todo := bits - // crypto/rand should set the top two bits in each prime. - // Thus each prime has the form - // p_i = 2^bitlen(p_i) × 0.11... (in base 2). - // And the product is: - // P = 2^todo × α - // where α is the product of nprimes numbers of the form 0.11... - // - // If α < 1/2 (which can happen for nprimes > 2), we need to - // shift todo to compensate for lost bits: the mean value of 0.11... - // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 - // will give good results. - if nprimes >= 7 { - todo += (nprimes - 2) / 5 - } - for i := 0; i < nprimes; i++ { - var err error - if len(prepopulatedPrimes) == 0 { - primes[i], err = rand.Prime(random, todo/(nprimes-i)) - if err != nil { - return nil, err - } - } else { - primes[i] = prepopulatedPrimes[0] - prepopulatedPrimes = prepopulatedPrimes[1:] - } - - todo -= primes[i].BitLen() - } - - // Make sure that primes is pairwise unequal. - for i, prime := range primes { - for j := 0; j < i; j++ { - if prime.Cmp(primes[j]) == 0 { - continue NextSetOfPrimes - } - } - } - - n := new(big.Int).Set(bigOne) - totient := new(big.Int).Set(bigOne) - pminus1 := new(big.Int) - for _, prime := range primes { - n.Mul(n, prime) - pminus1.Sub(prime, bigOne) - totient.Mul(totient, pminus1) - } - if n.BitLen() != bits { - // This should never happen for nprimes == 2 because - // crypto/rand should set the top two bits in each prime. - // For nprimes > 2 we hope it does not happen often. 
- continue NextSetOfPrimes
- }
-
- priv.D = new(big.Int)
- e := big.NewInt(int64(priv.E))
- ok := priv.D.ModInverse(e, totient)
-
- if ok != nil {
- priv.Primes = primes
- priv.N = n
- break
- }
- }
-
- priv.Precompute()
- return priv, nil
-}
diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
deleted file mode 100644
index 120f081ada..0000000000
--- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
+++ /dev/null
@@ -1,804 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- goerrors "errors"
- "io"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// PublicKeyType is the armor type for a PGP public key.
-var PublicKeyType = "PGP PUBLIC KEY BLOCK"
-
-// PrivateKeyType is the armor type for a PGP private key.
-var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
-
-// An Entity represents the components of an OpenPGP key: a primary public key
-// (which must be a signing key), one or more identities claimed by that key,
-// and zero or more subkeys, which may be encryption keys.
-type Entity struct {
- PrimaryKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Identities map[string]*Identity // indexed by Identity.Name
- Revocations []*packet.Signature
- Subkeys []Subkey
-}
-
-// An Identity represents an identity claimed by an Entity and zero or more
-// assertions by other entities about that claim.
-type Identity struct {
- Name string // by convention, has the form "Full Name (comment) <email@example.com>"
- UserId *packet.UserId
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
- Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
-}
-
-// A Subkey is an additional public key in an Entity. Subkeys can be used for
-// encryption.
-type Subkey struct {
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Sig *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A Key identifies a specific public key in an Entity. This is either the
-// Entity's primary key or a subkey.
-type Key struct {
- Entity *Entity
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A KeyRing provides access to public and private keys.
-type KeyRing interface {
- // KeysById returns the set of keys that have the given key id.
- KeysById(id uint64) []Key
- // KeysByIdAndUsage returns the set of keys with the given id
- // that also meet the key usage given by requiredUsage.
- // The requiredUsage is expressed as the bitwise-OR of
- // packet.KeyFlag* values.
- KeysByIdUsage(id uint64, requiredUsage byte) []Key
- // DecryptionKeys returns all private keys that are valid for
- // decryption.
- DecryptionKeys() []Key
-}
-
-// PrimaryIdentity returns an Identity, preferring non-revoked identities,
-// identities marked as primary, or the latest-created identity, in that order.
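A hedged sketch of that selection order through the public API (placeholder identities; nil configs take the library defaults):

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	e, err := openpgp.NewEntity("Alice", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	// Add a second user ID; the first keeps the primary flag, so
	// PrimaryIdentity should keep preferring it.
	if err := e.AddUserId("Alice", "work", "alice@work.example", nil); err != nil {
		panic(err)
	}
	fmt.Println(e.PrimaryIdentity().Name)
}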
-func (e *Entity) PrimaryIdentity() *Identity { - var primaryIdentity *Identity - for _, ident := range e.Identities { - if shouldPreferIdentity(primaryIdentity, ident) { - primaryIdentity = ident - } - } - return primaryIdentity -} - -func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { - if existingId == nil { - return true - } - - if len(existingId.Revocations) > len(potentialNewId.Revocations) { - return true - } - - if len(existingId.Revocations) < len(potentialNewId.Revocations) { - return false - } - - if existingId.SelfSignature == nil { - return true - } - - if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && - !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { - return false - } - - if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && - potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId { - return true - } - - return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime) -} - -// EncryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { - // Fail to find any encryption key if the... - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired - e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked - return Key{}, false - } - - // Iterate the keys to find the newest, unexpired one - candidateSubkey := -1 - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.PublicKey.KeyExpired(subkey.Sig, now) && - !subkey.Sig.SigExpired(now) && - !subkey.Revoked(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt with, then we can obviously use it. - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true - } - - return Key{}, false -} - - -// CertificationKey return the best candidate Key for certifying a key with this -// Entity. -func (e *Entity) CertificationKey(now time.Time) (Key, bool) { - return e.CertificationKeyById(now, 0) -} - -// CertificationKeyById return the Key for key certification with this -// Entity and keyID. -func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) { - return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify) -} - -// SigningKey return the best candidate Key for signing a message with this -// Entity. 
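A short usage sketch of the key-selection helpers in this file, under the same public-import-path assumption as above:

package main

import (
	"fmt"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	e, err := openpgp.NewEntity("Bob", "", "bob@example.com", nil)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	if k, ok := e.EncryptionKey(now); ok {
		fmt.Printf("encrypt to subkey %016X\n", k.PublicKey.KeyId)
	}
	if k, ok := e.SigningKey(now); ok {
		fmt.Printf("sign with key %016X\n", k.PublicKey.KeyId)
	}
}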
-func (e *Entity) SigningKey(now time.Time) (Key, bool) { - return e.SigningKeyById(now, 0) -} - -// SigningKeyById return the Key for signing a message with this -// Entity and keyID. -func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { - return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign) -} - -func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) { - // Fail to find any signing key if the... - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired - e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked - return Key{}, false - } - - // Iterate the keys to find the newest, unexpired one - candidateSubkey := -1 - var maxTime time.Time - for idx, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - (flags & packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) && - (flags & packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.PublicKey.KeyExpired(subkey.Sig, now) && - !subkey.Sig.SigExpired(now) && - !subkey.Revoked(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && - (id == 0 || subkey.PublicKey.KeyId == id) { - candidateSubkey = idx - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. Or, if the primary key is marked as ok to - // sign with, then we can use it. - if !i.SelfSignature.FlagsValid || ( - (flags & packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) && - (flags & packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign)) && - e.PrimaryKey.PubKeyAlgo.CanSign() && - (id == 0 || e.PrimaryKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true - } - - // No keys with a valid Signing Flag or no keys matched the id passed in - return Key{}, false -} - -func revoked(revocations []*packet.Signature, now time.Time) bool { - for _, revocation := range revocations { - if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised { - // If the key is compromised, the key is considered revoked even before the revocation date. - return true - } - if !revocation.SigExpired(now) { - return true - } - } - return false -} - -// Revoked returns whether the entity has any direct key revocation signatures. -// Note that third-party revocation signatures are not supported. -// Note also that Identity and Subkey revocation should be checked separately. -func (e *Entity) Revoked(now time.Time) bool { - return revoked(e.Revocations, now) -} - -// Revoked returns whether the identity has been revoked by a self-signature. -// Note that third-party revocation signatures are not supported. -func (i *Identity) Revoked(now time.Time) bool { - return revoked(i.Revocations, now) -} - -// Revoked returns whether the subkey has been revoked by a self-signature. -// Note that third-party revocation signatures are not supported. -func (s *Subkey) Revoked(now time.Time) bool { - return revoked(s.Revocations, now) -} - -// Revoked returns whether the key or subkey has been revoked by a self-signature. 
-// Note that third-party revocation signatures are not supported. -// Note also that Identity revocation should be checked separately. -// Normally, it's not necessary to call this function, except on keys returned by -// KeysById or KeysByIdUsage. -func (key *Key) Revoked(now time.Time) bool { - return revoked(key.Revocations, now) -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - ident := e.PrimaryIdentity() - selfSig := ident.SelfSignature - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if key.SelfSignature != nil && key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. 
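A hedged round-trip sketch of the armored read path documented above, reusing this file's Serialize and ReadArmoredKeyRing plus the armor subpackage from the import block (error handling kept minimal):

package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	e, err := openpgp.NewEntity("Carol", "", "carol@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Armor-encode the public half, then read it back as a keyring.
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
	if err != nil {
		panic(err)
	}
	if err := e.Serialize(w); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	el, err := openpgp.ReadArmoredKeyRing(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read back %d entity(ies)\n", len(el))
}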
-func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. 
- return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. - identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeGenericCert && - sig.SigType != packet.SigTypePersonaCert && - sig.SigType != packet.SigTypeCasualCert && - sig.SigType != packet.SigTypePositiveCert && - sig.SigType != packet.SigTypeCertificationRevocation { - return errors.StructuralError("user ID signature with wrong type") - } - - if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - if sig.SigType == packet.SigTypeCertificationRevocation { - identity.Revocations = append(identity.Revocations, sig) - } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { - identity.SelfSignature = sig - } - identity.Signatures = append(identity.Signatures, sig) - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Revocations = append(subKey.Revocations, sig) - case packet.SigTypeSubkeyBinding: - if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntry. -// If config is nil, sensible defaults will be used. -func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - if e.PrivateKey.Dummy() { - return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities") - } - return e.serializePrivate(w, config, true) -} - -// SerializePrivateWithoutSigning serializes an Entity, including private key -// material, but excluding signatures from other entities, to the given Writer. -// Self-signatures of identities and subkeys are not re-signed. 
This is useful -// when serializing GNU dummy keys, among other things. -// If config is nil, sensible defaults will be used. -func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) { - return e.serializePrivate(w, config, false) -} - -func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) { - if e.PrivateKey == nil { - return goerrors.New("openpgp: private key is missing") - } - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, revocation := range e.Revocations { - err := revocation.Serialize(w) - if err != nil { - return err - } - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - if reSign { - if ident.SelfSignature == nil { - return goerrors.New("openpgp: can't re-sign identity without valid self-signature") - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - if reSign { - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - if subkey.Sig.EmbeddedSignature != nil { - err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, - subkey.PrivateKey, config) - if err != nil { - return - } - } - } - for _, revocation := range subkey.Revocations { - err := revocation.Serialize(w) - if err != nil { - return err - } - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, revocation := range e.Revocations { - err := revocation.Serialize(w) - if err != nil { - return err - } - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - for _, revocation := range subkey.Revocations { - err := revocation.Serialize(w) - if err != nil { - return err - } - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. 
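A short sketch of Serialize in use, assuming go-crypto's armor package and an already-built entity; armor.Encode and openpgp.PublicKeyType supply the "PGP PUBLIC KEY BLOCK" framing around the packets Serialize writes:

package example

import (
	"bytes"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

// armorPublicKey emits the public half of an entity as an ASCII-armored
// key block: Serialize writes the packets, armor.Encode/Close add the
// header, footer, and checksum.
func armorPublicKey(e *openpgp.Entity) (string, error) {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
	if err != nil {
		return "", err
	}
	if err := e.Serialize(w); err != nil {
		return "", err
	}
	if err := w.Close(); err != nil {
		return "", err
	}
	return buf.String(), nil
}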
-func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - certificationKey, ok := signer.CertificationKey(config.Now()) - if !ok { - return errors.InvalidArgumentError("no valid certification key found") - } - - if certificationKey.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config) - - signingUserID := config.SigningUserId() - if signingUserID != "" { - if _, ok := signer.Identities[signingUserID]; !ok { - return errors.InvalidArgumentError("signer identity string not found in signer Entity") - } - sig.SignerUserId = &signingUserID - } - - if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} - -// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the -// specified reason code and text (RFC4880 section-5.2.3.23). -// If config is nil, sensible defaults will be used. -func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config) - revSig.RevocationReason = &reason - revSig.RevocationReasonText = reasonText - - if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { - return err - } - e.Revocations = append(e.Revocations, revSig) - return nil -} - -// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for -// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23). -// If config is nil, sensible defaults will be used. 
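A sketch of RevokeKey in use; packet.KeyCompromised is assumed to be one of the ReasonForRevocation constants, and the entity's private key must already be decrypted:

package example

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// revokeAndPublish revokes e's primary key and writes the public key,
// now carrying the revocation signature, to w.
func revokeAndPublish(e *openpgp.Entity, w io.Writer) error {
	// packet.KeyCompromised is assumed to be a ReasonForRevocation constant.
	if err := e.RevokeKey(packet.KeyCompromised, "key material was exposed", nil); err != nil {
		return err
	}
	// Serialize writes e.Revocations immediately after the primary key,
	// so consumers see the revocation before any identities or subkeys.
	return e.Serialize(w)
}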
-func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil { - return errors.InvalidArgumentError("given subkey is not associated with this key") - } - - revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config) - revSig.RevocationReason = &reason - revSig.RevocationReasonText = reasonText - - if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { - return err - } - - sk.Revocations = append(sk.Revocations, revSig) - return nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go deleted file mode 100644 index 108fd096f3..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go +++ /dev/null @@ -1,538 +0,0 @@ -package openpgp - -const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d
96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" -const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb049861415680
46202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" -const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" -const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" - -const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Charset: UTF-8 - -mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY -ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG -zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 -QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ -QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo -9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu -Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ -dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R -JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL -ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew -RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW -/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu -yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv -2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR -bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL -C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP -WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y -MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA -EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ -MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N -1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm -+ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N -lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW -CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF -4artDmrG -=7FfJ ------END PGP PUBLIC KEY BLOCK-----` - -const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - 
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY -ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG -zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 -QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ -QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo -9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu -Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ -dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R -JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL -ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew -RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW -/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu -yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ -UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe -iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK -FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 -R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh -+SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA -EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO -52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb -u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl -w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep -54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ -YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL -bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E -i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB -DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 -8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY -s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 -U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL -6LCg2mg= -=Dhm4 ------END PGP PUBLIC KEY BLOCK-----` - -const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo -7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom -lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 -E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC -CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw -6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH -7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv -X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 -GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl -y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw -R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW -CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ -LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO -aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx -yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl -BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr -Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK -CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp -C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ -SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ -MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= -=vtbN ------END PGP PUBLIC KEY BLOCK-----` - 
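The revocation fixtures above and below pair with the revocation checks in keys.go; a sketch of inspecting them, assuming one of the fixture constants is in scope. Key.Revoked covers key revocations, while identity revocations live on Identity.Revocations and, as the keys.go comments note, must be checked separately:

package example

import (
	"fmt"
	"strings"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// reportRevocations parses one armored fixture (e.g. revokedUserIDKey)
// and prints which parts of it carry revocation signatures.
func reportRevocations(armored string) error {
	el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armored))
	if err != nil {
		return err
	}
	for _, e := range el {
		for name, ident := range e.Identities {
			if len(ident.Revocations) > 0 {
				fmt.Printf("identity %q is revoked\n", name)
			}
		}
		for _, k := range el.KeysById(e.PrimaryKey.KeyId) {
			fmt.Printf("key %X revoked: %v\n", k.PublicKey.KeyId, k.Revoked(time.Now()))
		}
	}
	return nil
}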
-const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e -DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ -uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW -ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx -nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ -x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg -PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy -9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ -1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 -depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl -aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 -DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa -XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU -8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 -b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD -BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG -0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N -s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb -tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 -BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE -/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 -kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z -VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa -PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ -snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi -bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 -K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X -8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ -TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb -OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l -QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V -yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U -heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB -7qTZOahrETw= -=IKnw ------END PGP PUBLIC KEY BLOCK-----` - -const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: OpenPGP.js v4.10.10 -Comment: https://openpgpjs.org - -xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q -lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN -91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO -XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb -naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX -8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB -AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK -ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS -gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT -6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ -q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy -+I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY -bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j -SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk -kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu -Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC -GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW 
-IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 -mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG -LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK -zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ -BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ -aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD -rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm -yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 -UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb -YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt -amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM -u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ -Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS -gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB -cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N -nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN -kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF -I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb -hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z -FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= -=+2T8 ------END PGP PUBLIC KEY BLOCK----- -` - -const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll -JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ -iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 -yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e -/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI -q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V -xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw -ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB -B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh -BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u -8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA -mcG3/TwG5GSQUUPkrDsGDA== -=mFWy ------END PGP PUBLIC KEY BLOCK----- -` - -const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR -s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL -EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 -I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ -lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe -AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 -2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 -69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 -Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b -wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW -FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR -AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM -vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx -22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj -7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY -AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 -tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX -JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl -mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 -8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC 
-0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b -4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl -Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY -=3fWu ------END PGP PUBLIC KEY BLOCK-----` - -const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L -plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz -r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 -I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ -sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe -AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ -3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm -k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s -GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G -HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT -CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR -AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN -MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j -UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS -YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs -nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ -U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es -HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK -lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg -AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV -UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM -0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX -12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa -3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi -wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 -Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G -1dh1WoUCyREZsJQg2YoIpWIcvw+a -=bNRo ------END PGP PUBLIC KEY BLOCK----- -` - -const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1 - -lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 -GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 -I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB -/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ -CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl -rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ -6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD -ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX -M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED -Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f -yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk -UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH -e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl -AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r -oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo -UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS -YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ -kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC -WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW -n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 
-/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 -R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE -VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix -7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== -=F/T0 ------END PGP PRIVATE KEY BLOCK-----` - -const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- - -xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii -B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS -PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 -lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA -aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD -Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v -Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S -rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH -AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA -iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ -Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ -AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou -b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== -=KLN8 ------END PGP PRIVATE KEY BLOCK-----` - -const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- - -lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd -oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk -/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ -gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r -njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU -iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK -Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m -kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR -MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI -zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A -rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP -CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr -MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c -bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd -hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ -3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS -ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL -Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 -Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz -YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ -aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r -sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV -JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 -EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt -Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw -vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ -iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg -UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y -HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL -Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy -bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP -Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic -BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 -v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV 
-oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c -wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ -aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R -fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa -4g== -=XZm8 ------END PGP PRIVATE KEY BLOCK-----` - -// https://tests.sequoia-pgp.org/#Certificate_expiration -// P _ U p -const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv -/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz -/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ -5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 -X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv -9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 -qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb -SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb -vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w -bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe -ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY -495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ -mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t -yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG -Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN -/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm -pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR -eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH -DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 -ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ -UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT -74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 -ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ -i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj -LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ -iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc -selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n -TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ -Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm -bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk -jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 -6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// -rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm -U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR -LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw -sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx -1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld -I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== -=AmgT ------END PGP PUBLIC KEY BLOCK-----` - -const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 - -lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP -kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p -oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr -owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a -DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT -y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX 
-FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j -bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab -k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E -K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L -sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV -+RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB -cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1 -0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr -4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68 -VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe -88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/ -sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg -/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru -J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY -VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO -BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ -CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE -89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC -sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+ -ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk -b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF -r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D -xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4 -pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B -OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or -fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8 -JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8 -Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9 -hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL -L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO -apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf -Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM -RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC -JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7 -5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq -8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht -crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W -skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC -92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/ -QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl -J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu -DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w -tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM -AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM -8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH -5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB -cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1 -HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC -C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA= -=u442 ------END PGP PRIVATE KEY BLOCK-----` - -const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 - -lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH -X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh 
-bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo -h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK -CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA -8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV -AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+ -sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5 -qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf -iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ -SRXn8DmCTfEB -=cELM ------END PGP PRIVATE KEY BLOCK-----` - -const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52 - -xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV -MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH -gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD -BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv -bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY -akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J -qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b -mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe -c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ -BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx -dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI -ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq -RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA -8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ -YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn -M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU -EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa -HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA -bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ -EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz -YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7 -UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq -OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2 -JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb -PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i -U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO -Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK -1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF -CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA -MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb -huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA -HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB -QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD -l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02 -XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d -QgqsfguR1PqPuJxpXV4bSr6CGAAAAA== -=MSvh ------END PGP PRIVATE KEY BLOCK-----` - -const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK----- - -xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR -ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u -zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI -CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA -AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B -FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG 
-wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5 -8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT -z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X -joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F -RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev -VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP -=Z8YJ ------END PGP PRIVATE KEY BLOCK----- -` diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go deleted file mode 100644 index fec41a0e73..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -package packet - -import "math/bits" - -// CipherSuite contains a combination of Cipher and Mode -type CipherSuite struct { - // The cipher function - Cipher CipherFunction - // The AEAD mode of operation. - Mode AEADMode -} - -// AEADConfig collects a number of AEAD parameters along with sensible defaults. -// A nil AEADConfig is valid and results in all default values. -type AEADConfig struct { - // The AEAD mode of operation. - DefaultMode AEADMode - // Amount of octets in each chunk of data - ChunkSize uint64 -} - -// Mode returns the AEAD mode of operation. -func (conf *AEADConfig) Mode() AEADMode { - // If no preference is specified, OCB is used (which is mandatory to implement). - if conf == nil || conf.DefaultMode == 0 { - return AEADModeOCB - } - - mode := conf.DefaultMode - if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM { - panic("AEAD mode unsupported") - } - return mode -} - -// ChunkSizeByte returns the byte indicating the chunk size. The effective -// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6) -// limit to 16 = 4 MiB -// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 -func (conf *AEADConfig) ChunkSizeByte() byte { - if conf == nil || conf.ChunkSize == 0 { - return 12 // 1 << (12 + 6) == 262144 bytes - } - - chunkSize := conf.ChunkSize - exponent := bits.Len64(chunkSize) - 1 - switch { - case exponent < 6: - exponent = 6 - case exponent > 16: - exponent = 16 - } - - return byte(exponent - 6) -} - -// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the -// maximum returned value is 1 << 30. -func decodeAEADChunkSize(c byte) int { - size := uint64(1 << (c + 6)) - if size != uint64(int(size)) { - return 1 << 30 - } - return int(size) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go deleted file mode 100644 index a82b040bdd..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -package packet - -import ( - "bytes" - "crypto/cipher" - "encoding/binary" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption. 
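The chunk-size encoding in ChunkSizeByte/decodeAEADChunkSize above can be shown standalone. This sketch mirrors the deleted code exactly: a byte c denotes an effective chunk size of 1 << (c + 6) octets, and the exponent of the requested size is clamped to [6, 16] before subtracting 6 (note that, as written, this caps the effective size at 1 << 16 octets):

package main

import (
	"fmt"
	"math/bits"
)

func chunkSizeByte(chunkSize uint64) byte {
	if chunkSize == 0 {
		return 12 // default: 1 << (12+6) == 262144 bytes
	}
	exponent := bits.Len64(chunkSize) - 1 // floor(log2(chunkSize))
	if exponent < 6 {
		exponent = 6 // minimum chunk: 64 bytes
	}
	if exponent > 16 {
		exponent = 16 // clamp, as in the deleted ChunkSizeByte
	}
	return byte(exponent - 6)
}

func main() {
	for _, size := range []uint64{0, 64, 100000, 1 << 22} {
		c := chunkSizeByte(size)
		fmt.Printf("requested %d -> byte %d -> effective %d\n",
			size, c, uint64(1)<<(c+6))
	}
}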
-type aeadCrypter struct { - aead cipher.AEAD - chunkSize int - initialNonce []byte - associatedData []byte // Chunk-independent associated data - chunkIndex []byte // Chunk counter - packetTag packetType - bytesProcessed int // Amount of plaintext bytes encrypted/decrypted - buffer bytes.Buffer // Buffered bytes across chunks -} - -// computeNonce takes the incremental index and computes an eXclusive OR with -// the least significant 8 bytes of the receivers' initial nonce (see sec. -// 5.16.1 and 5.16.2). It returns the resulting nonce. -func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { - if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected { - return append(wo.initialNonce, wo.chunkIndex...) - } - - nonce = make([]byte, len(wo.initialNonce)) - copy(nonce, wo.initialNonce) - offset := len(wo.initialNonce) - 8 - for i := 0; i < 8; i++ { - nonce[i+offset] ^= wo.chunkIndex[i] - } - return -} - -// incrementIndex performs an integer increment by 1 of the integer represented by the -// slice, modifying it accordingly. -func (wo *aeadCrypter) incrementIndex() error { - index := wo.chunkIndex - if len(index) == 0 { - return errors.AEADError("Index has length 0") - } - for i := len(index) - 1; i >= 0; i-- { - if index[i] < 255 { - index[i]++ - return nil - } - index[i] = 0 - } - return errors.AEADError("cannot further increment index") -} - -// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when -// necessary, similar to aeadEncrypter. -type aeadDecrypter struct { - aeadCrypter // Embedded ciphertext opener - reader io.Reader // 'reader' is a partialLengthReader - peekedBytes []byte // Used to detect last chunk - eof bool -} - -// Read decrypts bytes and reads them into dst. It decrypts when necessary and -// buffers extra decrypted bytes. It returns the number of bytes copied into dst -// and an error. -func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { - // Return buffered plaintext bytes from previous calls - if ar.buffer.Len() > 0 { - return ar.buffer.Read(dst) - } - - // Return EOF if we've previously validated the final tag - if ar.eof { - return 0, io.EOF - } - - // Read a chunk - tagLen := ar.aead.Overhead() - cipherChunkBuf := new(bytes.Buffer) - _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize + tagLen)) - cipherChunk := cipherChunkBuf.Bytes() - if errRead != nil && errRead != io.EOF { - return 0, errRead - } - decrypted, errChunk := ar.openChunk(cipherChunk) - if errChunk != nil { - return 0, errChunk - } - - // Return decrypted bytes, buffering if necessary - if len(dst) < len(decrypted) { - n = copy(dst, decrypted[:len(dst)]) - ar.buffer.Write(decrypted[len(dst):]) - } else { - n = copy(dst, decrypted) - } - - // Check final authentication tag - if errRead == io.EOF { - errChunk := ar.validateFinalTag(ar.peekedBytes) - if errChunk != nil { - return n, errChunk - } - ar.eof = true // Mark EOF for when we've returned all buffered data - } - return -} - -// Close is noOp. The final authentication tag of the stream was already -// checked in the last Read call. In the future, this function could be used to -// wipe the reader and peeked, decrypted bytes, if necessary. -func (ar *aeadDecrypter) Close() (err error) { - return nil -} - -// openChunk decrypts and checks integrity of an encrypted chunk, returning -// the underlying plaintext and an error. It accesses peeked bytes from next -// chunk, to identify the last chunk and decrypt/validate accordingly. 
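The nonce handling above boils down to counter arithmetic: incrementIndex treats the 8-byte chunk index as a big-endian integer and adds one, and computeNextNonce either appends that counter to the IV or XORs it into the IV's last eight bytes, depending on the packet type. A standalone sketch of the increment step (increment is a hypothetical name; the deleted method additionally returns an error on full wraparound rather than wrapping silently):

package main

import "fmt"

// increment performs the same byte-wise big-endian +1 as the deleted
// aeadCrypter.incrementIndex: bump the last byte and carry leftwards
// on overflow.
func increment(index []byte) {
	for i := len(index) - 1; i >= 0; i-- {
		if index[i] < 255 {
			index[i]++
			return
		}
		index[i] = 0
	}
}

func main() {
	idx := []byte{0, 0, 0, 0, 0, 0, 0, 255}
	increment(idx)
	fmt.Println(idx) // [0 0 0 0 0 0 1 0]
}
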
-func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { - tagLen := ar.aead.Overhead() - // Restore carried bytes from last call - chunkExtra := append(ar.peekedBytes, data...) - // 'chunk' contains encrypted bytes, followed by an authentication tag. - chunk := chunkExtra[:len(chunkExtra)-tagLen] - ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] - - adata := ar.associatedData - if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { - adata = append(ar.associatedData, ar.chunkIndex...) - } - - nonce := ar.computeNextNonce() - plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) - if err != nil { - return nil, err - } - ar.bytesProcessed += len(plainChunk) - if err = ar.aeadCrypter.incrementIndex(); err != nil { - return nil, err - } - return plainChunk, nil -} - -// Checks the summary tag. It takes into account the total decrypted bytes into -// the associated data. It returns an error, or nil if the tag is valid. -func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { - // Associated: tag, version, cipher, aead, chunk size, ... - amountBytes := make([]byte, 8) - binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed)) - - adata := ar.associatedData - if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { - // ... index ... - adata = append(ar.associatedData, ar.chunkIndex...) - } - - // ... and total number of encrypted octets - adata = append(adata, amountBytes...) - nonce := ar.computeNextNonce() - _, err := ar.aead.Open(nil, nonce, tag, adata) - if err != nil { - return err - } - return nil -} - -// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according -// to the AEAD block size, and buffers the extra encrypted bytes for next write. -type aeadEncrypter struct { - aeadCrypter // Embedded plaintext sealer - writer io.WriteCloser // 'writer' is a partialLengthWriter -} - - -// Write encrypts and writes bytes. It encrypts when necessary and buffers extra -// plaintext bytes for next call. When the stream is finished, Close() MUST be -// called to append the final tag. -func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { - // Append plaintextBytes to existing buffered bytes - n, err = aw.buffer.Write(plaintextBytes) - if err != nil { - return n, err - } - // Encrypt and write chunks - for aw.buffer.Len() >= aw.chunkSize { - plainChunk := aw.buffer.Next(aw.chunkSize) - encryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return n, err - } - _, err = aw.writer.Write(encryptedChunk) - if err != nil { - return n, err - } - } - return -} - -// Close encrypts and writes the remaining buffered plaintext if any, appends -// the final authentication tag, and closes the embedded writer. This function -// MUST be called at the end of a stream. -func (aw *aeadEncrypter) Close() (err error) { - // Encrypt and write a chunk if there's buffered data left, or if we haven't - // written any chunks yet. - if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { - plainChunk := aw.buffer.Bytes() - lastEncryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return err - } - _, err = aw.writer.Write(lastEncryptedChunk) - if err != nil { - return err - } - } - // Compute final tag (associated data: packet tag, version, cipher, aead, - // chunk size... - adata := aw.associatedData - - if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { - // ... index ... - adata = append(aw.associatedData, aw.chunkIndex...) - } - - // ... 
and total number of encrypted octets - amountBytes := make([]byte, 8) - binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed)) - adata = append(adata, amountBytes...) - - nonce := aw.computeNextNonce() - finalTag := aw.aead.Seal(nil, nonce, nil, adata) - _, err = aw.writer.Write(finalTag) - if err != nil { - return err - } - return aw.writer.Close() -} - -// sealChunk Encrypts and authenticates the given chunk. -func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { - if len(data) > aw.chunkSize { - return nil, errors.AEADError("chunk exceeds maximum length") - } - if aw.associatedData == nil { - return nil, errors.AEADError("can't seal without headers") - } - adata := aw.associatedData - if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { - adata = append(aw.associatedData, aw.chunkIndex...) - } - - nonce := aw.computeNextNonce() - encrypted := aw.aead.Seal(nil, nonce, data, adata) - aw.bytesProcessed += len(data) - if err := aw.aeadCrypter.incrementIndex(); err != nil { - return nil, err - } - return encrypted, nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go deleted file mode 100644 index 98bd876bf2..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -package packet - -import ( - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" -) - -// AEADEncrypted represents an AEAD Encrypted Packet. -// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t -type AEADEncrypted struct { - cipher CipherFunction - mode AEADMode - chunkSizeByte byte - Contents io.Reader // Encrypted chunks and tags - initialNonce []byte // Referred to as IV in RFC4880-bis -} - -// Only currently defined version -const aeadEncryptedVersion = 1 - -func (ae *AEADEncrypted) parse(buf io.Reader) error { - headerData := make([]byte, 4) - if n, err := io.ReadFull(buf, headerData); n < 4 { - return errors.AEADError("could not read aead header:" + err.Error()) - } - // Read initial nonce - mode := AEADMode(headerData[2]) - nonceLen := mode.IvLength() - - // This packet supports only EAX and OCB - // https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t - if nonceLen == 0 || mode > AEADModeOCB { - return errors.AEADError("unknown mode") - } - - initialNonce := make([]byte, nonceLen) - if n, err := io.ReadFull(buf, initialNonce); n < nonceLen { - return errors.AEADError("could not read aead nonce:" + err.Error()) - } - ae.Contents = buf - ae.initialNonce = initialNonce - c := headerData[1] - if _, ok := algorithm.CipherById[c]; !ok { - return errors.UnsupportedError("unknown cipher: " + string(c)) - } - ae.cipher = CipherFunction(c) - ae.mode = mode - ae.chunkSizeByte = headerData[3] - return nil -} - -// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or -// an error. -func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) { - return ae.decrypt(key) -} - -// decrypt prepares an aeadCrypter and returns a ReadCloser from which -// decrypted bytes can be read (see aeadDecrypter.Read()). 
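Taken together, the encrypter and decrypter above implement chunked streaming AEAD: the plaintext is cut into fixed-size chunks, each sealed under its own counter-derived nonce, and the stream ends with a zero-length "final tag" authenticated over the total byte count. The Go standard library has neither EAX nor OCB, so the following rough sketch substitutes AES-GCM and a plain-counter nonce; it shows only the per-chunk sealing idea, not the OpenPGP nonce and associated-data scheme:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"encoding/binary"
	"fmt"
)

// sealChunks cuts plaintext into chunkSize pieces and seals each one under
// a nonce derived from a chunk counter. AES-GCM stands in for EAX/OCB here.
func sealChunks(aead cipher.AEAD, plaintext []byte, chunkSize int) [][]byte {
	var sealed [][]byte
	nonce := make([]byte, aead.NonceSize())
	for i := 0; len(plaintext) > 0; i++ {
		n := chunkSize
		if n > len(plaintext) {
			n = len(plaintext)
		}
		// Simplified nonce: the chunk counter in the nonce's last 8 bytes.
		binary.BigEndian.PutUint64(nonce[aead.NonceSize()-8:], uint64(i))
		sealed = append(sealed, aead.Seal(nil, nonce, plaintext[:n], nil))
		plaintext = plaintext[n:]
	}
	return sealed
}

func main() {
	block, _ := aes.NewCipher(make([]byte, 16)) // all-zero demo key
	aead, _ := cipher.NewGCM(block)
	chunks := sealChunks(aead, []byte("hello, chunked AEAD world"), 8)
	fmt.Println(len(chunks)) // 4: plaintext split as 8+8+8+1 bytes
}
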
-func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { - blockCipher := ae.cipher.new(key) - aead := ae.mode.new(blockCipher) - // Carry the first tagLen bytes - tagLen := ae.mode.TagLength() - peekedBytes := make([]byte, tagLen) - n, err := io.ReadFull(ae.Contents, peekedBytes) - if n < tagLen || (err != nil && err != io.EOF) { - return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) - } - chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) - return &aeadDecrypter{ - aeadCrypter: aeadCrypter{ - aead: aead, - chunkSize: chunkSize, - initialNonce: ae.initialNonce, - associatedData: ae.associatedData(), - chunkIndex: make([]byte, 8), - packetTag: packetTypeAEADEncrypted, - }, - reader: ae.Contents, - peekedBytes: peekedBytes}, nil -} - -// associatedData for chunks: tag, version, cipher, mode, chunk size byte -func (ae *AEADEncrypted) associatedData() []byte { - return []byte{ - 0xD4, - aeadEncryptedVersion, - byte(ae.cipher), - byte(ae.mode), - ae.chunkSizeByte} -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go deleted file mode 100644 index 2f5cad71da..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. -type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more then 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. - Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 0: - c.Body = r - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriterCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. 
-type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go deleted file mode 100644 index 82ae539981..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "math/big" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values. -type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. 
- S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int - // The public key algorithm to use - will always create a signing primary - // key and encryption subkey. - Algorithm PublicKeyAlgorithm - // Some known primes that are optionally prepopulated by the caller - RSAPrimes []*big.Int - // Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA, - // PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty Curve25519 is used. - Curve Curve - // AEADConfig configures the use of the new AEAD Encrypted Data Packet, - // defined in the draft of the next version of the OpenPGP specification. - // If a non-nil AEADConfig is passed, usage of this packet is enabled. By - // default, it is disabled. See the documentation of AEADConfig for more - // configuration options related to AEAD. - // **Note: using this option may break compatibility with other OpenPGP - // implementations, as well as future versions of this library.** - AEADConfig *AEADConfig - // V5Keys configures version 5 key generation. If false, this package still - // supports version 5 keys, but produces version 4 keys. - V5Keys bool - // "The validity period of the key. This is the number of seconds after - // the key creation time that the key expires. If this is not present - // or has a value of zero, the key never expires. This is found only on - // a self-signature."" - // https://tools.ietf.org/html/rfc4880#section-5.2.3.6 - KeyLifetimeSecs uint32 - // "The validity period of the signature. This is the number of seconds - // after the signature creation time that the signature expires. If - // this is not present or has a value of zero, it never expires." - // https://tools.ietf.org/html/rfc4880#section-5.2.3.10 - SigLifetimeSecs uint32 - // SigningKeyId is used to specify the signing key to use (by Key ID). - // By default, the signing key is selected automatically, preferring - // signing subkeys if available. - SigningKeyId uint64 - // SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28) - // when producing a generic certification signature onto an existing user ID. - // The identity must be present in the signer Entity. - SigningIdentity string - // InsecureAllowUnauthenticatedMessages controls, whether it is tolerated to read - // encrypted messages without Modification Detection Code (MDC). - // MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented - // in most OpenPGP implementations. Messages without MDC are considered unnecessarily - // insecure and should be prevented whenever possible. - // In case one needs to deal with messages from very old OpenPGP implementations, there - // might be no other way than to tolerate the missing MDC. Setting this flag, allows this - // mode of operation. It should be considered a measure of last resort. - InsecureAllowUnauthenticatedMessages bool - // KnownNotations is a map of Notation Data names to bools, which controls - // the notation names that are allowed to be present in critical Notation Data - // signature subpackets. - KnownNotations map[string]bool - // SignatureNotations is a list of Notations to be added to any signatures. 
- SignatureNotations []*Notation -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -// KeyLifetime returns the validity period of the key. -func (c *Config) KeyLifetime() uint32 { - if c == nil { - return 0 - } - return c.KeyLifetimeSecs -} - -// SigLifetime returns the validity period of the signature. -func (c *Config) SigLifetime() uint32 { - if c == nil { - return 0 - } - return c.SigLifetimeSecs -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} - -func (c *Config) RSAModulusBits() int { - if c == nil || c.RSABits == 0 { - return 2048 - } - return c.RSABits -} - -func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm { - if c == nil || c.Algorithm == 0 { - return PubKeyAlgoRSA - } - return c.Algorithm -} - -func (c *Config) CurveName() Curve { - if c == nil || c.Curve == "" { - return Curve25519 - } - return c.Curve -} - -func (c *Config) AEAD() *AEADConfig { - if c == nil { - return nil - } - return c.AEADConfig -} - -func (c *Config) SigningKey() uint64 { - if c == nil { - return 0 - } - return c.SigningKeyId -} - -func (c *Config) SigningUserId() string { - if c == nil { - return "" - } - return c.SigningIdentity -} - -func (c *Config) AllowUnauthenticatedMessages() bool { - if c == nil { - return false - } - return c.InsecureAllowUnauthenticatedMessages -} - -func (c *Config) KnownNotation(notationName string) bool { - if c == nil { - return false - } - return c.KnownNotations[notationName] -} - -func (c *Config) Notations() []*Notation { - if c == nil { - return nil - } - return c.SignatureNotations -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index eeff2902c1..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/elgamal" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. 
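One pattern worth noting in the deleted config.go is that a nil *Config is a valid configuration: every accessor checks the receiver for nil and falls back to a documented default, so callers can pass nil all the way through the package. A tiny illustration of the same idiom with an invented Options type (not from the library):

package main

import "fmt"

// Options shows the nil-receiver defaulting idiom used by the deleted
// Config: methods on a nil *Options are legal in Go as long as they do
// not dereference the receiver without checking.
type Options struct {
	Retries int
}

func (o *Options) RetryCount() int {
	if o == nil || o.Retries == 0 {
		return 3 // the default
	}
	return o.Retries
}

func main() {
	var opts *Options // nil is fine
	fmt.Println(opts.RetryCount())                   // 3
	fmt.Println((&Options{Retries: 7}).RetryCount()) // 7
}
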
-type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 encoding.Field -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1 = new(encoding.MPI) - if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1 = new(encoding.MPI) - if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { - return - } - - e.encryptedMPI2 = new(encoding.MPI) - if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { - return - } - case PubKeyAlgoECDH: - e.encryptedMPI1 = new(encoding.MPI) - if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { - return - } - - e.encryptedMPI2 = new(encoding.OID) - if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - if e.KeyId != 0 && e.KeyId != priv.KeyId { - return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) - } - if e.Algo != priv.PubKeyAlgo { - return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. 
- switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - // Supports both *rsa.PrivateKey and crypto.Decrypter - k := priv.PrivateKey.(crypto.Decrypter) - b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes()) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes()) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - case PubKeyAlgoECDH: - vsG := e.encryptedMPI1.Bytes() - m := e.encryptedMPI2.Bytes() - oid := priv.PublicKey.oid.EncodedBytes() - b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) - default: - err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - if !e.CipherFunc.IsSupported() { - return errors.UnsupportedError("unsupported encryption function") - } - - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. -func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = int(e.encryptedMPI1.EncodedLength()) - case PubKeyAlgoElGamal: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) - case PubKeyAlgoECDH: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - if err != nil { - return err - } - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - _, err := w.Write(e.encryptedMPI1.EncodedBytes()) - return err - case PubKeyAlgoElGamal: - if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { - return err - } - _, err := w.Write(e.encryptedMPI2.EncodedBytes()) - return err - case PubKeyAlgoECDH: - if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { - return err - } - _, err := w.Write(e.encryptedMPI2.EncodedBytes()) - return err - default: - panic("internal error") - } -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. 
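The session-key framing used by SerializeEncryptedKey (below) is simple enough to show in full: one octet naming the symmetric cipher, the raw session key, then a two-byte big-endian checksum that checksumKeyMaterial (above) computes as the sum of the key bytes mod 2^16. A self-contained sketch (keyBlock is a hypothetical helper name):

package main

import "fmt"

// keyBlock mirrors the layout assembled by the deleted SerializeEncryptedKey:
// cipher-algorithm octet || session key || 16-bit big-endian checksum,
// where the checksum is the sum of the key bytes mod 2^16.
func keyBlock(cipherFunc byte, key []byte) []byte {
	var checksum uint16
	for _, v := range key {
		checksum += uint16(v)
	}
	out := make([]byte, 0, 1+len(key)+2)
	out = append(out, cipherFunc)
	out = append(out, key...)
	out = append(out, byte(checksum>>8), byte(checksum))
	return out
}

func main() {
	b := keyBlock(9 /* AES-256 */, []byte{1, 2, 3})
	fmt.Printf("% x\n", b) // 09 01 02 03 00 06
}
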
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoECDH: - return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - cipherMPI := encoding.NewMPI(cipherText) - packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - _, err = w.Write(cipherMPI.EncodedBytes()) - return err -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { - return err - } - _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) - return err -} - -func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { - vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) - if err != nil { - return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) - } - - g := encoding.NewMPI(vsG) - m := encoding.NewOID(c) - - packetLen := 10 /* header length */ - packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - - _, err = w.Write(header[:]) - if err != nil { - return err - } - if _, err = w.Write(g.EncodedBytes()); err != nil { - return err - } - _, err = w.Write(m.EncodedBytes()) - return err -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go 
b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go deleted file mode 100644 index 4be987609b..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - Format uint8 - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. -func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.Format = buf[0] - l.IsBinary = l.Format == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go deleted file mode 100644 index 2c3e3f50b2..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go +++ /dev/null @@ -1,29 +0,0 @@ -package packet - -// Notation type represents a Notation Data subpacket -// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16 -type Notation struct { - Name string - Value []byte - IsCritical bool - IsHumanReadable bool -} - -func (notation *Notation) getData() []byte { - nameData := []byte(notation.Name) - nameLen := len(nameData) - valueLen := len(notation.Value) - - data := make([]byte, 8+nameLen+valueLen) - if notation.IsHumanReadable { - data[0] = 0x80 - } - - data[4] = byte(nameLen >> 8) - data[5] = byte(nameLen) - data[6] = byte(valueLen >> 8) - data[7] = byte(valueLen) - copy(data[8:8+nameLen], nameData) - copy(data[8+nameLen:], notation.Value) - return data -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go deleted file mode 100644 index 4f26d0a00b..0000000000 --- 
a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. -func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. On successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. 
-func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index fff119e639..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. -type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. 
-func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = algorithm.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go deleted file mode 100644 index 4f8204079f..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. -type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. -func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - EncodedLength []byte // Store the original encoded length for signature verifications. - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. 
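The subpacket walk below (nextSubpacket) hinges on RFC 4880's variable-length encoding from section 5.2.3.1: a first octet under 192 is the length itself, 192..254 begins a two-octet form offset by 192, and 255 prefixes a full four-octet big-endian length. A small decoder mirroring those three branches (decodeLen is a hypothetical name):

package main

import "fmt"

// decodeLen mirrors the 1-, 2-, and 5-octet length forms parsed by the
// deleted nextSubpacket (RFC 4880, section 5.2.3.1), returning the decoded
// length and the number of octets that encoded it.
func decodeLen(b []byte) (length uint32, n int) {
	switch {
	case b[0] < 192:
		return uint32(b[0]), 1
	case b[0] < 255:
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
	default:
		return uint32(b[1])<<24 | uint32(b[2])<<16 |
			uint32(b[3])<<8 | uint32(b[4]), 5
	}
}

func main() {
	fmt.Println(decodeLen([]byte{100}))             // 100 1
	fmt.Println(decodeLen([]byte{192, 0}))          // 192 2
	fmt.Println(decodeLen([]byte{255, 0, 1, 0, 0})) // 65536 5
}
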
-func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - var encodedLength []byte - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - encodedLength = contents[0:1] - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - encodedLength = contents[0:2] - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - encodedLength = contents[0:5] - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.EncodedLength = encodedLength - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - copy(buf, osp.EncodedLength) - n := len(osp.EncodedLength) - - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go deleted file mode 100644 index f73f6f40d6..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" - -import ( - "bytes" - "crypto/cipher" - "crypto/rsa" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
-func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - buf bytes.Buffer - lengthByte [1]byte -} - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - bufLen := w.buf.Len() - if bufLen > 512 { - for power := uint(30); ; power-- { - l := 1 << power - if bufLen >= l { - w.lengthByte[0] = 224 + uint8(power) - _, err = w.w.Write(w.lengthByte[:]) - if err != nil { - return - } - var m int - m, err = w.w.Write(w.buf.Next(l)) - if err != nil { - return - } - if m != l { - return 0, io.ErrShortWrite - } - break - } - } - } - return w.buf.Write(p) -} - -func (w *partialLengthWriter) Close() (err error) { - len := w.buf.Len() - err = serializeLength(w.w, len) - if err != nil { - return err - } - _, err = w.buf.WriteTo(w.w) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
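Besides the fixed forms, readLength above supports partial body lengths: an octet in 224..254 announces a chunk of 1 << (octet & 0x1f) bytes and promises another length header after it, which is what partialLengthReader and partialLengthWriter stream over. A one-function sketch of that branch (partialLen is a hypothetical name):

package main

import "fmt"

// partialLen mirrors the partial-length branch of the deleted readLength:
// an octet c in [224, 254] encodes a chunk of 1 << (c & 0x1f) bytes,
// followed by a further length header (RFC 4880, section 4.2.2.4).
func partialLen(c byte) (size int64, isPartial bool) {
	if c >= 224 && c < 255 {
		return int64(1) << (c & 0x1f), true
	}
	return 0, false
}

func main() {
	fmt.Println(partialLen(224)) // 1 true (2^0, the minimum)
	fmt.Println(partialLen(237)) // 8192 true (2^13)
	fmt.Println(partialLen(254)) // 1073741824 true (2^30, the maximum)
}
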
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - err = serializeType(w, ptype) - if err != nil { - return - } - return serializeLength(w, length) -} - -// serializeType writes an OpenPGP packet type to w. See RFC 4880, section -// 4.2. -func serializeType(w io.Writer, ptype packetType) (err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - return -} - -// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section -// 4.2.2. -func serializeLength(w io.Writer, length int) (err error) { - var buf [5]byte - var n int - - if length < 192 { - buf[0] = byte(length) - n = 1 - } else if length < 8384 { - length -= 192 - buf[0] = 192 + byte(length>>8) - buf[1] = byte(length) - n = 2 - } else { - buf[0] = 255 - buf[1] = byte(length >> 24) - buf[2] = byte(length >> 16) - buf[3] = byte(length >> 8) - buf[4] = byte(length) - n = 5 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - err = serializeType(w, ptype) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. 
See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18 - packetTypeAEADEncrypted packetType = 20 -) - -// EncryptedDataPacket holds encrypted data. It is currently implemented by -// SymmetricallyEncrypted and AEADEncrypted. -type EncryptedDataPacket interface { - Decrypt(CipherFunction, []byte) (io.ReadCloser, error) -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - p = new(Signature) - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - isSubkey := tag == packetTypePublicSubkey - p = &PublicKey{IsSubkey: isSubkey} - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedIntegrityProtected: - se := new(SymmetricallyEncrypted) - se.IntegrityProtected = true - p = se - case packetTypeAEADEncrypted: - p = new(AEADEncrypted) - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0x00 - SigTypeText = 0x01 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 - SigTypeCertificationRevocation = 0x30 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. 
- PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt - PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. -func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction algorithm.CipherFunction - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - return algorithm.CipherFunction(cipher).KeySize() -} - -// IsSupported returns true if the cipher is supported from the library -func (cipher CipherFunction) IsSupported() bool { - return algorithm.CipherFunction(cipher).KeySize() > 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - return algorithm.CipherFunction(cipher).BlockSize() -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - return algorithm.CipherFunction(cipher).New(key) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. -type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) - -// AEADMode represents the different Authenticated Encryption with Associated -// Data specified for OpenPGP. -// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 -type AEADMode algorithm.AEADMode - -const ( - AEADModeEAX AEADMode = 1 - AEADModeOCB AEADMode = 2 - AEADModeGCM AEADMode = 3 -) - -func (mode AEADMode) IvLength() int { - return algorithm.AEADMode(mode).NonceLength() -} - -func (mode AEADMode) TagLength() int { - return algorithm.AEADMode(mode).TagLength() -} - -// new returns a fresh instance of the given mode. -func (mode AEADMode) new(block cipher.Block) cipher.AEAD { - return algorithm.AEADMode(mode).New(block) -} - -// ReasonForRevocation represents a revocation reason code as per RFC4880 -// section 5.2.3.23. 
-type ReasonForRevocation uint8 - -const ( - NoReason ReasonForRevocation = 0 - KeySuperseded ReasonForRevocation = 1 - KeyCompromised ReasonForRevocation = 2 - KeyRetired ReasonForRevocation = 3 -) - -// Curve is a mapping to supported ECC curves for key generation. -// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats -type Curve string - -const ( - Curve25519 Curve = "Curve25519" - Curve448 Curve = "Curve448" - CurveNistP256 Curve = "P256" - CurveNistP384 Curve = "P384" - CurveNistP521 Curve = "P521" - CurveSecP256k1 Curve = "SecP256k1" - CurveBrainpoolP256 Curve = "BrainpoolP256" - CurveBrainpoolP384 Curve = "BrainpoolP384" - CurveBrainpoolP512 Curve = "BrainpoolP512" -) - -// TrustLevel represents a trust level per RFC4880 5.2.3.13 -type TrustLevel uint8 - -// TrustAmount represents a trust amount per RFC4880 5.2.3.13 -type TrustAmount uint8 diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go deleted file mode 100644 index 2898fa74c3..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,739 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "io" - "io/ioutil" - "math/big" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/ecdsa" - "github.com/ProtonMail/go-crypto/openpgp/eddsa" - "github.com/ProtonMail/go-crypto/openpgp/elgamal" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "github.com/ProtonMail/go-crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or - // crypto.Signer/crypto.Decrypter (Decryptor RSA only). 
- PrivateKey interface{} - sha1Checksum bool - iv []byte - - // Type of encryption of the S2K packet - // Allowed values are 0 (Not encrypted), 254 (SHA1), or - // 255 (2-byte checksum) - s2kType S2KType - // Full parameters of the S2K packet - s2kParams *s2k.Params -} - -//S2KType s2k packet type -type S2KType uint8 - -const ( - // S2KNON unencrypt - S2KNON S2KType = 0 - // S2KSHA1 sha1 sum check - S2KSHA1 S2KType = 254 - // S2KCHECKSUM sum check - S2KCHECKSUM S2KType = 255 -) - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA, ECDSA or EdDSA. -func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey { - pk := new(PrivateKey) - // In general, the public Keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility. - switch pubkey := signer.(type) { - case *rsa.PrivateKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) - case rsa.PrivateKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) - case *ecdsa.PrivateKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) - case ecdsa.PrivateKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) - case *eddsa.PrivateKey: - pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) - case eddsa.PrivateKey: - pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) - default: - panic("openpgp: unknown signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey. 
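For orientation, the constructors above are the exported entry points for wrapping Go standard-library keys in OpenPGP private key packets. A minimal usage sketch, assuming the upstream module path and panicking instead of real error handling:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Generate a fresh RSA key and wrap it as an OpenPGP private key packet.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pk := packet.NewRSAPrivateKey(time.Now(), rsaKey)
	fmt.Printf("key ID %X, encrypted=%v\n", pk.KeyId, pk.Encrypted)
}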
-func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { - pk := new(PrivateKey) - switch priv := decrypter.(type) { - case *rsa.PrivateKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - case *elgamal.PrivateKey: - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - case *ecdh.PrivateKey: - pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) - default: - panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") - } - pk.PrivateKey = decrypter - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - v5 := pk.PublicKey.Version == 5 - - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.s2kType = S2KType(buf[0]) - var optCount [1]byte - if v5 { - if _, err = readFull(r, optCount[:]); err != nil { - return - } - } - - switch pk.s2kType { - case S2KNON: - pk.s2k = nil - pk.Encrypted = false - case S2KSHA1, S2KCHECKSUM: - if v5 && pk.s2kType == S2KCHECKSUM { - return errors.StructuralError("wrong s2k identifier for version 5") - } - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - if pk.cipher != 0 && !pk.cipher.IsSupported() { - return errors.UnsupportedError("unsupported cipher function in private key") - } - pk.s2kParams, err = s2k.ParseIntoParams(r) - if err != nil { - return - } - if pk.s2kParams.Dummy() { - return - } - pk.s2k, err = pk.s2kParams.Function() - if err != nil { - return - } - pk.Encrypted = true - if pk.s2kType == S2KSHA1 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - var privateKeyData []byte - if v5 { - var n [4]byte /* secret material four octet count */ - _, err = readFull(r, n[:]) - if err != nil { - return - } - count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) - if !pk.Encrypted { - count = count + 2 /* two octet checksum */ - } - privateKeyData = make([]byte, count) - _, err = readFull(r, privateKeyData) - if err != nil { - return - } - } else { - privateKeyData, err = ioutil.ReadAll(r) - if err != nil { - return - } - } - if !pk.Encrypted { - if len(privateKeyData) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(privateKeyData)-2; i++ { - sum += uint16(privateKeyData[i]) - } - if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || - privateKeyData[len(privateKeyData)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - privateKeyData = privateKeyData[:len(privateKeyData)-2] - return pk.parsePrivateKey(privateKeyData) - } - - pk.encryptedData = privateKeyData - return -} - -// Dummy returns true if the private key is a dummy key. This is a GNU extension. 
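The two-octet checksum verified at the end of parse above is a mod-65536 sum of the secret-key material (RFC 4880, section 5.5.3). A standalone sketch of the same check; the function name is illustrative, not part of this package:

func verifySecretKeyChecksum(data []byte) bool {
	// data is the plaintext secret material followed by its two-octet checksum.
	if len(data) < 2 {
		return false
	}
	var sum uint16
	for _, b := range data[:len(data)-2] {
		sum += uint16(b)
	}
	return data[len(data)-2] == byte(sum>>8) && data[len(data)-1] == byte(sum)
}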
-func (pk *PrivateKey) Dummy() bool { - return pk.s2kParams.Dummy() -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - contents := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(contents) - if err != nil { - return - } - if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { - return - } - - optional := bytes.NewBuffer(nil) - if pk.Encrypted || pk.Dummy() { - optional.Write([]byte{uint8(pk.cipher)}) - if err := pk.s2kParams.Serialize(optional); err != nil { - return err - } - if pk.Encrypted { - optional.Write(pk.iv) - } - } - if pk.Version == 5 { - contents.Write([]byte{uint8(optional.Len())}) - } - io.Copy(contents, optional) - - if !pk.Dummy() { - l := 0 - var priv []byte - if !pk.Encrypted { - buf := bytes.NewBuffer(nil) - err = pk.serializePrivateKey(buf) - if err != nil { - return err - } - l = buf.Len() - checksum := mod64kHash(buf.Bytes()) - buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) - priv = buf.Bytes() - } else { - priv, l = pk.encryptedData, len(pk.encryptedData) - } - - if pk.Version == 5 { - contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) - } - contents.Write(priv) - } - - ptype := packetTypePrivateKey - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, contents.Len()) - if err != nil { - return - } - _, err = io.Copy(w, contents) - if err != nil { - return - } - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { - return err - } - if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { - return err - } - if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { - return err - } - _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) - return err -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) - return err -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { - _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) - return err -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes()) - return err -} - -func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error { - _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) - return err -} - -func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { - _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) - return err -} - -// Decrypt decrypts an encrypted private key using a passphrase. 
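The serialize*PrivateKey helpers above write each secret scalar as an MPI: a two-octet big-endian bit count followed by the big-endian magnitude with leading zero octets stripped (RFC 4880, section 3.2). A minimal sketch of that wire form, assuming only math/big:

// import "math/big"
func encodeMPI(n *big.Int) []byte {
	b := n.Bytes() // big-endian magnitude, no leading zero octets
	bits := n.BitLen()
	out := make([]byte, 2, 2+len(b))
	out[0], out[1] = byte(bits>>8), byte(bits)
	return append(out, b...)
}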
-func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if pk.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - err := pk.parsePrivateKey(data) - if _, ok := err.(errors.KeyInvalidError); ok { - return errors.KeyInvalidError("invalid key parameters") - } - if err != nil { - return err - } - - // Mark key as unencrypted - pk.s2kType = S2KNON - pk.s2k = nil - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -// Encrypt encrypts an unencrypted private key using a passphrase. -func (pk *PrivateKey) Encrypt(passphrase []byte) error { - priv := bytes.NewBuffer(nil) - err := pk.serializePrivateKey(priv) - if err != nil { - return err - } - - //Default config of private key encryption - pk.cipher = CipherAES256 - s2kConfig := &s2k.Config{ - S2KMode: 3, //Iterated - S2KCount: 65536, - Hash: crypto.SHA256, - } - - pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig) - if err != nil { - return err - } - privateKeyBytes := priv.Bytes() - key := make([]byte, pk.cipher.KeySize()) - - pk.sha1Checksum = true - pk.s2k, err = pk.s2kParams.Function() - if err != nil { - return err - } - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - pk.iv = make([]byte, pk.cipher.blockSize()) - _, err = rand.Read(pk.iv) - if err != nil { - return err - } - cfb := cipher.NewCFBEncrypter(block, pk.iv) - - if pk.sha1Checksum { - pk.s2kType = S2KSHA1 - h := sha1.New() - h.Write(privateKeyBytes) - sum := h.Sum(nil) - privateKeyBytes = append(privateKeyBytes, sum...) 
- } else { - pk.s2kType = S2KCHECKSUM - var sum uint16 - for _, b := range privateKeyBytes { - sum += uint16(b) - } - priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) - } - - pk.encryptedData = make([]byte, len(privateKeyBytes)) - cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) - pk.Encrypted = true - pk.PrivateKey = nil - return err -} - -func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(w, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(w, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(w, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(w, priv) - case *eddsa.PrivateKey: - err = serializeEdDSAPrivateKey(w, priv) - case *ecdh.PrivateKey: - err = serializeECDHPrivateKey(w, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - return -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - case PubKeyAlgoECDH: - return pk.parseECDHPrivateKey(data) - case PubKeyAlgoEdDSA: - return pk.parseEdDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, err := d.ReadFrom(buf); err != nil { - return err - } - - p := new(encoding.MPI) - if _, err := p.ReadFrom(buf); err != nil { - return err - } - - q := new(encoding.MPI) - if _, err := q.ReadFrom(buf); err != nil { - return err - } - - rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) - if err := rsaPriv.Validate(); err != nil { - return errors.KeyInvalidError(err.Error()) - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x := new(encoding.MPI) - if _, err := x.ReadFrom(buf); err != nil { - return err - } - - dsaPriv.X = new(big.Int).SetBytes(x.Bytes()) - if err := validateDSAParameters(dsaPriv); err != nil { - return err - } - pk.PrivateKey = dsaPriv - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x := new(encoding.MPI) - if _, err := x.ReadFrom(buf); err != nil { - return err - } - - priv.X = new(big.Int).SetBytes(x.Bytes()) - if err := validateElGamalParameters(priv); err != nil { - return err - } - pk.PrivateKey = priv - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub) - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, err := d.ReadFrom(buf); err 
!= nil { - return err - } - - if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil { - return err - } - if err := ecdsa.Validate(ecdsaPriv); err != nil { - return err - } - pk.PrivateKey = ecdsaPriv - - return nil -} - -func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { - ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) - ecdhPriv := ecdh.NewPrivateKey(*ecdhPub) - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, err := d.ReadFrom(buf); err != nil { - return err - } - - if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil { - return err - } - - if err := ecdh.Validate(ecdhPriv); err != nil { - return err - } - - pk.PrivateKey = ecdhPriv - - return nil -} - -func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { - eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey) - eddsaPriv := eddsa.NewPrivateKey(*eddsaPub) - eddsaPriv.PublicKey = *eddsaPub - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, err := d.ReadFrom(buf); err != nil { - return err - } - - if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil { - return err - } - - if err := eddsa.Validate(eddsaPriv); err != nil { - return err - } - - pk.PrivateKey = eddsaPriv - - return nil -} - -func validateDSAParameters(priv *dsa.PrivateKey) error { - p := priv.P // group prime - q := priv.Q // subgroup order - g := priv.G // g has order q mod p - x := priv.X // secret - y := priv.Y // y == g**x mod p - one := big.NewInt(1) - // expect g, y >= 2 and g < p - if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { - return errors.KeyInvalidError("dsa: invalid group") - } - // expect p > q - if p.Cmp(q) <= 0 { - return errors.KeyInvalidError("dsa: invalid group prime") - } - // q should be large enough and divide p-1 - pSub1 := new(big.Int).Sub(p, one) - if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { - return errors.KeyInvalidError("dsa: invalid order") - } - // confirm that g has order q mod p - if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { - return errors.KeyInvalidError("dsa: invalid order") - } - // check y - if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { - return errors.KeyInvalidError("dsa: mismatching values") - } - - return nil -} - -func validateElGamalParameters(priv *elgamal.PrivateKey) error { - p := priv.P // group prime - g := priv.G // g has order p-1 mod p - x := priv.X // secret - y := priv.Y // y == g**x mod p - one := big.NewInt(1) - // Expect g, y >= 2 and g < p - if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { - return errors.KeyInvalidError("elgamal: invalid group") - } - if p.BitLen() < 1024 { - return errors.KeyInvalidError("elgamal: group order too small") - } - pSub1 := new(big.Int).Sub(p, one) - if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { - return errors.KeyInvalidError("elgamal: invalid group") - } - // Since p-1 is not prime, g might have a smaller order that divides p-1. - // We cannot confirm the exact order of g, but we make sure it is not too small. 
- gExpI := new(big.Int).Set(g) - i := 1 - threshold := 2 << 17 // we want order > threshold - for i < threshold { - i++ // we check every order to make sure key validation is not easily bypassed by guessing y' - gExpI.Mod(new(big.Int).Mul(gExpI, g), p) - if gExpI.Cmp(one) == 0 { - return errors.KeyInvalidError("elgamal: order too small") - } - } - // Check y - if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { - return errors.KeyInvalidError("elgamal: mismatching values") - } - - return nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go deleted file mode 100644 index 029b8f1aab..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go +++ /dev/null @@ -1,12 +0,0 @@ -package packet - -// Generated with `gpg --export-secret-keys "Test Key 2"` -const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" - -// Generated by `gpg --export-secret-keys` followed by a manual extraction of -// the ElGamal subkey from the packets. -const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" - -// pkcs1PrivKeyHex is a PKCS#1, RSA private key. 
-// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` -const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go deleted file mode 100644 index e0f5f74a93..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,802 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/dsa" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/ecdsa" - "github.com/ProtonMail/go-crypto/openpgp/eddsa" - "github.com/ProtonMail/go-crypto/openpgp/elgamal" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" -) - -type kdfHashFunction byte -type kdfAlgorithm byte - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - Version int - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey - Fingerprint []byte - KeyId uint64 - IsSubkey bool - - // RFC 4880 fields - n, e, p, q, g, y encoding.Field - - // RFC 6637 fields - // oid contains the OID byte sequence identifying the elliptic curve used - oid encoding.Field - - // kdf stores key derivation function parameters - // used for ECDH encryption. See RFC 6637, Section 9. - kdf encoding.Field -} - -// UpgradeToV5 updates the version of the key to v5, and updates all necessary -// fields. -func (pk *PublicKey) UpgradeToV5() { - pk.Version = 5 - pk.setFingerprintAndKeyId() -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. 
-type signingKey interface { - SerializeForHash(io.Writer) error - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: new(encoding.MPI).SetBig(pub.N), - e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerprintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: new(encoding.MPI).SetBig(pub.P), - q: new(encoding.MPI).SetBig(pub.Q), - g: new(encoding.MPI).SetBig(pub.G), - y: new(encoding.MPI).SetBig(pub.Y), - } - - pk.setFingerprintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. -func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: new(encoding.MPI).SetBig(pub.P), - g: new(encoding.MPI).SetBig(pub.G), - y: new(encoding.MPI).SetBig(pub.Y), - } - - pk.setFingerprintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - p: encoding.NewMPI(pub.MarshalPoint()), - } - - curveInfo := ecc.FindByCurve(pub.GetCurve()) - if curveInfo == nil { - panic("unknown elliptic curve") - } - pk.oid = curveInfo.Oid - pk.setFingerprintAndKeyId() - return pk -} - -func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { - var pk *PublicKey - var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) - pk = &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDH, - PublicKey: pub, - p: encoding.NewMPI(pub.MarshalPoint()), - kdf: kdf, - } - - curveInfo := ecc.FindByCurve(pub.GetCurve()) - - if curveInfo == nil { - panic("unknown elliptic curve") - } - - pk.oid = curveInfo.Oid - pk.setFingerprintAndKeyId() - return pk -} - -func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey { - curveInfo := ecc.FindByCurve(pub.GetCurve()) - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoEdDSA, - PublicKey: pub, - oid: curveInfo.Oid, - // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B - p: encoding.NewMPI(pub.MarshalPoint()), - } - - pk.setFingerprintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 && buf[0] != 5 { - return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) - } - - pk.Version = int(buf[0]) - if pk.Version == 5 { - var n [4]byte - _, err = readFull(r, n[:]) - if err != nil { - return - } - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err 
= pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - err = pk.parseECDSA(r) - case PubKeyAlgoECDH: - err = pk.parseECDH(r) - case PubKeyAlgoEdDSA: - err = pk.parseEdDSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerprintAndKeyId() - return -} - -func (pk *PublicKey) setFingerprintAndKeyId() { - // RFC 4880, section 12.2 - if pk.Version == 5 { - fingerprint := sha256.New() - pk.SerializeForHash(fingerprint) - pk.Fingerprint = make([]byte, 32) - copy(pk.Fingerprint, fingerprint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) - } else { - fingerprint := sha1.New() - pk.SerializeForHash(fingerprint) - pk.Fingerprint = make([]byte, 20) - copy(pk.Fingerprint, fingerprint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) - } -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n = new(encoding.MPI) - if _, err = pk.n.ReadFrom(r); err != nil { - return - } - pk.e = new(encoding.MPI) - if _, err = pk.e.ReadFrom(r); err != nil { - return - } - - if len(pk.e.Bytes()) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.Bytes()), - E: 0, - } - for i := 0; i < len(pk.e.Bytes()); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.Bytes()[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - pk.q = new(encoding.MPI) - if _, err = pk.q.ReadFrom(r); err != nil { - return - } - pk.g = new(encoding.MPI) - if _, err = pk.g.ReadFrom(r); err != nil { - return - } - pk.y = new(encoding.MPI) - if _, err = pk.y.ReadFrom(r); err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) - dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) - dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) - dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - pk.g = new(encoding.MPI) - if _, err = pk.g.ReadFrom(r); err != nil { - return - } - pk.y = new(encoding.MPI) - if _, err = pk.y.ReadFrom(r); err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) - elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) - elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) - pk.PublicKey = elgamal - return -} - -// parseECDSA parses ECDSA public key material from the given Reader. See -// RFC 6637, Section 9. 
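setFingerprintAndKeyId above implements RFC 4880, section 12.2: a v4 fingerprint is the SHA-1 of 0x99, a two-octet length, and the serialized key body, and the 64-bit key ID is the fingerprint's trailing eight octets. A self-contained sketch with an illustrative name:

// import "crypto/sha1"
func v4Fingerprint(keyBody []byte) (fp [20]byte, keyID uint64) {
	h := sha1.New()
	h.Write([]byte{0x99, byte(len(keyBody) >> 8), byte(len(keyBody))})
	h.Write(keyBody)
	copy(fp[:], h.Sum(nil))
	for _, b := range fp[12:20] { // key ID = low 64 bits of the fingerprint
		keyID = keyID<<8 | uint64(b)
	}
	return fp, keyID
}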
-func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { - pk.oid = new(encoding.OID) - if _, err = pk.oid.ReadFrom(r); err != nil { - return - } - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - - curveInfo := ecc.FindByOid(pk.oid) - if curveInfo == nil { - return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) - } - - c, ok := curveInfo.Curve.(ecc.ECDSACurve) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) - } - - ecdsaKey := ecdsa.NewPublicKey(c) - err = ecdsaKey.UnmarshalPoint(pk.p.Bytes()) - pk.PublicKey = ecdsaKey - - return -} - -// parseECDH parses ECDH public key material from the given Reader. See -// RFC 6637, Section 9. -func (pk *PublicKey) parseECDH(r io.Reader) (err error) { - pk.oid = new(encoding.OID) - if _, err = pk.oid.ReadFrom(r); err != nil { - return - } - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - pk.kdf = new(encoding.OID) - if _, err = pk.kdf.ReadFrom(r); err != nil { - return - } - - curveInfo := ecc.FindByOid(pk.oid) - - if curveInfo == nil { - return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) - } - - c, ok := curveInfo.Curve.(ecc.ECDHCurve) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) - } - - if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 { - return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 { - return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved))) - } - kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]] - if !ok { - return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1]))) - } - kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]] - if !ok { - return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) - } - - ecdhKey := ecdh.NewPublicKey(c, kdfHash, kdfCipher) - err = ecdhKey.UnmarshalPoint(pk.p.Bytes()) - pk.PublicKey = ecdhKey - - return -} - -func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { - pk.oid = new(encoding.OID) - if _, err = pk.oid.ReadFrom(r); err != nil { - return - } - curveInfo := ecc.FindByOid(pk.oid) - if curveInfo == nil { - return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) - } - - c, ok := curveInfo.Curve.(ecc.EdDSACurve) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) - } - - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - - pub := eddsa.NewPublicKey(c) - - switch flag := pk.p.Bytes()[0]; flag { - case 0x04: - // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt - return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) - case 0x40: - err = pub.UnmarshalPoint(pk.p.Bytes()) - default: - return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) - } - - pk.PublicKey = pub - return -} - -// SerializeForHash serializes the PublicKey to w with the special packet -// header format needed for hashing. -func (pk *PublicKey) SerializeForHash(w io.Writer) error { - pk.SerializeSignaturePrefix(w) - return pk.serializeWithoutHeaders(w) -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
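The kdf blob consumed by parseECDH above carries, after its length octet, a reserved 0x01, a hash algorithm ID, and a symmetric cipher ID (RFC 6637, section 9). A standalone sketch of that validation, using the stdlib errors package rather than this package's error types:

// import "errors"
func parseECDHKDF(kdf []byte) (hashID, cipherID byte, err error) {
	if len(kdf) < 3 {
		return 0, 0, errors.New("openpgp: ECDH KDF parameters too short")
	}
	if kdf[0] != 0x01 {
		return 0, 0, errors.New("openpgp: unsupported KDF reserved field")
	}
	return kdf[1], kdf[2], nil
}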
-func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { - var pLength = pk.algorithmSpecificByteCount() - if pk.Version == 5 { - pLength += 10 // version, timestamp (4), algorithm, key octet count (4). - w.Write([]byte{ - 0x9A, - byte(pLength >> 24), - byte(pLength >> 16), - byte(pLength >> 8), - byte(pLength), - }) - return - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - length += pk.algorithmSpecificByteCount() - if pk.Version == 5 { - length += 4 // octet key count - } - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -func (pk *PublicKey) algorithmSpecificByteCount() int { - length := 0 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += int(pk.n.EncodedLength()) - length += int(pk.e.EncodedLength()) - case PubKeyAlgoDSA: - length += int(pk.p.EncodedLength()) - length += int(pk.q.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) - case PubKeyAlgoElGamal: - length += int(pk.p.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) - case PubKeyAlgoECDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - case PubKeyAlgoECDH: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - length += int(pk.kdf.EncodedLength()) - case PubKeyAlgoEdDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - default: - panic("unknown public key algorithm") - } - return length -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
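SerializeSignaturePrefix above frames a key body for hashing: 0x99 plus a two-octet length for v4 keys, 0x9A plus a four-octet length for v5 keys. Just the framing bytes, as a sketch:

func hashFramePrefix(version, bodyLen int) []byte {
	if version == 5 {
		return []byte{0x9A, byte(bodyLen >> 24), byte(bodyLen >> 16), byte(bodyLen >> 8), byte(bodyLen)}
	}
	return []byte{0x99, byte(bodyLen >> 8), byte(bodyLen)}
}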
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - t := uint32(pk.CreationTime.Unix()) - if _, err = w.Write([]byte{ - byte(pk.Version), - byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), - byte(pk.PubKeyAlgo), - }); err != nil { - return - } - - if pk.Version == 5 { - n := pk.algorithmSpecificByteCount() - if _, err = w.Write([]byte{ - byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), - }); err != nil { - return - } - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - if _, err = w.Write(pk.n.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.e.EncodedBytes()) - return - case PubKeyAlgoDSA: - if _, err = w.Write(pk.p.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.q.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.g.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.y.EncodedBytes()) - return - case PubKeyAlgoElGamal: - if _, err = w.Write(pk.p.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.g.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.y.EncodedBytes()) - return - case PubKeyAlgoECDSA: - if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.p.EncodedBytes()) - return - case PubKeyAlgoECDH: - if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.p.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.kdf.EncodedBytes()) - return - case PubKeyAlgoEdDSA: - if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.p.EncodedBytes()) - return - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { - sig.AddMetadataToHashSuffix() - } - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
- subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - case PubKeyAlgoEdDSA: - eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey) - if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) { - return errors.SignatureError("EdDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - err = pk.SerializeForHash(h) - if err != nil { - return nil, err - } - - err = signed.SerializeForHash(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - err = pk.SerializeForHash(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. -func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, -// made by this public key, of signed. 
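The truncation in the DSA branch above follows FIPS 186-3, section 4.6: when the digest is longer than the subgroup order q, only the leftmost (qBits+7)/8 bytes enter verification. As a standalone sketch:

func truncateDigestForDSA(digest []byte, qBits int) []byte {
	n := (qBits + 7) / 8
	if len(digest) > n {
		return digest[:n]
	}
	return digest
}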
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.BitLength() - case PubKeyAlgoDSA: - bitLength = pk.p.BitLength() - case PubKeyAlgoElGamal: - bitLength = pk.p.BitLength() - case PubKeyAlgoECDSA: - bitLength = pk.p.BitLength() - case PubKeyAlgoECDH: - bitLength = pk.p.BitLength() - case PubKeyAlgoEdDSA: - bitLength = pk.p.BitLength() - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired or is created in the future. 
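userIdSignatureHash above frames an identity as 0xB4, a four-octet big-endian length, then the raw ID bytes (RFC 4880, section 5.2.4). The equivalent standalone framing:

func userIdHashFrame(id string) []byte {
	n := len(id)
	frame := []byte{0xB4, byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n)}
	return append(frame, id...)
}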
-func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { - if pk.CreationTime.After(currentTime) { - return true - } - if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { - return false - } - expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go deleted file mode 100644 index b255f1f6f8..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go +++ /dev/null @@ -1,24 +0,0 @@ -package packet - -const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" - -const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" - -const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" - -const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" - -const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" - -const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" - -const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" - -const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" - -const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" - -const eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" - -// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key -const ecc384PubHex = 
`99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go deleted file mode 100644 index 10215fe5f2..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. - if _, ok := err.(errors.UnknownPacketTypeError); ok { - continue - } - if _, ok := err.(errors.UnsupportedError); ok { - switch p.(type) { - case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: - return nil, err - } - continue - } - return nil, err - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. 
Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. -func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go deleted file mode 100644 index 9f0b1b1978..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,1068 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "encoding/binary" - "hash" - "io" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/ecdsa" - "github.com/ProtonMail/go-crypto/openpgp/eddsa" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage - KeyFlagSplitKey - KeyFlagAuthenticate - _ - KeyFlagGroupKey -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - Version int - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - - // Metadata includes format, filename and time, and is protected by v5 - // signatures of type 0x00 or 0x01. This metadata is included into the hash - // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. - Metadata *LiteralData - - CreationTime time.Time - - RSASignature encoding.Field - DSASigR, DSASigS encoding.Field - ECDSASigR, ECDSASigS encoding.Field - EdDSASigR, EdDSASigS encoding.Field - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - PreferredCipherSuites [][2]uint8 - IssuerKeyId *uint64 - IssuerFingerprint []byte - SignerUserId *string - IsPrimaryId *bool - Notations []*Notation - - // TrustLevel and TrustAmount can be set by the signer to assert that - // the key is not only valid but also trustworthy at the specified - // level. - // See RFC 4880, section 5.2.3.13 for details. - TrustLevel TrustLevel - TrustAmount TrustAmount - - // TrustRegularExpression can be used in conjunction with trust Signature - // packets to limit the scope of the trust that is extended. - // See RFC 4880, section 5.2.3.14 for details. 
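	// On the wire this regular expression is null-terminated (see the
	// serialization in buildSubpackets and the parse in
	// parseSignatureSubpacket); a minimal round-trip sketch, assuming the
	// field is set:
	//
	//	wire := []byte(*sig.TrustRegularExpression + "\000") // serialize appends the NUL
	//	parsed := string(wire[:len(wire)-1])                 // parse strips it again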
- TrustRegularExpression *string - - // PolicyURI can be set to the URI of a document that describes the - // policy under which the signature was issued. See RFC 4880, section - // 5.2.3.20 for details. - PolicyURI string - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *ReasonForRevocation - RevocationReasonText string - - // In a self-signature, these flags are set there is a features subpacket - // indicating that the issuer implementation supports these features - // see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket - SEIPDv1, SEIPDv2 bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. - EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 && buf[0] != 5 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - sig.Version = int(buf[0]) - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - - if sig.Version < 5 { - sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) - } else { - sig.Hash, ok = algorithm.HashIdToHash(buf[2]) - } - - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - hashedSubpackets := make([]byte, hashedSubpacketsLength) - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - err = sig.buildHashSuffix(hashedSubpackets) - if err != nil { - return - } - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature = new(encoding.MPI) - _, err = sig.RSASignature.ReadFrom(r) - case PubKeyAlgoDSA: - sig.DSASigR = new(encoding.MPI) - if _, err = sig.DSASigR.ReadFrom(r); err != nil { - return - } - - sig.DSASigS = new(encoding.MPI) - _, err = sig.DSASigS.ReadFrom(r) - case PubKeyAlgoECDSA: - sig.ECDSASigR = new(encoding.MPI) - if _, err = sig.ECDSASigR.ReadFrom(r); err != nil { - return - } - - sig.ECDSASigS = new(encoding.MPI) - _, err = sig.ECDSASigS.ReadFrom(r) - case PubKeyAlgoEdDSA: - sig.EdDSASigR = new(encoding.MPI) - if _, err = 
sig.EdDSASigR.ReadFrom(r); err != nil { - return - } - - sig.EdDSASigS = new(encoding.MPI) - if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { - return - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. -func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - trustSubpacket signatureSubpacketType = 5 - regularExpressionSubpacket signatureSubpacketType = 6 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - notationDataSubpacket signatureSubpacketType = 20 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - policyUriSubpacket signatureSubpacketType = 26 - keyFlagsSubpacket signatureSubpacketType = 27 - signerUserIdSubpacket signatureSubpacketType = 28 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 - issuerFingerprintSubpacket signatureSubpacketType = 33 - prefCipherSuitesSubpacket signatureSubpacketType = 39 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. 
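// The subpacket length prefix uses the variable-length encoding of RFC 4880,
// section 4.2.2. A standalone sketch of just that decoding step, mirroring the
// switch below (decodeSubpacketLength is a hypothetical helper, not part of
// this package):
//
//	func decodeSubpacketLength(b []byte) (length uint32, n int) {
//		switch {
//		case b[0] < 192:
//			return uint32(b[0]), 1
//		case b[0] < 255:
//			return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
//		default: // b[0] == 255: four-octet big-endian length follows
//			return binary.BigEndian.Uint32(b[1:5]), 5
//		}
//	}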
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - if !isHashed && - packetType != issuerSubpacket && - packetType != issuerFingerprintSubpacket && - packetType != embeddedSignatureSubpacket { - return - } - switch packetType { - case creationTimeSubpacket: - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case trustSubpacket: - // Trust level and amount, section 5.2.3.13 - sig.TrustLevel = TrustLevel(subpacket[0]) - sig.TrustAmount = TrustAmount(subpacket[1]) - case regularExpressionSubpacket: - // Trust regular expression, section 5.2.3.14 - // RFC specifies the string should be null-terminated; remove a null byte from the end - if subpacket[len(subpacket)-1] != 0x00 { - err = errors.StructuralError("expected regular expression to be null-terminated") - return - } - trustRegularExpression := string(subpacket[:len(subpacket)-1]) - sig.TrustRegularExpression = &trustRegularExpression - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if sig.Version > 4 { - err = errors.StructuralError("issuer subpacket found in v5 key") - return - } - if len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case notationDataSubpacket: - // Notation data, section 5.2.3.16 - if len(subpacket) < 8 { - err = errors.StructuralError("notation data subpacket with bad length") - return - } - - nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5]) - valueLength := 
uint32(subpacket[6])<<8 | uint32(subpacket[7]) - if len(subpacket) != int(nameLength) + int(valueLength) + 8 { - err = errors.StructuralError("notation data subpacket with bad length") - return - } - - notation := Notation{ - IsHumanReadable: (subpacket[0] & 0x80) == 0x80, - Name: string(subpacket[8: (nameLength + 8)]), - Value: subpacket[(nameLength + 8) : (valueLength + nameLength + 8)], - IsCritical: isCritical, - } - - sig.Notations = append(sig.Notations, ¬ation) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - if subpacket[0]&KeyFlagSplitKey != 0 { - sig.FlagSplitKey = true - } - if subpacket[0]&KeyFlagAuthenticate != 0 { - sig.FlagAuthenticate = true - } - if subpacket[0]&KeyFlagGroupKey != 0 { - sig.FlagGroupKey = true - } - case signerUserIdSubpacket: - userId := string(subpacket) - sig.SignerUserId = &userId - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(ReasonForRevocation) - *sig.RevocationReason = ReasonForRevocation(subpacket[0]) - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. - if len(subpacket) > 0 { - if subpacket[0]&0x01 != 0 { - sig.SEIPDv1 = true - } - // 0x02 and 0x04 are reserved - if subpacket[0]&0x08 != 0 { - sig.SEIPDv2 = true - } - } - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. 
- if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - case policyUriSubpacket: - // Policy URI, section 5.2.3.20 - sig.PolicyURI = string(subpacket) - case issuerFingerprintSubpacket: - v, l := subpacket[0], len(subpacket[1:]) - if v == 5 && l != 32 || v != 5 && l != 20 { - return nil, errors.StructuralError("bad fingerprint length") - } - sig.IssuerFingerprint = make([]byte, l) - copy(sig.IssuerFingerprint, subpacket[1:]) - sig.IssuerKeyId = new(uint64) - if v == 5 { - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) - } else { - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) - } - case prefCipherSuitesSubpacket: - // Preferred AEAD cipher suites - // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites - if len(subpacket) % 2 != 0 { - err = errors.StructuralError("invalid aead cipher suite length") - return - } - - sig.PreferredCipherSuites = make([][2]byte, len(subpacket) / 2) - - for i := 0; i < len(subpacket) / 2; i++ { - sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]} - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { - if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 { - return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint) - } - return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. - if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - if subpacket.isCritical { - to[n] |= 0x80 - } - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// SigExpired returns whether sig is a signature that has expired or is created -// in the future. 
-func (sig *Signature) SigExpired(currentTime time.Time) bool { - if sig.CreationTime.After(currentTime) { - return true - } - if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { - var hashId byte - var ok bool - - if sig.Version < 5 { - hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash) - } else { - hashId, ok = algorithm.HashToHashId(sig.Hash) - } - - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - - hashedFields := bytes.NewBuffer([]byte{ - uint8(sig.Version), - uint8(sig.SigType), - uint8(sig.PubKeyAlgo), - uint8(hashId), - uint8(len(hashedSubpackets) >> 8), - uint8(len(hashedSubpackets)), - }) - hashedFields.Write(hashedSubpackets) - - var l uint64 = uint64(6 + len(hashedSubpackets)) - if sig.Version == 5 { - hashedFields.Write([]byte{0x05, 0xff}) - hashedFields.Write([]byte{ - uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), - uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), - }) - } else { - hashedFields.Write([]byte{0x04, 0xff}) - hashedFields.Write([]byte{ - uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), - }) - } - sig.HashSuffix = make([]byte, hashedFields.Len()) - copy(sig.HashSuffix, hashedFields.Bytes()) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - hashedSubpackets := make([]byte, hashedSubpacketsLen) - serializeSubpackets(hashedSubpackets, sig.outSubpackets, true) - err = sig.buildHashSuffix(hashedSubpackets) - if err != nil { - return - } - if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { - sig.AddMetadataToHashSuffix() - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - sig.Version = priv.PublicKey.Version - sig.IssuerFingerprint = priv.PublicKey.Fingerprint - sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) - if err != nil { - return err - } - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - sig.RSASignature = encoding.NewMPI(sigdata) - } - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
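	// (FIPS 186-3, section 4.6: DSA signs only the leftmost min(N, outlen)
	// bits of the digest, where N is the bit length of the subgroup order q,
	// so a digest longer than q's octet size is cut down before signing.)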
- subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR = new(encoding.MPI).SetBig(r) - sig.DSASigS = new(encoding.MPI).SetBig(s) - } - case PubKeyAlgoECDSA: - sk := priv.PrivateKey.(*ecdsa.PrivateKey) - r, s, err := ecdsa.Sign(config.Random(), sk, digest) - - if err == nil { - sig.ECDSASigR = new(encoding.MPI).SetBig(r) - sig.ECDSASigS = new(encoding.MPI).SetBig(s) - } - case PubKeyAlgoEdDSA: - sk := priv.PrivateKey.(*eddsa.PrivateKey) - r, s, err := eddsa.Sign(sk, digest) - if err == nil { - sig.EdDSASigR = encoding.NewMPI(r) - sig.EdDSASigS = encoding.NewMPI(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, -// the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, - config *Config) error { - h, err := keySignatureHash(hashKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, signingKey, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// RevokeKey computes a revocation signature of pub using priv. On success, the signature is -// stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keyRevocationHash(pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// RevokeSubkey computes a subkey revocation signature of pub using priv. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { - // Identical to a subkey binding signature - return sig.SignKey(pub, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
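// A typical sign-then-write flow, sketched (h already holds the message hash;
// priv is the signing *PrivateKey; a nil config selects the defaults noted
// throughout this file):
//
//	if err := sig.Sign(h, priv, nil); err != nil {
//		return err
//	}
//	var buf bytes.Buffer
//	if err := sig.Serialize(&buf); err != nil {
//		return err
//	}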
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = int(sig.RSASignature.EncodedLength()) - case PubKeyAlgoDSA: - sigLength = int(sig.DSASigR.EncodedLength()) - sigLength += int(sig.DSASigS.EncodedLength()) - case PubKeyAlgoECDSA: - sigLength = int(sig.ECDSASigR.EncodedLength()) - sigLength += int(sig.ECDSASigS.EncodedLength()) - case PubKeyAlgoEdDSA: - sigLength = int(sig.EdDSASigR.EncodedLength()) - sigLength += int(sig.EdDSASigS.EncodedLength()) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - if sig.Version == 5 { - length -= 4 // eight-octet instead of four-octet big endian - } - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - err = sig.serializeBody(w) - if err != nil { - return err - } - return -} - -func (sig *Signature) serializeBody(w io.Writer) (err error) { - hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) - fields := sig.HashSuffix[:6+hashedSubpacketsLen] - _, err = w.Write(fields) - if err != nil { - return - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - _, err = w.Write(sig.RSASignature.EncodedBytes()) - case PubKeyAlgoDSA: - if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { - return - } - _, err = w.Write(sig.DSASigS.EncodedBytes()) - case PubKeyAlgoECDSA: - if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { - return - } - _, err = w.Write(sig.ECDSASigS.EncodedBytes()) - case PubKeyAlgoEdDSA: - if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { - return - } - _, err = w.Write(sig.EdDSASigS.EncodedBytes()) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. 
- subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil && sig.Version == 4 { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) - } - if sig.IssuerFingerprint != nil { - contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) - subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) - } - if sig.SignerUserId != nil { - subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) - } - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. - - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - if sig.FlagSplitKey { - flags |= KeyFlagSplitKey - } - if sig.FlagAuthenticate { - flags |= KeyFlagAuthenticate - } - if sig.FlagGroupKey { - flags |= KeyFlagGroupKey - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - for _, notation := range sig.Notations { - subpackets = append( - subpackets, - outputSubpacket{ - true, - notationDataSubpacket, - notation.IsCritical, - notation.getData(), - }) - } - - // The following subpackets may only appear in self-signatures. 
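	// (Emitted below, in order: features, trust level and amount, the trust
	// regular expression, key expiration, primary user ID, the preferred
	// symmetric/hash/compression lists, policy URI and preferred AEAD
	// cipher suites.)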
- - var features = byte(0x00) - if sig.SEIPDv1 { - features |= 0x01 - } - if sig.SEIPDv2 { - features |= 0x08 - } - - if features != 0x00 { - subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) - } - - if sig.TrustLevel != 0 { - subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}}) - } - - if sig.TrustRegularExpression != nil { - // RFC specifies the string should be null-terminated; add a null byte to the end - subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")}) - } - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - if len(sig.PolicyURI) > 0 { - subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) - } - - if len(sig.PreferredCipherSuites) > 0 { - serialized := make([]byte, len(sig.PreferredCipherSuites)*2) - for i, cipherSuite := range sig.PreferredCipherSuites { - serialized[2*i] = cipherSuite[0] - serialized[2*i+1] = cipherSuite[1] - } - subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized}) - } - - // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. - if sig.RevocationReason != nil { - subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, - append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) - } - - // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. - if sig.EmbeddedSignature != nil { - var buf bytes.Buffer - err = sig.EmbeddedSignature.serializeBody(&buf) - if err != nil { - return - } - subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) - } - - return -} - -// AddMetadataToHashSuffix modifies the current hash suffix to include metadata -// (format, filename, and time). Version 5 keys protect this data including it -// in the hash computation. See section 5.2.4. -func (sig *Signature) AddMetadataToHashSuffix() { - if sig == nil || sig.Version != 5 { - return - } - if sig.SigType != 0x00 && sig.SigType != 0x01 { - return - } - lit := sig.Metadata - if lit == nil { - // This will translate into six 0x00 bytes. 
- lit = &LiteralData{} - } - - // Extract the current byte count - n := sig.HashSuffix[len(sig.HashSuffix)-8:] - l := uint64( - uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 | - uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7])) - - suffix := bytes.NewBuffer(nil) - suffix.Write(sig.HashSuffix[:l]) - - // Add the metadata - var buf [4]byte - buf[0] = lit.Format - fileName := lit.FileName - if len(lit.FileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - suffix.Write(buf[:2]) - suffix.Write([]byte(lit.FileName)) - binary.BigEndian.PutUint32(buf[:], lit.Time) - suffix.Write(buf[:]) - - // Update the counter and restore trailing bytes - l = uint64(suffix.Len()) - suffix.Write([]byte{0x05, 0xff}) - suffix.Write([]byte{ - uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), - uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), - }) - sig.HashSuffix = suffix.Bytes() -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index bc2caf0e8b..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "crypto/sha256" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/s2k" - "golang.org/x/crypto/hkdf" -) - -// This is the largest session key that we'll support. Since at most 256-bit cipher -// is supported in OpenPGP, this is large enough to contain also the auth tag. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
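// Wire layout as consumed by parse below; bracketed fields are version 5 only:
//
//	version (4|5) | [octet count] | cipher | [AEAD mode] | [octet count] | S2K | [IV] | optional encrypted session key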
-type SymmetricKeyEncrypted struct { - Version int - CipherFunc CipherFunction - Mode AEADMode - s2k func(out, in []byte) - iv []byte - encryptedKey []byte // Contains also the authentication tag for AEAD -} - -// parse parses an SymmetricKeyEncrypted packet as specified in -// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - var buf [1]byte - - // Version - if _, err := readFull(r, buf[:]); err != nil { - return err - } - ske.Version = int(buf[0]) - if ske.Version != 4 && ske.Version != 5 { - return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") - } - - if ske.Version == 5 { - // Scalar octet count - if _, err := readFull(r, buf[:]); err != nil { - return err - } - } - - // Cipher function - if _, err := readFull(r, buf[:]); err != nil { - return err - } - ske.CipherFunc = CipherFunction(buf[0]) - if !ske.CipherFunc.IsSupported() { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0]))) - } - - if ske.Version == 5 { - // AEAD mode - if _, err := readFull(r, buf[:]); err != nil { - return errors.StructuralError("cannot read AEAD octet from packet") - } - ske.Mode = AEADMode(buf[0]) - - // Scalar octet count - if _, err := readFull(r, buf[:]); err != nil { - return err - } - } - - var err error - if ske.s2k, err = s2k.Parse(r); err != nil { - if _, ok := err.(errors.ErrDummyPrivateKey); ok { - return errors.UnsupportedError("missing key GNU extension in session key") - } - return err - } - - if ske.Version == 5 { - // AEAD IV - iv := make([]byte, ske.Mode.IvLength()) - _, err := readFull(r, iv) - if err != nil { - return errors.StructuralError("cannot read AEAD IV") - } - - ske.iv = iv - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. 
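// A minimal usage sketch, assuming ske was just read from a packet stream:
//
//	key, cipherFunc, err := ske.Decrypt([]byte("passphrase"))
//	if err != nil {
//		return nil, err
//	}
//	// key and cipherFunc are then used to open the following
//	// SymmetricallyEncrypted packet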
-func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - switch ske.Version { - case 4: - plaintextKey, cipherFunc, err := ske.decryptV4(key) - return plaintextKey, cipherFunc, err - case 5: - plaintextKey, err := ske.decryptV5(key) - return plaintextKey, CipherFunction(0), err - } - err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") - return nil, CipherFunction(0), err -} - -func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) { - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError( - "unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if len(plaintextKey) != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError( - "length of decrypted key not equal to cipher keysize") - } - return plaintextKey, cipherFunc, nil -} - -func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { - adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} - aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata) - - plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata) - if err != nil { - return nil, err - } - return plaintextKey, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. -// The packet contains a random session key, encrypted by a key derived from -// the given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - - sessionKey := make([]byte, cipherFunc.KeySize()) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - - err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config) - if err != nil { - return - } - - key = sessionKey - return -} - -// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w. -// The packet contains the given session key, encrypted by a key derived from -// the given passphrase. The returned session key must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { - var version int - if config.AEAD() != nil { - version = 5 - } else { - version = 4 - } - cipherFunc := config.Cipher() - // cipherFunc must be AES - if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 { - return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc))) - } - - keySize := cipherFunc.KeySize() - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - var packetLength int - switch version { - case 4: - packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - case 5: - ivLen := config.AEAD().Mode().IvLength() - tagLen := config.AEAD().Mode().TagLength() - packetLength = 5 + len(s2kBytes) + ivLen + keySize + tagLen - } - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - // Symmetric Key Encrypted Version - buf := []byte{byte(version)} - - if version == 5 { - // Scalar octet count - buf = append(buf, byte(3 + len(s2kBytes) + config.AEAD().Mode().IvLength())) - } - - // Cipher function - buf = append(buf, byte(cipherFunc)) - - if version == 5 { - // AEAD mode - buf = append(buf, byte(config.AEAD().Mode())) - - // Scalar octet count - buf = append(buf, byte(len(s2kBytes))) - } - _, err = w.Write(buf) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - switch version { - case 4: - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - case 5: - mode := config.AEAD().Mode() - adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} - aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata) - - // Sample iv using random reader - iv := make([]byte, config.AEAD().Mode().IvLength()) - _, err = io.ReadFull(config.Random(), iv) - if err != nil { - return - } - // Seal and write (encryptedData includes auth. tag) - - encryptedData := aead.Seal(nil, iv, sessionKey, adata) - _, err = w.Write(iv) - if err != nil { - return - } - _, err = w.Write(encryptedData) - if err != nil { - return - } - } - - return -} - -func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) { - hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) - - encryptionKey := make([]byte, c.KeySize()) - _, _ = readFull(hkdfReader, encryptionKey) - - blockCipher := c.new(encryptionKey) - return mode.new(blockCipher) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index dc1a240357..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -const aeadSaltSize = 32 - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - Version int - Contents io.Reader // contains tag for version 2 - IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). 
False is packet type 9 - - // Specific to version 1 - prefix []byte - - // Specific to version 2 - cipher CipherFunction - mode AEADMode - chunkSizeByte byte - salt [aeadSaltSize]byte -} - -const ( - symmetricallyEncryptedVersionMdc = 1 - symmetricallyEncryptedVersionAead = 2 -) - - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.IntegrityProtected { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case symmetricallyEncryptedVersionMdc: - se.Version = symmetricallyEncryptedVersionMdc - case symmetricallyEncryptedVersionAead: - se.Version = symmetricallyEncryptedVersionAead - if err := se.parseAead(r); err != nil { - return err - } - default: - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.Contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted Contents of the -// packet can be read. An incorrect key will only be detected after trying -// to decrypt the entire data. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - if se.Version == symmetricallyEncryptedVersionAead { - return se.decryptAead(key) - } - - return se.decryptMdc(c, key) -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) { - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected) - if err != nil { - return - } - - if aeadSupported { - return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key) - } - - return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config) -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go deleted file mode 100644 index 241800c02e..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2023 Proton AG. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "crypto/cipher" - "crypto/sha256" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "golang.org/x/crypto/hkdf" - "io" -) - -// parseAead parses a V2 SEIPD packet (AEAD) as specified in -// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 -func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error { - headerData := make([]byte, 3) - if n, err := io.ReadFull(r, headerData); n < 3 { - return errors.StructuralError("could not read aead header: " + err.Error()) - } - - // Cipher - se.cipher = CipherFunction(headerData[0]) - // cipherFunc must have block size 16 to use AEAD - if se.cipher.blockSize() != 16 { - return errors.UnsupportedError("invalid aead cipher: " + string(se.cipher)) - } - - // Mode - se.mode = AEADMode(headerData[1]) - if se.mode.TagLength() == 0 { - return errors.UnsupportedError("unknown aead mode: " + string(se.mode)) - } - - // Chunk size - se.chunkSizeByte = headerData[2] - if se.chunkSizeByte > 16 { - return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.chunkSizeByte)) - } - - // Salt - if n, err := io.ReadFull(r, se.salt[:]); n < aeadSaltSize { - return errors.StructuralError("could not read aead salt: " + err.Error()) - } - - return nil -} - -// associatedData for chunks: tag, version, cipher, mode, chunk size byte -func (se *SymmetricallyEncrypted) associatedData() []byte { - return []byte{ - 0xD2, - symmetricallyEncryptedVersionAead, - byte(se.cipher), - byte(se.mode), - se.chunkSizeByte, - } -} - -// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in -// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 -func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) { - aead, nonce := getSymmetricallyEncryptedAeadInstance(se.cipher, se.mode, inputKey, se.salt[:], se.associatedData()) - - // Carry the first tagLen bytes - tagLen := se.mode.TagLength() - peekedBytes := make([]byte, tagLen) - n, err := io.ReadFull(se.Contents, peekedBytes) - if n < tagLen || (err != nil && err != io.EOF) { - return nil, errors.StructuralError("not enough data to decrypt:" + err.Error()) - } - - return &aeadDecrypter{ - aeadCrypter: aeadCrypter{ - aead: aead, - chunkSize: decodeAEADChunkSize(se.chunkSizeByte), - initialNonce: nonce, - associatedData: se.associatedData(), - chunkIndex: make([]byte, 8), - packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, - }, - reader: se.Contents, - peekedBytes: peekedBytes, - }, nil -} - -// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in -// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 -func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) { - // cipherFunc must have block size 16 to use AEAD - if cipherSuite.Cipher.blockSize() != 16 { - return nil, errors.InvalidArgumentError("invalid aead cipher function") - } - - if cipherSuite.Cipher.KeySize() != len(inputKey) { - return nil, errors.InvalidArgumentError("error in aead serialization: bad key length") - } - - // Data for en/decryption: tag, version, cipher, aead mode, chunk size - prefix := []byte{ - 0xD2, - symmetricallyEncryptedVersionAead, - byte(cipherSuite.Cipher), - byte(cipherSuite.Mode), - chunkSizeByte, - } - - // Write header (that correspond to prefix except first byte) - n, err := 
ciphertext.Write(prefix[1:]) - if err != nil || n < 4 { - return nil, err - } - - // Random salt - salt := make([]byte, aeadSaltSize) - if _, err := rand.Read(salt); err != nil { - return nil, err - } - - if _, err := ciphertext.Write(salt); err != nil { - return nil, err - } - - aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) - - return &aeadEncrypter{ - aeadCrypter: aeadCrypter{ - aead: aead, - chunkSize: decodeAEADChunkSize(chunkSizeByte), - associatedData: prefix, - chunkIndex: make([]byte, 8), - initialNonce: nonce, - packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, - }, - writer: ciphertext, - }, nil -} - -func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) { - hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData) - - encryptionKey := make([]byte, c.KeySize()) - _, _ = readFull(hkdfReader, encryptionKey) - - // Last 64 bits of nonce are the counter - nonce = make([]byte, mode.IvLength() - 8) - - _, _ = readFull(hkdfReader, nonce) - - blockCipher := c.new(encryptionKey) - aead = mode.new(blockCipher) - - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go deleted file mode 100644 index 3e070f8b82..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "hash" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// seMdcReader wraps an io.Reader with a no-op Close method. -type seMdcReader struct { - in io.Reader -} - -func (ser seMdcReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seMdcReader) Close() error { - return nil -} - -func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) { - if !c.IsSupported() { - return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c))) - } - - if len(key) != c.KeySize() { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.Contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.IntegrityProtected { - // MDC packets use a different form of OCFB mode. - ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - - plaintext := cipher.StreamReader{S: s, R: se.Contents} - - if se.IntegrityProtected { - // IntegrityProtected packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. 
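	// Its Close is a no-op: without integrity protection there is no
	// trailing MDC packet to drain and verify.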
- return seMdcReader{plaintext}, nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous Contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.ErrMDCMissing - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.ErrMDCMissing - } - } - - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.ErrMDCHashMismatch - } - // The hash already includes the MDC header, but we still check its value - // to confirm encryption correctness - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.ErrMDCMissing - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. -type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. 
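// For reference, a well-formed MDC trailer as verified by seMDCReader.Close
// above is 22 octets: 0xd3 (new-format packet tag 19), 0x14 (the SHA-1
// length), then the 20-byte running hash.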
-type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) { - // Disallow old cipher suites - if !c.IsSupported() || c < CipherAES128 { - return nil, errors.InvalidArgumentError("invalid mdc cipher function") - } - - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length") - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - Contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go deleted file mode 100644 index 88ec72c6c4..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" - "io/ioutil" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. - 0, 0, 0, 0, - 0, 0, 0, 0} - if _, err = buf.Write(data); err != nil { - return - } - if err = jpeg.Encode(&buf, photo, nil); err != nil { - return - } - - lengthBuf := make([]byte, 5) - n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1) - lengthBuf = lengthBuf[:n] - - uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - EncodedLength: lengthBuf, - Contents: buf.Bytes(), - }) - } - return -} - -// NewUserAttribute creates a new user attribute packet containing the given subpackets. 
-func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { - return &UserAttribute{Contents: contents} -} - -func (uat *UserAttribute) parse(r io.Reader) (err error) { - // RFC 4880, section 5.12 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uat.Contents, err = OpaqueSubpackets(b) - return -} - -// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including -// header. -func (uat *UserAttribute) Serialize(w io.Writer) (err error) { - var buf bytes.Buffer - for _, sp := range uat.Contents { - err = sp.Serialize(&buf) - if err != nil { - return err - } - } - if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { - return err - } - _, err = w.Write(buf.Bytes()) - return -} - -// ImageData returns zero or more byte slices, each containing -// JPEG File Interchange Format (JFIF), for each photo in the -// user attribute packet. -func (uat *UserAttribute) ImageData() (imageData [][]byte) { - for _, sp := range uat.Contents { - if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { - imageData = append(imageData, sp.Contents[16:]) - } - } - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go deleted file mode 100644 index 614fbafd5e..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - "io/ioutil" - "strings" -) - -// UserId contains text that is intended to represent the name and email -// address of the key holder. See RFC 4880, section 5.11. By convention, this -// takes the form "Full Name (Comment) <email@example.com>" -type UserId struct { - Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below. - - Name, Comment, Email string -} - -func hasInvalidCharacters(s string) bool { - for _, c := range s { - switch c { - case '(', ')', '<', '>', 0: - return true - } - } - return false -} - -// NewUserId returns a UserId or nil if any of the arguments contain invalid -// characters. The invalid characters are '\x00', '(', ')', '<' and '>' -func NewUserId(name, comment, email string) *UserId { - // RFC 4880 doesn't deal with the structure of userid strings; the - // name, comment and email form is just a convention. However, there's - // no convention about escaping the metacharacters and GPG just refuses - // to create user ids where, say, the name contains a '('. We mirror - // this behaviour.
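For context, a minimal runnable sketch of how a caller uses this constructor (values are illustrative, not from this repository):

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// The three fields are assembled as "Full Name (Comment) <email>".
	uid := packet.NewUserId("Alice Example", "work", "alice@example.com")
	fmt.Println(uid.Id) // Alice Example (work) <alice@example.com>

	// Metacharacters are refused outright, mirroring GPG: the result is nil.
	if packet.NewUserId("Al(ice", "", "") == nil {
		fmt.Println("rejected: name contains '('")
	}
}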
- - if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { - return nil - } - - uid := new(UserId) - uid.Name, uid.Comment, uid.Email = name, comment, email - uid.Id = name - if len(comment) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "(" - uid.Id += comment - uid.Id += ")" - } - if len(email) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "<" - uid.Id += email - uid.Id += ">" - } - return uid -} - -func (uid *UserId) parse(r io.Reader) (err error) { - // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uid.Id = string(b) - uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) - return -} - -// Serialize marshals uid to w in the form of an OpenPGP packet, including -// header. -func (uid *UserId) Serialize(w io.Writer) error { - err := serializeHeader(w, packetTypeUserId, len(uid.Id)) - if err != nil { - return err - } - _, err = w.Write([]byte(uid.Id)) - return err -} - -// parseUserId extracts the name, comment and email from a user id string that -// is formatted as "Full Name (Comment) <email@example.com>". -func parseUserId(id string) (name, comment, email string) { - var n, c, e struct { - start, end int - } - var state int - - for offset, rune := range id { - switch state { - case 0: - // Entering name - n.start = offset - state = 1 - fallthrough - case 1: - // In name - if rune == '(' { - state = 2 - n.end = offset - } else if rune == '<' { - state = 5 - n.end = offset - } - case 2: - // Entering comment - c.start = offset - state = 3 - fallthrough - case 3: - // In comment - if rune == ')' { - state = 4 - c.end = offset - } - case 4: - // Between comment and email - if rune == '<' { - state = 5 - } - case 5: - // Entering email - e.start = offset - state = 6 - fallthrough - case 6: - // In email - if rune == '>' { - state = 7 - e.end = offset - } - default: - // After email - } - } - switch state { - case 1: - // ended in the name - n.end = len(id) - case 3: - // ended in comment - c.end = len(id) - case 6: - // ended in email - e.end = len(id) - } - - name = strings.TrimSpace(id[n.start:n.end]) - comment = strings.TrimSpace(id[c.start:c.end]) - email = strings.TrimSpace(id[e.start:e.end]) - - // RFC 2822 3.4: alternate simple form of a mailbox - if email == "" && strings.ContainsRune(name, '@') { - email = name - name = "" - } - - return -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go deleted file mode 100644 index e910e18441..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. -package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - _ "crypto/sha512" - "hash" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/armor" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/packet" - _ "golang.org/x/crypto/sha3" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. -type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - Signature *packet.Signature // the signature packet itself. - SignatureError error // nil if the signature is good. - UnverifiedSignatures []*packet.Signature // all other unverified signature packets. - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted - var edp packet.EncryptedDataPacket - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. 
The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. - md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: - break - default: - continue - } - if keyring != nil { - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - } - case *packet.SymmetricallyEncrypted: - if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() { - return nil, errors.UnsupportedError("message is not integrity protected") - } - edp = p - break ParsePackets - case *packet.AEADEncrypted: - edp = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring, config) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. 
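As a hedged illustration of the prompt flow described above (the helper name and wiring are assumptions, not part of this package):

package example

import (
	"io"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// decryptWithPassphrase drives ReadMessage with a PromptFunction that
// unlocks candidate private keys in place and offers the same passphrase
// for symmetric decryption.
func decryptWithPassphrase(r io.Reader, keyring openpgp.EntityList, passphrase []byte) error {
	prompt := func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
		for _, k := range keys {
			// Ignore per-key failures; ReadMessage retries with any key
			// that was successfully decrypted. A real caller should give
			// up after a failed attempt, since the prompt is otherwise
			// invoked again indefinitely.
			_ = k.PrivateKey.Decrypt(passphrase)
		}
		return passphrase, nil
	}
	md, err := openpgp.ReadMessage(r, keyring, prompt, nil)
	if err != nil {
		return err
	}
	// Integrity and signature results are only valid once UnverifiedBody
	// has been read to EOF.
	_, err = io.Copy(os.Stdout, md.UnverifiedBody)
	return err
}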
-FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - if errDec != nil { - continue - } - } - // Try to decrypt symmetrically encrypted - decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - // In v4, on a wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc: - // in fewer than 5% of cases will we proceed to decrypt the data - if err == nil { - decrypted, err = edp.Decrypt(cipherFunc, key) - if err != nil { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) - if sensitiveParsingErr != nil { - return nil, errors.StructuralError("parsing error") - } - return mdFinal, nil -} - -// readSignedMessage reads a possibly signed message. If mdin is non-nil, then -// that structure is updated and returned; otherwise a fresh MessageDetails is -// used. -func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash - var prevLast bool -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if prevLast { - return nil, errors.UnsupportedError("nested signature packets") - } - - if p.IsLast { - prevLast = true - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md.SignatureError = err - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - if keyring != nil { - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.IsSigned && md.SignatureError == nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature.
The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { - return nil, nil, errors.UnsupportedError("unsupported hash function") - } - if !hashFunc.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) - } - h := hashFunc.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (int, error) { - n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) - if sensitiveParsingError == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - return n, mdcErr - } - return n, io.EOF - } - - if sensitiveParsingError != nil { - return n, errors.StructuralError("parsing error") - } - - return n, nil -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. -type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails - config *packet.Config -} - -func (scr *signatureCheckReader) Read(buf []byte) (int, error) { - n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) - - // Hash only if required - if scr.md.SignedBy != nil { - scr.wrappedHash.Write(buf[:n]) - } - - if sensitiveParsingError == io.EOF { - var p packet.Packet - var readError error - var sig *packet.Signature - - p, readError = scr.packets.Next() - for readError == nil { - var ok bool - if sig, ok = p.(*packet.Signature); ok { - if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { - sig.Metadata = scr.md.LiteralData - } - - // If signature KeyID matches - if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { - key := scr.md.SignedBy - signatureError := key.PublicKey.VerifySignature(scr.h, sig) - if signatureError == nil { - signatureError = checkSignatureDetails(key, sig, scr.config) - } - scr.md.Signature = sig - scr.md.SignatureError = signatureError - } else { - scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) - } - } - - p, readError = scr.packets.Next() - } - - if scr.md.SignedBy != nil && scr.md.Signature == nil { - if scr.md.UnverifiedSignatures == nil { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") - } else { - scr.md.SignatureError = errors.StructuralError("No matching signature found") - } - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. 
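The caller-side contract this enforces reads as follows; a minimal sketch, assuming the keyring is already loaded as an EntityList:

package example

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// readAndVerify drains UnverifiedBody before consulting the signature
// result, since both the MDC check and signature verification complete
// only at EOF.
func readAndVerify(msg []byte, keyring openpgp.EntityList) ([]byte, error) {
	md, err := openpgp.ReadMessage(bytes.NewReader(msg), keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	body, err := io.ReadAll(md.UnverifiedBody) // EOF triggers the MDC check
	if err != nil {
		return nil, err
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, fmt.Errorf("signature check failed: %w", md.SignatureError)
	}
	return body, nil // only now is the content safe to trust
}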
- if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - return n, mdcErr - } - } - return n, io.EOF - } - - if sensitiveParsingError != nil { - return n, errors.StructuralError("parsing error") - } - - return n, nil -} - -// VerifyDetachedSignature takes a signed file and a detached signature and -// returns the signature packet and the entity the signature was signed by, -// if any, and a possible signature verification error. -// If the signer isn't known, ErrUnknownIssuer is returned. -func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - var expectedHashes []crypto.Hash - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) -} - -// VerifyDetachedSignatureAndHash performs the same actions as -// VerifyDetachedSignature and checks that the expected hash functions were used. -func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the entity the signature was signed by, if any, and a possible -// signature verification error. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { - var expectedHashes []crypto.Hash - return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) -} - -// CheckDetachedSignatureAndHash performs the same actions as -// CheckDetachedSignature and checks that the expected hash functions were used. 
-func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { - _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) - return -} - -func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - expectedHashesLen := len(expectedHashes) - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, nil, err - } - - var ok bool - sig, ok = p.(*packet.Signature) - if !ok { - return nil, nil, errors.StructuralError("non signature packet found") - } - if sig.IssuerKeyId == nil { - return nil, nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - - for i, expectedHash := range expectedHashes { - if hashFunc == expectedHash { - break - } - if i+1 == expectedHashesLen { - return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") - } - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, nil, err - } - - for _, key := range keys { - err = key.PublicKey.VerifySignature(h, sig) - if err == nil { - return sig, key.Entity, checkSignatureDetails(&key, sig, config) - } - } - - return nil, nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body, config) -} - -// checkSignatureDetails returns an error if: -// - The signature (or one of the binding signatures mentioned below) -// has an unknown critical notation data subpacket -// - The primary key of the signing entity is revoked -// The signature was signed by a subkey and: -// - The signing subkey is revoked -// - The primary identity is revoked -// - The signature is expired -// - The primary key of the signing entity is expired according to the -// primary identity binding signature -// The signature was signed by a subkey and: -// - The signing subkey is expired according to the subkey binding signature -// - The signing subkey binding signature is expired -// - The signing subkey cross-signature is expired -// NOTE: The order of these checks is important, as the caller may choose to -// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never -// ignore any other errors.
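A short usage sketch for the detached-signature entry points above (file names are illustrative and keyring loading is assumed):

package example

import (
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// verifyDetached checks data.txt against an ASCII-armored detached
// signature in data.txt.asc, returning the signing entity on success.
func verifyDetached(keyring openpgp.EntityList) (*openpgp.Entity, error) {
	signed, err := os.Open("data.txt")
	if err != nil {
		return nil, err
	}
	defer signed.Close()

	sig, err := os.Open("data.txt.asc")
	if err != nil {
		return nil, err
	}
	defer sig.Close()

	// A nil config selects defaults; ErrUnknownIssuer is returned when
	// no key in the ring matches the signature's issuer.
	return openpgp.CheckArmoredDetachedSignature(keyring, signed, sig, nil)
}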
-// TODO: Also return an error if: -// - The primary key is expired according to a direct-key signature -// - (For V5 keys only:) The direct-key signature (exists and) is expired -func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { - now := config.Now() - primaryIdentity := key.Entity.PrimaryIdentity() - signedBySubKey := key.PublicKey != key.Entity.PrimaryKey - sigsToCheck := []*packet.Signature{ signature, primaryIdentity.SelfSignature } - if signedBySubKey { - sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) - } - for _, sig := range sigsToCheck { - for _, notation := range sig.Notations { - if notation.IsCritical && !config.KnownNotation(notation.Name) { - return errors.SignatureError("unknown critical notation: " + notation.Name) - } - } - } - if key.Entity.Revoked(now) || // primary key is revoked - (signedBySubKey && key.Revoked(now)) || // subkey is revoked - primaryIdentity.Revoked(now) { // primary identity is revoked - return errors.ErrKeyRevoked - } - if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired - return errors.ErrKeyExpired - } - if signedBySubKey { - if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired - return errors.ErrKeyExpired - } - } - for _, sig := range sigsToCheck { - if sig.SigExpired(now) { // any of the relevant signatures are expired - return errors.ErrSignatureExpired - } - } - return nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go deleted file mode 100644 index db6dad5c0b..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go +++ /dev/null @@ -1,274 +0,0 @@ -package openpgp - -const testKey1KeyId uint64 = 0xA34D7E18C20C31BB -const testKey3KeyId uint64 = 0x338934250CCC0360 -const testKeyP256KeyId uint64 = 0xd44a2c495918513e - -const signedInput = "Signed message\nline 2\nline 3\n" -const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" - -const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" - -const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" - -const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" - -const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" - -const 
detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" - -// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt -const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" - -const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" - -const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" - -const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" - -const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" - -const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" - -const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" - -const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" - -const unverifiedSignatureEncryptedMessageHex = 
"c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" - -const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" - -const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" - -const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" - -const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" - -const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" - -const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" - -const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" - -const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1.4.10 (GNU/Linux) - -lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp -idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn 
-vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB -AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X -0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL -IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk -VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn -gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 -TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx -q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz -dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA -CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 -ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ -eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid -AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV -bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK -/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA -A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX -TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc -lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 -rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN -oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 -QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU -nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC -AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp -BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad -AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL -VrM0m72/jnpKo04= -=zNCn ------END PGP PRIVATE KEY BLOCK-----` - -const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Charset: UTF-8 - -xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 -sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk -Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ -AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD -24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX -+WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 -B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX -fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA -FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 -ex7En5r7rHR5xwX82Msc+Rq9dSyO -=7MrZ ------END PGP PUBLIC KEY BLOCK-----` - -const dsaKeyWithSHA512 = 
`9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` - -const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` - -const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` - -const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` - -const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` - -const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Comment: GPGTools - https://gpgtools.org - -mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY -BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z -tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 -JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV -/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ -K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H -JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx -YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 -b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi -UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M -pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM -AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz -786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd -EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB -=RZia ------END PGP PUBLIC KEY BLOCK----- -` - -const signedMessageV3 = `-----BEGIN PGP MESSAGE----- -Comment: GPGTools - https://gpgtools.org - -owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP -q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka -uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka -DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d -iT57d/OhWwA= -=hG7R ------END PGP MESSAGE----- -` - -// 
https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/ -const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- - -lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd -fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA -Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC -X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI -CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 -M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA -MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD -AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF -GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb -DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 -TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== -=IiS2 ------END PGP PRIVATE KEY BLOCK-----` - -// Generated with the above private key -const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- -Version: OpenPGP.js v4.10.7 -Comment: https://openpgpjs.org - -xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf -bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G -y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv -UQdl5MlBka1QSNbMq2Bz7XwNPg4= -=6lbM ------END PGP MESSAGE-----` - -const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv -/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz -/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ -5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 -X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv -9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 -qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb -SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb -vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w -bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm -bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN -aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8 -nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl -+bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J -BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK -chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG -ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw -FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln -cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+ -s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh -6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ -sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz -dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt -YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ -1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i -aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr -fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF -gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q -Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj -jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF -qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH -dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI -zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/ 
-0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8 -9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x -Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH -UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7 -/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ -nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl -owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM -GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu -a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0 -M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD -lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5 -01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb -hW1Hj9AO8lzggBQ= -=Nt+N ------END PGP PUBLIC KEY BLOCK----- -` - -const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE----- - -wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8 -N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3 -AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI -NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv -KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y -EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN -AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb -UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB -ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+ -7A5CBid5 -=aQkm ------END PGP SIGNATURE----- -` - -const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE----- - -owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x -U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM -f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10 -2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n -vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA= -=fRXs ------END PGP MESSAGE-----` - -const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+ -fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5 -GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0 -JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS -YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6 -AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki -Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf -9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa -JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag -Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr -woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb -LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA -SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP -GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2 -bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X -W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD -AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY -hz3tYjKhoFTKEIq3y3Pp -=h/aX ------END PGP PUBLIC KEY BLOCK-----` diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go deleted file mode 100644 index d0b858345f..0000000000 --- 
a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. -package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" -) - -// Config collects configuration parameters for s2k key-stretching -// transformations. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // S2KMode is the mode of s2k function. - // It can be 0 (simple), 1(salted), 3(iterated) - // 2(reserved) 100-110(private/experimental). - S2KMode uint8 - // Hash is the default hash function to be used. If - // nil, SHA256 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 65536 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 16777216 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. See RFC 4880 Section 3.7.1.3. - S2KCount int -} - -// Params contains all the parameters of the s2k packet -type Params struct { - // mode is the mode of s2k function. - // It can be 0 (simple), 1(salted), 3(iterated) - // 2(reserved) 100-110(private/experimental). - mode uint8 - // hashId is the ID of the hash function used in any of the modes - hashId byte - // salt is a byte array to use as a salt in hashing process - salt []byte - // countByte is used to determine how many rounds of hashing are to - // be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3. - countByte byte -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - return crypto.SHA256 - } - - return c.Hash -} - -// EncodedCount get encoded count -func (c *Config) EncodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 224 // The common case. Corresponding to 16777216 - } - - i := c.S2KCount - - switch { - case i < 65536: - i = 65536 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.7.1. -func encodeCount(i int) uint8 { - if i < 65536 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 96; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. 
-func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. -func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. -func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Generate generates valid parameters from given configuration. -// It will enforce salted + hashed s2k method -func Generate(rand io.Reader, c *Config) (*Params, error) { - hashId, ok := algorithm.HashToHashId(c.Hash) - if !ok { - return nil, errors.UnsupportedError("no such hash") - } - - params := &Params{ - mode: 3, // Enforce iterared + salted method - hashId: hashId, - salt: make([]byte, 8), - countByte: c.EncodedCount(), - } - - if _, err := io.ReadFull(rand, params.salt); err != nil { - return nil, err - } - - return params, nil -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. If the S2K is a special -// GNU extension that indicates that the private key is missing, then the error -// returned is errors.ErrDummyPrivateKey. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - params, err := ParseIntoParams(r) - if err != nil { - return nil, err - } - - return params.Function() -} - -// ParseIntoParams reads a binary specification for a string-to-key -// transformation from r and returns a struct describing the s2k parameters. -func ParseIntoParams(r io.Reader) (params *Params, err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - params = &Params{ - mode: buf[0], - hashId: buf[1], - } - - switch params.mode { - case 0: - return params, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return nil, err - } - - params.salt = buf[:8] - return params, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return nil, err - } - - params.salt = buf[:8] - params.countByte = buf[8] - return params, nil - case 101: - // This is a GNU extension. 
See - // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 - if _, err = io.ReadFull(r, buf[:4]); err != nil { - return nil, err - } - if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 { - return params, nil - } - return nil, errors.UnsupportedError("GNU S2K extension") - } - - return nil, errors.UnsupportedError("S2K function") -} - -func (params *Params) Dummy() bool { - return params != nil && params.mode == 101 -} - -func (params *Params) Function() (f func(out, in []byte), err error) { - if params.Dummy() { - return nil, errors.ErrDummyPrivateKey("dummy key found") - } - hashObj, ok := algorithm.HashIdToHashWithSha1(params.hashId) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) - } - if !hashObj.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) - } - - switch params.mode { - case 0: - f := func(out, in []byte) { - Simple(out, hashObj.New(), in) - } - - return f, nil - case 1: - f := func(out, in []byte) { - Salted(out, hashObj.New(), in, params.salt) - } - - return f, nil - case 3: - f := func(out, in []byte) { - Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte)) - } - - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -func (params *Params) Serialize(w io.Writer) (err error) { - if _, err = w.Write([]byte{params.mode}); err != nil { - return - } - if _, err = w.Write([]byte{params.hashId}); err != nil { - return - } - if params.Dummy() { - _, err = w.Write(append([]byte("GNU"), 1)) - return - } - if params.mode > 0 { - if _, err = w.Write(params.salt); err != nil { - return - } - if params.mode == 3 { - _, err = w.Write([]byte{params.countByte}) - } - } - return -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - params, err := Generate(rand, c) - if err != nil { - return err - } - err = params.Serialize(w) - if err != nil { - return err - } - - f, err := params.Function() - if err != nil { - return err - } - f(key, passphrase) - return nil -} diff --git a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go deleted file mode 100644 index b3ae72f7bb..0000000000 --- a/openshift/tools/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/armor" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/packet" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. 
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) - if !ok { - return errors.InvalidArgumentError("no valid signing keys") - } - if signingKey.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signingKey.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - if _, ok := algorithm.HashToHashId(config.Hash()); !ok { - return errors.InvalidArgumentError("invalid hash function") - } - - sig := createSignaturePacket(signingKey.PublicKey, sigType, config) - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - if _, err = io.Copy(wrappedHash, message); err != nil { - return err - } - - err = sig.Sign(h, signingKey.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. 
-// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - - var w io.WriteCloser - cipherSuite := packet.CipherSuite{ - Cipher: config.Cipher(), - Mode: config.AEAD().Mode(), - } - w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config) - if err != nil { - return - } - - literalData := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literalData, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. -func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. -func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v[0] == v2[0] && v[1] == v2[1] { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := algorithm.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// EncryptText encrypts a message to a number of recipients and, optionally, -// signs it. Optional information is contained in 'hints', also encrypted, that -// aids the recipients in processing the message. The resulting WriteCloser -// must be closed after the contents of the file have been written. If config -// is nil, sensible defaults will be used. The signing is done in text mode. -func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config) -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config) -} - -// EncryptSplit encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. 
The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) -} - -// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := algorithm.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: sigType, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. 
- w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - h, wrappedHash, err := hashForSignature(hash, sigType) - if err != nil { - return nil, err - } - metadata := &packet.LiteralData{ - Format: 't', - FileName: hints.FileName, - Time: epochSeconds, - } - if hints.IsBinary { - metadata.Format = 'b' - } - return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil - } - return literalData, nil -} - -// encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES256), - uint8(packet.CipherAES128), - } - - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA3_256), - hashToHashId(crypto.SHA3_512), - } - - // Prefer GCM if everyone supports it - candidateCipherSuites := [][2]uint8{ - {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)}, - {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)}, - {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)}, - {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)}, - {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)}, - {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}, - } - - candidateCompression := []uint8{ - uint8(packet.CompressionNone), - uint8(packet.CompressionZIP), - uint8(packet.CompressionZLIB), - } - - encryptKeys := make([]Key, len(to)) - - // AEAD is used only if config enables it and every key supports it - aeadSupported := config.AEAD() != nil - - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") - } - - sig := to[i].PrimaryIdentity().SelfSignature - if sig.SEIPDv2 == false { - aeadSupported = false - } - - candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash) - candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites) - candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression) - } - - // In the event that the intersection of supported algorithms is empty we use the ones - // labelled as MUST that every implementation supports. 
- if len(candidateCiphers) == 0 { - // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3 - candidateCiphers = []uint8{uint8(packet.CipherAES128)} - } - if len(candidateHashes) == 0 { - // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos - candidateHashes = []uint8{hashToHashId(crypto.SHA256)} - } - if len(candidateCipherSuites) == 0 { - // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 - candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}} - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - aeadCipherSuite := packet.CipherSuite{ - Cipher: packet.CipherFunction(candidateCipherSuites[0][0]), - Mode: packet.AEADMode(candidateCipherSuites[0][1]), - } - - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - var payload io.WriteCloser - payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config) - if err != nil { - return - } - - payload, err = handleCompression(payload, candidateCompression, config) - if err != nil { - return nil, err - } - - return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) -} - -// Sign signs a message. The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA3_256), - hashToHashId(crypto.SHA3_512), - } - defaultHashes := candidateHashes[0:1] - preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - if len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") - } - - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. 
-type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - wrappedHash hash.Hash - h hash.Hash - signer *packet.PrivateKey - sigType packet.SignatureType - config *packet.Config - metadata *packet.LiteralData // V5 signatures protect document metadata -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.wrappedHash.Write(data) - switch s.sigType { - case packet.SigTypeBinary: - return s.literalData.Write(data) - case packet.SigTypeText: - flag := 0 - return writeCanonical(s.literalData, data, &flag) - } - return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) -} - -func (s signatureWriter) Close() error { - sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config) - sig.Hash = s.hashType - sig.Metadata = s.metadata - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature { - sigLifetimeSecs := config.SigLifetime() - return &packet.Signature{ - Version: signer.Version, - SigType: sigType, - PubKeyAlgo: signer.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.KeyId, - IssuerFingerprint: signer.Fingerprint, - Notations: config.Notations(), - SigLifetimeSecs: &sigLifetimeSecs, - } -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) { - data = compressed - confAlgo := config.Compression() - if confAlgo == packet.CompressionNone { - return - } - - // Set algorithm labelled as MUST as fallback - // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4 - finalAlgo := packet.CompressionNone - // if compression specified by config available we will use it - for _, c := range candidateCompression { - if uint8(confAlgo) == c { - finalAlgo = confAlgo - break - } - } - - if finalAlgo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig) - if err != nil { - return - } - } - return data, nil -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/CODE_OF_CONDUCT.md b/openshift/tools/vendor/github.com/adrg/xdg/CODE_OF_CONDUCT.md deleted file mode 100644 index 75349e53ee..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, sex characteristics, gender identity and -expression, level of experience, education, 
socio-economic status, nationality, -personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behaviour that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behaviour by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behaviour and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behaviour. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviour that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behaviour may be -reported by contacting the project team at adrg@epistack.com. All complaints -will be reviewed and investigated and will result in a response that is deemed -necessary and appropriate to the circumstances. The project team is obligated to -maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/openshift/tools/vendor/github.com/adrg/xdg/CONTRIBUTING.md b/openshift/tools/vendor/github.com/adrg/xdg/CONTRIBUTING.md deleted file mode 100644 index 006f146b38..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/CONTRIBUTING.md +++ /dev/null @@ -1,135 +0,0 @@ -# Contributing to this project - -Contributions in the form of pull requests, issues or just general feedback, -are always welcome. 
Please take a moment to review this document in order to -make the contribution process easy and effective for everyone involved. - -Following these guidelines helps to communicate that you respect the time of -the developers managing and developing this open source project. In return, -they should reciprocate that respect in addressing your issue or assessing -patches and features. - -## Using the issue tracker - -The issue tracker is the preferred channel for [bug reports](#bugs), -[features requests](#features) and [submitting pull -requests](#pull-requests), but please respect the following restrictions: - -* Please **do not** use the issue tracker for personal support requests (use - [Stack Overflow](http://stackoverflow.com) or IRC). -* Please **do not** derail or troll issues. Keep the discussion on topic and - respect the opinions of others. - - -## Bug reports - -A bug is a _demonstrable problem_ that is caused by the code in the repository. -Good bug reports are extremely helpful - thank you! - -Guidelines for bug reports: - -1. **Use the GitHub issue search** — check if the issue has already been - reported. -2. **Check if the issue has been fixed** — try to reproduce it using the - latest `master` or development branch in the repository. -3. **Isolate the problem** — create a reduced test case. - -A good bug report shouldn't leave others needing to chase you up for more -information. Please try to be as detailed as possible in your report. What is -your environment? What steps will reproduce the issue? What browser(s) and OS -experience the problem? What would you expect to be the outcome? All these -details will help people to fix any potential bugs. - -Example: - -> Short and descriptive example bug report title -> -> A summary of the issue and the browser/OS environment in which it occurs. If -> suitable, include the steps required to reproduce the bug. -> -> 1. This is the first step -> 2. This is the second step -> 3. Further steps, etc. -> -> `` - a link to the reduced test case -> -> Any other information you want to share that is relevant to the issue being -> reported. This might include the lines of code that you have identified as -> causing the bug, and potential solutions (and your opinions on their -> merits). - - - -## Feature requests - -Feature requests are welcome. But take a moment to find out whether your idea -fits with the scope and aims of the project. It's up to *you* to make a strong -case to convince the project's developers of the merits of this feature. Please -provide as much detail and context as possible. - - - -## Pull requests - -Good pull requests - patches, improvements, new features - are a fantastic -help. They should remain focused in scope and avoid containing unrelated -commits. - -**Please ask first** before embarking on any significant pull request (e.g. -implementing features, refactoring code, porting to a different language), -otherwise you risk spending a lot of time working on something that the -project's developers might not want to merge into the project. - -Please adhere to the coding conventions used throughout a project (indentation, -accurate comments, etc.) and any other requirements (such as test coverage). - -Follow this process if you'd like your work considered for inclusion in the -project: - -1. 
[Fork](http://help.github.com/fork-a-repo/) the project, clone your fork, - and configure the remotes: - - ```bash - # Clone your fork of the repo into the current directory - git clone https://github.com// - # Navigate to the newly cloned directory - cd - # Assign the original repo to a remote called "upstream" - git remote add upstream https://github.com// - ``` - -2. If you cloned a while ago, get the latest changes from upstream: - - ```bash - git checkout - git pull upstream - ``` - -3. Create a new topic branch (off the main project development branch) to - contain your feature, change, or fix: - - ```bash - git checkout -b - ``` - -4. Commit your changes in logical chunks and use descriptive commit messages. - Use [interactive rebase](https://help.github.com/articles/interactive-rebase) - to tidy up your commits before making them public. - -5. Locally merge (or rebase) the upstream development branch into your topic branch: - - ```bash - git pull [--rebase] upstream - ``` - -6. Push your topic branch up to your fork: - - ```bash - git push origin - ``` - -7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) - with a clear title and description. - -**IMPORTANT**: By submitting a patch, you agree to allow the project owner to -license your work under the same license as that used by the project. diff --git a/openshift/tools/vendor/github.com/adrg/xdg/LICENSE b/openshift/tools/vendor/github.com/adrg/xdg/LICENSE deleted file mode 100644 index 7307e1b7c8..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Adrian-George Bostan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/openshift/tools/vendor/github.com/adrg/xdg/README.md b/openshift/tools/vendor/github.com/adrg/xdg/README.md deleted file mode 100644 index cf16c5a5ef..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/README.md +++ /dev/null @@ -1,302 +0,0 @@ -
-# xdg
-
-Go implementation of the XDG Base Directory Specification and XDG user directories.
-
- -Provides an implementation of the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). -The specification defines a set of standard paths for storing application files, -including data and configuration files. For portability and flexibility reasons, -applications should use the XDG defined locations instead of hardcoding paths. - -The package also includes the locations of well known [user directories](https://wiki.archlinux.org/index.php/XDG_user_directories), -support for the non-standard `XDG_BIN_HOME` directory, as well as other common directories such as fonts and applications. - -The current implementation supports **most flavors of Unix**, **Windows**, **macOS** and **Plan 9**. -On Windows, where XDG environment variables are not usually set, the package uses [Known Folders](https://docs.microsoft.com/en-us/windows/win32/shell/known-folders) -as defaults. Therefore, appropriate locations are used for common [folders](https://docs.microsoft.com/en-us/windows/win32/shell/knownfolderid) which may have been redirected. - -See usage [examples](#usage) below. Full documentation can be found at https://pkg.go.dev/github.com/adrg/xdg. - -## Installation - go get github.com/adrg/xdg - -## Default locations - -The package defines sensible defaults for XDG variables which are empty or not -present in the environment. - -- On Unix-like operating systems, XDG environment variables are typically defined. -Appropriate default locations are used for the environment variables which are not set. -- On Windows, XDG environment variables are usually not set. If that is the case, -the package relies on the appropriate [Known Folders](https://docs.microsoft.com/en-us/windows/win32/shell/knownfolderid). -Sensible fallback locations are used for the folders which are not set. - -### XDG Base Directory - -
- Unix-like operating systems -
- -| |

Unix

|

macOS

|

Plan 9

| -| :------------------------------------------------------------: | :-----------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | -| XDG_DATA_HOME | ~/.local/share | ~/Library/Application Support | $home/lib | -| XDG_DATA_DIRS | /usr/local/share
/usr/share | /Library/Application Support~/.local/share | /lib | -| XDG_CONFIG_HOME | ~/.config | ~/Library/Application Support | $home/lib | -| XDG_CONFIG_DIRS | /etc/xdg | ~/Library/Preferences
/Library/Application Support
/Library/Preferences
~/.config | /lib | -| XDG_STATE_HOME | ~/.local/state | ~/Library/Application Support | $home/lib/state | -| XDG_CACHE_HOME | ~/.cache | ~/Library/Caches | $home/lib/cache | -| XDG_RUNTIME_DIR | /run/user/$UID | ~/Library/Application Support | /tmp | -| XDG_BIN_HOME | ~/.local/bin | ~/.local/bin | $home/bin | - -
- -
- Microsoft Windows -
- -| |

Known Folder(s)

|

Fallback(s)

| -| :------------------------------------------------------------: | :---------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------: | -| XDG_DATA_HOME | LocalAppData | %LOCALAPPDATA% | -| XDG_DATA_DIRS | RoamingAppData
ProgramData | %APPADATA%
%ProgramData% | -| XDG_CONFIG_HOME | LocalAppData | %LOCALAPPDATA% | -| XDG_CONFIG_DIRS | ProgramData
RoamingAppData | %ProgramData%
%APPDATA% | -| XDG_STATE_HOME | LocalAppData | %LOCALAPPDATA% | -| XDG_CACHE_HOME | LocalAppData\cache | %LOCALAPPDATA%\cache | -| XDG_RUNTIME_DIR | LocalAppData | %LOCALAPPDATA% | -| XDG_BIN_HOME | UserProgramFiles | %LOCALAPPDATA%\Programs | - -
- -### XDG user directories - -XDG user directories environment variables are usually **not** set on most -operating systems. However, if they are present in the environment, they take -precedence. Appropriate fallback locations are used for the environment -variables which are not set. - -- On Unix-like operating systems (except macOS and Plan 9), the package reads the [user-dirs.dirs](https://man.archlinux.org/man/user-dirs.dirs.5.en) config file. -- On Windows, the package uses the appropriate [Known Folders](https://docs.microsoft.com/en-us/windows/win32/shell/knownfolderid). - -Lastly, default locations are used for any user directories which are not set, -as shown in the following tables. - -
- Unix-like operating systems -
- -| |

Unix

|

macOS

|

Plan 9

| -| :--------------------------------------------------------------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | -| XDG_DESKTOP_DIR | ~/Desktop | ~/Desktop | $home/desktop | -| XDG_DOWNLOAD_DIR | ~/Downloads | ~/Downloads | $home/downloads | -| XDG_DOCUMENTS_DIR | ~/Documents | ~/Documents | $home/documents | -| XDG_MUSIC_DIR | ~/Music | ~/Music | $home/music | -| XDG_PICTURES_DIR | ~/Pictures | ~/Pictures | $home/pictures | -| XDG_VIDEOS_DIR | ~/Videos | ~/Movies | $home/videos | -| XDG_TEMPLATES_DIR | ~/Templates | ~/Templates | $home/templates | -| XDG_PUBLICSHARE_DIR | ~/Public | ~/Public | $home/public | - -
- -
- Microsoft Windows -
- -| |

Known Folder(s)

|

Fallback(s)

| -| :--------------------------------------------------------------: | :-----------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | -| XDG_DESKTOP_DIR | Desktop | %USERPROFILE%\Desktop | -| XDG_DOWNLOAD_DIR | Downloads | %USERPROFILE%\Downloads | -| XDG_DOCUMENTS_DIR | Documents | %USERPROFILE%\Documents | -| XDG_MUSIC_DIR | Music | %USERPROFILE%\Music | -| XDG_PICTURES_DIR | Pictures | %USERPROFILE%\Pictures | -| XDG_VIDEOS_DIR | Videos | %USERPROFILE%\Videos | -| XDG_TEMPLATES_DIR | Templates | %APPDATA%\Microsoft\Windows\Templates | -| XDG_PUBLICSHARE_DIR | Public | %PUBLIC% | - -
- -### Other directories - -
- Unix-like operating systems -
- -| |

Unix

|

macOS

|

Plan 9

| -| :-----------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------: | -| Home | $HOME | $HOME | $home | -| Applications | $XDG_DATA_HOME/applications
~/.local/share/applications
/usr/local/share/applications
/usr/share/applications
$XDG_DATA_DIRS/applications | /Applications | $home/bin
/bin | -| Fonts | $XDG_DATA_HOME/fonts
~/.fonts
~/.local/share/fonts
/usr/local/share/fonts
/usr/share/fonts
$XDG_DATA_DIRS/fonts | ~/Library/Fonts
/Library/Fonts
/System/Library/Fonts
/Network/Library/Fonts | $home/lib/font
/lib/font | - -
- -
- Microsoft Windows -
- -| |

Known Folder(s)

|

Fallback(s)

| -| :-----------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Home | Profile | %USERPROFILE% | -| Applications | Programs
CommonPrograms
ProgramFiles
ProgramFilesCommon
UserProgramFiles
UserProgramFilesCommon | %APPDATA%\Microsoft\Windows\Start Menu\Programs
%ProgramData%\Microsoft\Windows\Start Menu\Programs
%ProgramFiles%
%ProgramFiles%\Common Files
%LOCALAPPDATA%\Programs
%LOCALAPPDATA%\Programs\Common| -| Fonts | Fonts | %SystemRoot%\Fonts
%LOCALAPPDATA%\Microsoft\Windows\Fonts | - -
- -## Usage - -#### XDG Base Directory - -```go -package main - -import ( - "log" - - "github.com/adrg/xdg" -) - -func main() { - // XDG Base Directory paths. - log.Println("Home data directory:", xdg.DataHome) - log.Println("Data directories:", xdg.DataDirs) - log.Println("Home config directory:", xdg.ConfigHome) - log.Println("Config directories:", xdg.ConfigDirs) - log.Println("Home state directory:", xdg.StateHome) - log.Println("Cache directory:", xdg.CacheHome) - log.Println("Runtime directory:", xdg.RuntimeDir) - log.Println("Home binaries directory:", xdg.BinHome) - - // Other common directories. - log.Println("Home directory:", xdg.Home) - log.Println("Application directories:", xdg.ApplicationDirs) - log.Println("Font directories:", xdg.FontDirs) - - // Obtain a suitable location for application config files. - // ConfigFile takes one parameter which must contain the name of the file, - // but it can also contain a set of parent directories. If the directories - // don't exist, they will be created relative to the base config directory. - // It is recommended for files to be saved inside an application directory - // relative to the base directory rather than directly inside the base - // directory (e.g. `appname/config.yaml` instead of `appname-config.yaml`). - configFilePath, err := xdg.ConfigFile("appname/config.yaml") - if err != nil { - log.Fatal(err) - } - log.Println("Save the config file at:", configFilePath) - - // For other types of application files use: - // xdg.DataFile() - // xdg.StateFile() - // xdg.CacheFile() - // xdg.RuntimeFile() - - // Finding application config files. - // SearchConfigFile takes one parameter which must contain the name of - // the file, but it can also contain a set of parent directories relative - // to the config search paths (xdg.ConfigHome and xdg.ConfigDirs). - configFilePath, err = xdg.SearchConfigFile("appname/config.yaml") - if err != nil { - log.Fatal(err) - } - log.Println("Config file was found at:", configFilePath) - - // For other types of application files use: - // xdg.SearchDataFile() - // xdg.SearchStateFile() - // xdg.SearchCacheFile() - // xdg.SearchRuntimeFile() -} -``` - -#### XDG user directories - -```go -package main - -import ( - "log" - - "github.com/adrg/xdg" -) - -func main() { - // XDG user directories. - log.Println("Desktop directory:", xdg.UserDirs.Desktop) - log.Println("Download directory:", xdg.UserDirs.Download) - log.Println("Documents directory:", xdg.UserDirs.Documents) - log.Println("Music directory:", xdg.UserDirs.Music) - log.Println("Pictures directory:", xdg.UserDirs.Pictures) - log.Println("Videos directory:", xdg.UserDirs.Videos) - log.Println("Templates directory:", xdg.UserDirs.Templates) - log.Println("Public directory:", xdg.UserDirs.PublicShare) -} -``` - -## Stargazers over time - -[![Stargazers over time](https://starchart.cc/adrg/xdg.svg)](https://starchart.cc/adrg/xdg) - -## Contributing - -Contributions in the form of pull requests, issues or just general feedback, -are always welcome. -See [CONTRIBUTING.MD](CONTRIBUTING.md). - -**Contributors**: -[adrg](https://github.com/adrg), -[wichert](https://github.com/wichert), -[bouncepaw](https://github.com/bouncepaw), -[gabriel-vasile](https://github.com/gabriel-vasile), -[KalleDK](https://github.com/KalleDK), -[nvkv](https://github.com/nvkv), -[djdv](https://github.com/djdv), -[rrjjvv](https://github.com/rrjjvv), -[GreyXor](https://github.com/GreyXor), -[Rican7](https://github.com/Rican7), -[nothub](https://github.com/nothub). 
- -## References - -For more information see: -* [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html) -* [XDG user directories](https://wiki.archlinux.org/index.php/XDG_user_directories) -* [Windows Known Folders](https://docs.microsoft.com/en-us/windows/win32/shell/knownfolderid) - -## License - -Copyright (c) 2014 Adrian-George Bostan. - -This project is licensed under the [MIT license](https://opensource.org/licenses/MIT). -See [LICENSE](LICENSE) for more details. diff --git a/openshift/tools/vendor/github.com/adrg/xdg/base_dirs.go b/openshift/tools/vendor/github.com/adrg/xdg/base_dirs.go deleted file mode 100644 index 65f3e3fa36..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/base_dirs.go +++ /dev/null @@ -1,82 +0,0 @@ -package xdg - -import ( - "os" - - "github.com/adrg/xdg/internal/pathutil" -) - -// XDG Base Directory environment variables. -const ( - envDataHome = "XDG_DATA_HOME" - envDataDirs = "XDG_DATA_DIRS" - envConfigHome = "XDG_CONFIG_HOME" - envConfigDirs = "XDG_CONFIG_DIRS" - envStateHome = "XDG_STATE_HOME" - envCacheHome = "XDG_CACHE_HOME" - envRuntimeDir = "XDG_RUNTIME_DIR" - - // Non-standard. - envBinHome = "XDG_BIN_HOME" -) - -type baseDirectories struct { - dataHome string - data []string - configHome string - config []string - stateHome string - cacheHome string - runtime string - - // Non-standard. - binHome string - fonts []string - applications []string -} - -func (bd baseDirectories) dataFile(relPath string) (string, error) { - return pathutil.Create(relPath, append([]string{bd.dataHome}, bd.data...)) -} - -func (bd baseDirectories) configFile(relPath string) (string, error) { - return pathutil.Create(relPath, append([]string{bd.configHome}, bd.config...)) -} - -func (bd baseDirectories) stateFile(relPath string) (string, error) { - return pathutil.Create(relPath, []string{bd.stateHome}) -} - -func (bd baseDirectories) cacheFile(relPath string) (string, error) { - return pathutil.Create(relPath, []string{bd.cacheHome}) -} - -func (bd baseDirectories) runtimeFile(relPath string) (string, error) { - var paths []string - for _, p := range pathutil.Unique([]string{bd.runtime, os.TempDir()}) { - if pathutil.Exists(p) { - paths = append(paths, p) - } - } - return pathutil.Create(relPath, paths) -} - -func (bd baseDirectories) searchDataFile(relPath string) (string, error) { - return pathutil.Search(relPath, append([]string{bd.dataHome}, bd.data...)) -} - -func (bd baseDirectories) searchConfigFile(relPath string) (string, error) { - return pathutil.Search(relPath, append([]string{bd.configHome}, bd.config...)) -} - -func (bd baseDirectories) searchStateFile(relPath string) (string, error) { - return pathutil.Search(relPath, []string{bd.stateHome}) -} - -func (bd baseDirectories) searchCacheFile(relPath string) (string, error) { - return pathutil.Search(relPath, []string{bd.cacheHome}) -} - -func (bd baseDirectories) searchRuntimeFile(relPath string) (string, error) { - return pathutil.Search(relPath, pathutil.Unique([]string{bd.runtime, os.TempDir()})) -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/codecov.yml b/openshift/tools/vendor/github.com/adrg/xdg/codecov.yml deleted file mode 100644 index 54ee338fd7..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/codecov.yml +++ /dev/null @@ -1,11 +0,0 @@ -coverage: - status: - project: - default: - target: 90% - threshold: 1% - patch: - default: - target: 100% -ignore: - - "paths_plan9.go" diff --git 
a/openshift/tools/vendor/github.com/adrg/xdg/doc.go b/openshift/tools/vendor/github.com/adrg/xdg/doc.go deleted file mode 100644 index 22ba4782e7..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/doc.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Package xdg provides an implementation of the XDG Base Directory Specification. -The specification defines a set of standard paths for storing application files -including data and configuration files. For portability and flexibility reasons, -applications should use the XDG defined locations instead of hardcoding paths. -The package also includes the locations of well known user directories. - -The current implementation supports most flavors of Unix, Windows, Mac OS and Plan 9. - - For more information regarding the XDG Base Directory Specification see: - https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html - - For more information regarding the XDG user directories see: - https://wiki.archlinux.org/index.php/XDG_user_directories - - For more information regarding the Windows Known Folders see: - https://docs.microsoft.com/en-us/windows/win32/shell/known-folders - -# Usage - -XDG Base Directory - - package main - - import ( - "log" - - "github.com/adrg/xdg" - ) - - func main() { - // XDG Base Directory paths. - log.Println("Home data directory:", xdg.DataHome) - log.Println("Data directories:", xdg.DataDirs) - log.Println("Home config directory:", xdg.ConfigHome) - log.Println("Config directories:", xdg.ConfigDirs) - log.Println("Home state directory:", xdg.StateHome) - log.Println("Cache directory:", xdg.CacheHome) - log.Println("Runtime directory:", xdg.RuntimeDir) - log.Println("Home binaries directory:", xdg.BinHome) - - // Other common directories. - log.Println("Home directory:", xdg.Home) - log.Println("Application directories:", xdg.ApplicationDirs) - log.Println("Font directories:", xdg.FontDirs) - - // Obtain a suitable location for application config files. - // ConfigFile takes one parameter which must contain the name of the file, - // but it can also contain a set of parent directories. If the directories - // don't exist, they will be created relative to the base config directory. - // It is recommended for files to be saved inside an application directory - // relative to the base directory rather than directly inside the base - // directory (e.g. `appname/config.yaml` instead of `appname-config.yaml`). - configFilePath, err := xdg.ConfigFile("appname/config.yaml") - if err != nil { - log.Fatal(err) - } - log.Println("Save the config file at:", configFilePath) - - // For other types of application files use: - // xdg.DataFile() - // xdg.StateFile() - // xdg.CacheFile() - // xdg.RuntimeFile() - - // Finding application config files. - // SearchConfigFile takes one parameter which must contain the name of - // the file, but it can also contain a set of parent directories relative - // to the config search paths (xdg.ConfigHome and xdg.ConfigDirs). - configFilePath, err = xdg.SearchConfigFile("appname/config.yaml") - if err != nil { - log.Fatal(err) - } - log.Println("Config file was found at:", configFilePath) - - // For other types of application files use: - // xdg.SearchDataFile() - // xdg.SearchStateFile() - // xdg.SearchCacheFile() - // xdg.SearchRuntimeFile() - } - -XDG user directories - - package main - - import ( - "log" - - "github.com/adrg/xdg" - ) - - func main() { - // XDG user directories. 
- log.Println("Desktop directory:", xdg.UserDirs.Desktop) - log.Println("Download directory:", xdg.UserDirs.Download) - log.Println("Documents directory:", xdg.UserDirs.Documents) - log.Println("Music directory:", xdg.UserDirs.Music) - log.Println("Pictures directory:", xdg.UserDirs.Pictures) - log.Println("Videos directory:", xdg.UserDirs.Videos) - log.Println("Templates directory:", xdg.UserDirs.Templates) - log.Println("Public directory:", xdg.UserDirs.PublicShare) - } -*/ -package xdg diff --git a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil.go b/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil.go deleted file mode 100644 index 981580d198..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil.go +++ /dev/null @@ -1,114 +0,0 @@ -package pathutil - -import ( - "fmt" - "os" - "path/filepath" -) - -// Unique eliminates the duplicate paths from the provided slice and returns -// the result. The paths are expanded using the `ExpandHome` function and only -// absolute paths are kept. The items in the output slice are in the order in -// which they occur in the input slice. -func Unique(paths []string) []string { - var ( - uniq []string - registry = map[string]struct{}{} - ) - - for _, p := range paths { - if p = ExpandHome(p); p != "" && filepath.IsAbs(p) { - if _, ok := registry[p]; ok { - continue - } - - registry[p] = struct{}{} - uniq = append(uniq, p) - } - } - - return uniq -} - -// First returns the first absolute path from the provided slice. -// The paths in the input slice are expanded using the `ExpandHome` function. -func First(paths []string) string { - for _, p := range paths { - if p = ExpandHome(p); p != "" && filepath.IsAbs(p) { - return p - } - } - - return "" -} - -// Create returns a suitable location relative to which the file with the -// specified `name` can be written. The first path from the provided `paths` -// slice which is successfully created (or already exists) is used as a base -// path for the file. The `name` parameter should contain the name of the file -// which is going to be written in the location returned by this function, but -// it can also contain a set of parent directories, which will be created -// relative to the selected parent path. -func Create(name string, paths []string) (string, error) { - searchedPaths := make([]string, 0, len(paths)) - for _, p := range paths { - p = filepath.Join(p, name) - - dir := filepath.Dir(p) - if Exists(dir) { - return p, nil - } - if err := os.MkdirAll(dir, os.ModeDir|0o700); err == nil { - return p, nil - } - - searchedPaths = append(searchedPaths, dir) - } - - return "", fmt.Errorf("could not create any of the following paths: %v", - searchedPaths) -} - -// Search searches for the file with the specified `name` in the provided -// slice of `paths`. The `name` parameter must contain the name of the file, -// but it can also contain a set of parent directories. -func Search(name string, paths []string) (string, error) { - searchedPaths := make([]string, 0, len(paths)) - for _, p := range paths { - p = filepath.Join(p, name) - if Exists(p) { - return p, nil - } - - searchedPaths = append(searchedPaths, filepath.Dir(p)) - } - - return "", fmt.Errorf("could not locate `%s` in any of the following paths: %v", - filepath.Base(name), searchedPaths) -} - -// EnvPath returns the value of the environment variable with the specified -// `name` if it is an absolute path, or the first absolute fallback path. 
-// All paths are expanded using the `ExpandHome` function. -func EnvPath(name string, fallbackPaths ...string) string { - dir := ExpandHome(os.Getenv(name)) - if dir != "" && filepath.IsAbs(dir) { - return dir - } - - return First(fallbackPaths) -} - -// EnvPathList reads the value of the environment variable with the specified -// `name` and attempts to extract a list of absolute paths from it. If there -// are none, a list of absolute fallback paths is returned instead. Duplicate -// paths are removed from the returned slice. All paths are expanded using the -// `ExpandHome` function. -func EnvPathList(name string, fallbackPaths ...string) []string { - dirs := Unique(filepath.SplitList(os.Getenv(name))) - if len(dirs) != 0 { - return dirs - } - - return Unique(fallbackPaths) -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_plan9.go b/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_plan9.go deleted file mode 100644 index a6e378a2de..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_plan9.go +++ /dev/null @@ -1,40 +0,0 @@ -package pathutil - -import ( - "errors" - "io/fs" - "os" - "path/filepath" - "strings" -) - -// UserHomeDir returns the home directory of the current user. -func UserHomeDir() string { - if home := os.Getenv("home"); home != "" { - return home - } - - return "/" -} - -// Exists returns true if the specified path exists. -func Exists(path string) bool { - _, err := os.Stat(path) - return err == nil || errors.Is(err, fs.ErrExist) -} - -// ExpandHome substitutes `~` and `$home` at the start of the specified `path`. -func ExpandHome(path string) string { - home := UserHomeDir() - if path == "" || home == "" { - return path - } - if path[0] == '~' { - return filepath.Join(home, path[1:]) - } - if strings.HasPrefix(path, "$home") { - return filepath.Join(home, path[5:]) - } - - return path -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_unix.go b/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_unix.go deleted file mode 100644 index 3114a8c0a4..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_unix.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || nacl || linux || netbsd || openbsd || solaris - -package pathutil - -import ( - "errors" - "io/fs" - "os" - "path/filepath" - "strings" -) - -// UserHomeDir returns the home directory of the current user. -func UserHomeDir() string { - if home := os.Getenv("HOME"); home != "" { - return home - } - - return "/" -} - -// Exists returns true if the specified path exists. -func Exists(path string) bool { - _, err := os.Stat(path) - return err == nil || errors.Is(err, fs.ErrExist) -} - -// ExpandHome substitutes `~` and `$HOME` at the start of the specified `path`. 
-func ExpandHome(path string) string { - home := UserHomeDir() - if path == "" || home == "" { - return path - } - if path[0] == '~' { - return filepath.Join(home, path[1:]) - } - if strings.HasPrefix(path, "$HOME") { - return filepath.Join(home, path[5:]) - } - - return path -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_windows.go b/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_windows.go deleted file mode 100644 index 5b18155f1d..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/internal/pathutil/pathutil_windows.go +++ /dev/null @@ -1,71 +0,0 @@ -package pathutil - -import ( - "errors" - "io/fs" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/windows" -) - -// UserHomeDir returns the home directory of the current user. -func UserHomeDir() string { - return KnownFolder(windows.FOLDERID_Profile, []string{"USERPROFILE"}, nil) -} - -// Exists returns true if the specified path exists. -func Exists(path string) bool { - fi, err := os.Lstat(path) - if fi != nil && fi.Mode()&os.ModeSymlink != 0 { - _, err = filepath.EvalSymlinks(path) - } - - return err == nil || errors.Is(err, fs.ErrExist) -} - -// ExpandHome substitutes `%USERPROFILE%` at the start of the specified `path`. -func ExpandHome(path string) string { - home := UserHomeDir() - if path == "" || home == "" { - return path - } - if strings.HasPrefix(path, `%USERPROFILE%`) { - return filepath.Join(home, path[13:]) - } - - return path -} - -// KnownFolder returns the location of the folder with the specified ID. -// If that fails, the folder location is determined by reading the provided -// environment variables (the first non-empty read value is returned). -// If that fails as well, the first non-empty fallback is returned. -// If all of the above fails, the function returns an empty string. -func KnownFolder(id *windows.KNOWNFOLDERID, envVars []string, fallbacks []string) string { - if id != nil { - flags := []uint32{windows.KF_FLAG_DEFAULT, windows.KF_FLAG_DEFAULT_PATH} - for _, flag := range flags { - p, _ := windows.KnownFolderPath(id, flag|windows.KF_FLAG_DONT_VERIFY) - if p != "" { - return p - } - } - } - - for _, envVar := range envVars { - p := os.Getenv(envVar) - if p != "" { - return p - } - } - - for _, fallback := range fallbacks { - if fallback != "" { - return fallback - } - } - - return "" -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/internal/userdirs/config_unix.go b/openshift/tools/vendor/github.com/adrg/xdg/internal/userdirs/config_unix.go deleted file mode 100644 index ac7efb9648..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/internal/userdirs/config_unix.go +++ /dev/null @@ -1,82 +0,0 @@ -//go:build aix || dragonfly || freebsd || (js && wasm) || nacl || linux || netbsd || openbsd || solaris - -package userdirs - -import ( - "bufio" - "io" - "os" - "strings" - - "github.com/adrg/xdg/internal/pathutil" -) - -// ParseConfigFile parses the user directories config file at the -// specified location. -func ParseConfigFile(name string) (*Directories, error) { - f, err := os.Open(name) - if err != nil { - return nil, err - } - defer f.Close() - - return ParseConfig(f) -} - -// ParseConfig parses the user directories config file contained in -// the provided reader. 
-func ParseConfig(r io.Reader) (*Directories, error) { - dirs := &Directories{} - fieldsMap := map[string]*string{ - EnvDesktopDir: &dirs.Desktop, - EnvDownloadDir: &dirs.Download, - EnvDocumentsDir: &dirs.Documents, - EnvMusicDir: &dirs.Music, - EnvPicturesDir: &dirs.Pictures, - EnvVideosDir: &dirs.Videos, - EnvTemplatesDir: &dirs.Templates, - EnvPublicShareDir: &dirs.PublicShare, - } - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if len(line) == 0 || line[0] == '#' { - continue - } - if !strings.HasPrefix(line, "XDG_") { - continue - } - - parts := strings.Split(line, "=") - if len(parts) < 2 { - continue - } - - // Parse key. - field, ok := fieldsMap[strings.TrimSpace(parts[0])] - if !ok { - continue - } - - // Parse value. - runes := []rune(strings.TrimSpace(parts[1])) - - lenRunes := len(runes) - if lenRunes <= 2 || runes[0] != '"' { - continue - } - - for i := 1; i < lenRunes; i++ { - if runes[i] == '"' { - *field = pathutil.ExpandHome(string(runes[1:i])) - break - } - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - - return dirs, nil -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/internal/userdirs/userdirs.go b/openshift/tools/vendor/github.com/adrg/xdg/internal/userdirs/userdirs.go deleted file mode 100644 index b3c30cf90f..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/internal/userdirs/userdirs.go +++ /dev/null @@ -1,40 +0,0 @@ -package userdirs - -// XDG user directories environment variables. -const ( - EnvDesktopDir = "XDG_DESKTOP_DIR" - EnvDownloadDir = "XDG_DOWNLOAD_DIR" - EnvDocumentsDir = "XDG_DOCUMENTS_DIR" - EnvMusicDir = "XDG_MUSIC_DIR" - EnvPicturesDir = "XDG_PICTURES_DIR" - EnvVideosDir = "XDG_VIDEOS_DIR" - EnvTemplatesDir = "XDG_TEMPLATES_DIR" - EnvPublicShareDir = "XDG_PUBLICSHARE_DIR" -) - -// Directories defines the locations of well known user directories. -type Directories struct { - // Desktop defines the location of the user's desktop directory. - Desktop string - - // Download defines a suitable location for user downloaded files. - Download string - - // Documents defines a suitable location for user document files. - Documents string - - // Music defines a suitable location for user audio files. - Music string - - // Pictures defines a suitable location for user image files. - Pictures string - - // VideosDir defines a suitable location for user video files. - Videos string - - // Templates defines a suitable location for user template files. - Templates string - - // PublicShare defines a suitable location for user shared files. - PublicShare string -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/paths_darwin.go b/openshift/tools/vendor/github.com/adrg/xdg/paths_darwin.go deleted file mode 100644 index 3b3d05f8fe..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/paths_darwin.go +++ /dev/null @@ -1,60 +0,0 @@ -package xdg - -import ( - "path/filepath" - - "github.com/adrg/xdg/internal/pathutil" - "github.com/adrg/xdg/internal/userdirs" -) - -func initDirs(home string) { - initBaseDirs(home) - initUserDirs(home) -} - -func initBaseDirs(home string) { - homeAppSupport := filepath.Join(home, "Library", "Application Support") - rootAppSupport := "/Library/Application Support" - - // Initialize standard directories. 
- baseDirs.dataHome = pathutil.EnvPath(envDataHome, homeAppSupport) - baseDirs.data = pathutil.EnvPathList(envDataDirs, - rootAppSupport, - filepath.Join(home, ".local", "share"), - ) - baseDirs.configHome = pathutil.EnvPath(envConfigHome, homeAppSupport) - baseDirs.config = pathutil.EnvPathList(envConfigDirs, - filepath.Join(home, "Library", "Preferences"), - rootAppSupport, - "/Library/Preferences", - filepath.Join(home, ".config"), - ) - baseDirs.stateHome = pathutil.EnvPath(envStateHome, homeAppSupport) - baseDirs.cacheHome = pathutil.EnvPath(envCacheHome, filepath.Join(home, "Library", "Caches")) - baseDirs.runtime = pathutil.EnvPath(envRuntimeDir, homeAppSupport) - - // Initialize non-standard directories. - baseDirs.binHome = pathutil.EnvPath(envBinHome, filepath.Join(home, ".local", "bin")) - - baseDirs.applications = []string{ - "/Applications", - } - - baseDirs.fonts = []string{ - filepath.Join(home, "Library/Fonts"), - "/Library/Fonts", - "/System/Library/Fonts", - "/Network/Library/Fonts", - } -} - -func initUserDirs(home string) { - UserDirs.Desktop = pathutil.EnvPath(userdirs.EnvDesktopDir, filepath.Join(home, "Desktop")) - UserDirs.Download = pathutil.EnvPath(userdirs.EnvDownloadDir, filepath.Join(home, "Downloads")) - UserDirs.Documents = pathutil.EnvPath(userdirs.EnvDocumentsDir, filepath.Join(home, "Documents")) - UserDirs.Music = pathutil.EnvPath(userdirs.EnvMusicDir, filepath.Join(home, "Music")) - UserDirs.Pictures = pathutil.EnvPath(userdirs.EnvPicturesDir, filepath.Join(home, "Pictures")) - UserDirs.Videos = pathutil.EnvPath(userdirs.EnvVideosDir, filepath.Join(home, "Movies")) - UserDirs.Templates = pathutil.EnvPath(userdirs.EnvTemplatesDir, filepath.Join(home, "Templates")) - UserDirs.PublicShare = pathutil.EnvPath(userdirs.EnvPublicShareDir, filepath.Join(home, "Public")) -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/paths_plan9.go b/openshift/tools/vendor/github.com/adrg/xdg/paths_plan9.go deleted file mode 100644 index d0c36baa8b..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/paths_plan9.go +++ /dev/null @@ -1,51 +0,0 @@ -package xdg - -import ( - "path/filepath" - - "github.com/adrg/xdg/internal/pathutil" - "github.com/adrg/xdg/internal/userdirs" -) - -func initDirs(home string) { - initBaseDirs(home) - initUserDirs(home) -} - -func initBaseDirs(home string) { - homeLibDir := filepath.Join(home, "lib") - rootLibDir := "/lib" - - // Initialize standard directories. - baseDirs.dataHome = pathutil.EnvPath(envDataHome, homeLibDir) - baseDirs.data = pathutil.EnvPathList(envDataDirs, rootLibDir) - baseDirs.configHome = pathutil.EnvPath(envConfigHome, homeLibDir) - baseDirs.config = pathutil.EnvPathList(envConfigDirs, rootLibDir) - baseDirs.stateHome = pathutil.EnvPath(envStateHome, filepath.Join(homeLibDir, "state")) - baseDirs.cacheHome = pathutil.EnvPath(envCacheHome, filepath.Join(homeLibDir, "cache")) - baseDirs.runtime = pathutil.EnvPath(envRuntimeDir, "/tmp") - - // Initialize non-standard directories. 
- baseDirs.binHome = pathutil.EnvPath(envBinHome, filepath.Join(home, "bin")) - - baseDirs.applications = []string{ - filepath.Join(home, "bin"), - "/bin", - } - - baseDirs.fonts = []string{ - filepath.Join(homeLibDir, "font"), - "/lib/font", - } -} - -func initUserDirs(home string) { - UserDirs.Desktop = pathutil.EnvPath(userdirs.EnvDesktopDir, filepath.Join(home, "desktop")) - UserDirs.Download = pathutil.EnvPath(userdirs.EnvDownloadDir, filepath.Join(home, "downloads")) - UserDirs.Documents = pathutil.EnvPath(userdirs.EnvDocumentsDir, filepath.Join(home, "documents")) - UserDirs.Music = pathutil.EnvPath(userdirs.EnvMusicDir, filepath.Join(home, "music")) - UserDirs.Pictures = pathutil.EnvPath(userdirs.EnvPicturesDir, filepath.Join(home, "pictures")) - UserDirs.Videos = pathutil.EnvPath(userdirs.EnvVideosDir, filepath.Join(home, "videos")) - UserDirs.Templates = pathutil.EnvPath(userdirs.EnvTemplatesDir, filepath.Join(home, "templates")) - UserDirs.PublicShare = pathutil.EnvPath(userdirs.EnvPublicShareDir, filepath.Join(home, "public")) -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/paths_unix.go b/openshift/tools/vendor/github.com/adrg/xdg/paths_unix.go deleted file mode 100644 index 8a07809b9c..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/paths_unix.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build aix || dragonfly || freebsd || (js && wasm) || nacl || linux || netbsd || openbsd || solaris - -package xdg - -import ( - "os" - "path/filepath" - "strconv" - - "github.com/adrg/xdg/internal/pathutil" - "github.com/adrg/xdg/internal/userdirs" -) - -func initDirs(home string) { - initBaseDirs(home) - initUserDirs(home, baseDirs.configHome) -} - -func initBaseDirs(home string) { - // Initialize standard directories. - baseDirs.dataHome = pathutil.EnvPath(envDataHome, filepath.Join(home, ".local", "share")) - baseDirs.data = pathutil.EnvPathList(envDataDirs, "/usr/local/share", "/usr/share") - baseDirs.configHome = pathutil.EnvPath(envConfigHome, filepath.Join(home, ".config")) - baseDirs.config = pathutil.EnvPathList(envConfigDirs, "/etc/xdg") - baseDirs.stateHome = pathutil.EnvPath(envStateHome, filepath.Join(home, ".local", "state")) - baseDirs.cacheHome = pathutil.EnvPath(envCacheHome, filepath.Join(home, ".cache")) - baseDirs.runtime = pathutil.EnvPath(envRuntimeDir, filepath.Join("/run/user", strconv.Itoa(os.Getuid()))) - - // Initialize non-standard directories. 
- baseDirs.binHome = pathutil.EnvPath(envBinHome, filepath.Join(home, ".local", "bin")) - - appDirs := []string{ - filepath.Join(baseDirs.dataHome, "applications"), - filepath.Join(home, ".local/share/applications"), - "/usr/local/share/applications", - "/usr/share/applications", - } - - fontDirs := []string{ - filepath.Join(baseDirs.dataHome, "fonts"), - filepath.Join(home, ".fonts"), - filepath.Join(home, ".local/share/fonts"), - "/usr/local/share/fonts", - "/usr/share/fonts", - } - - for _, dir := range baseDirs.data { - appDirs = append(appDirs, filepath.Join(dir, "applications")) - fontDirs = append(fontDirs, filepath.Join(dir, "fonts")) - } - - baseDirs.applications = pathutil.Unique(appDirs) - baseDirs.fonts = pathutil.Unique(fontDirs) -} - -func initUserDirs(home, configHome string) { - dirs, err := userdirs.ParseConfigFile(filepath.Join(configHome, "user-dirs.dirs")) - if err != nil { - dirs = &UserDirectories{} - } - - UserDirs.Desktop = pathutil.EnvPath(userdirs.EnvDesktopDir, dirs.Desktop, filepath.Join(home, "Desktop")) - UserDirs.Download = pathutil.EnvPath(userdirs.EnvDownloadDir, dirs.Download, filepath.Join(home, "Downloads")) - UserDirs.Documents = pathutil.EnvPath(userdirs.EnvDocumentsDir, dirs.Documents, filepath.Join(home, "Documents")) - UserDirs.Music = pathutil.EnvPath(userdirs.EnvMusicDir, dirs.Music, filepath.Join(home, "Music")) - UserDirs.Pictures = pathutil.EnvPath(userdirs.EnvPicturesDir, dirs.Pictures, filepath.Join(home, "Pictures")) - UserDirs.Videos = pathutil.EnvPath(userdirs.EnvVideosDir, dirs.Videos, filepath.Join(home, "Videos")) - UserDirs.Templates = pathutil.EnvPath(userdirs.EnvTemplatesDir, dirs.Templates, filepath.Join(home, "Templates")) - UserDirs.PublicShare = pathutil.EnvPath(userdirs.EnvPublicShareDir, dirs.PublicShare, filepath.Join(home, "Public")) -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/paths_windows.go b/openshift/tools/vendor/github.com/adrg/xdg/paths_windows.go deleted file mode 100644 index bb80819ffc..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/paths_windows.go +++ /dev/null @@ -1,196 +0,0 @@ -package xdg - -import ( - "path/filepath" - - "github.com/adrg/xdg/internal/pathutil" - "github.com/adrg/xdg/internal/userdirs" - "golang.org/x/sys/windows" -) - -func initDirs(home string) { - kf := initKnownFolders(home) - initBaseDirs(home, kf) - initUserDirs(home, kf) -} - -func initBaseDirs(home string, kf *knownFolders) { - // Initialize standard directories. - baseDirs.dataHome = pathutil.EnvPath(envDataHome, kf.localAppData) - baseDirs.data = pathutil.EnvPathList(envDataDirs, kf.roamingAppData, kf.programData) - baseDirs.configHome = pathutil.EnvPath(envConfigHome, kf.localAppData) - baseDirs.config = pathutil.EnvPathList(envConfigDirs, kf.programData, kf.roamingAppData) - baseDirs.stateHome = pathutil.EnvPath(envStateHome, kf.localAppData) - baseDirs.cacheHome = pathutil.EnvPath(envCacheHome, filepath.Join(kf.localAppData, "cache")) - baseDirs.runtime = pathutil.EnvPath(envRuntimeDir, kf.localAppData) - - // Initialize non-standard directories. 
- baseDirs.binHome = pathutil.EnvPath(envBinHome, kf.userProgramFiles) - - baseDirs.applications = []string{ - kf.programs, - kf.commonPrograms, - kf.programFiles, - kf.programFilesCommon, - kf.userProgramFiles, - kf.userProgramFilesCommon, - } - - baseDirs.fonts = []string{ - kf.fonts, - filepath.Join(kf.localAppData, "Microsoft", "Windows", "Fonts"), - } -} - -func initUserDirs(home string, kf *knownFolders) { - UserDirs.Desktop = pathutil.EnvPath(userdirs.EnvDesktopDir, kf.desktop) - UserDirs.Download = pathutil.EnvPath(userdirs.EnvDownloadDir, kf.downloads) - UserDirs.Documents = pathutil.EnvPath(userdirs.EnvDocumentsDir, kf.documents) - UserDirs.Music = pathutil.EnvPath(userdirs.EnvMusicDir, kf.music) - UserDirs.Pictures = pathutil.EnvPath(userdirs.EnvPicturesDir, kf.pictures) - UserDirs.Videos = pathutil.EnvPath(userdirs.EnvVideosDir, kf.videos) - UserDirs.Templates = pathutil.EnvPath(userdirs.EnvTemplatesDir, kf.templates) - UserDirs.PublicShare = pathutil.EnvPath(userdirs.EnvPublicShareDir, kf.public) -} - -type knownFolders struct { - systemDrive string - systemRoot string - programData string - userProfile string - userProfiles string - roamingAppData string - localAppData string - desktop string - downloads string - documents string - music string - pictures string - videos string - templates string - public string - fonts string - programs string - commonPrograms string - programFiles string - programFilesCommon string - userProgramFiles string - userProgramFilesCommon string -} - -func initKnownFolders(home string) *knownFolders { - kf := &knownFolders{ - userProfile: home, - } - kf.systemDrive = filepath.VolumeName(pathutil.KnownFolder( - windows.FOLDERID_Windows, - []string{"SystemDrive", "SystemRoot", "windir"}, - []string{home, `C:`}, - )) + string(filepath.Separator) - kf.systemRoot = pathutil.KnownFolder( - windows.FOLDERID_Windows, - []string{"SystemRoot", "windir"}, - []string{filepath.Join(kf.systemDrive, "Windows")}, - ) - kf.programData = pathutil.KnownFolder( - windows.FOLDERID_ProgramData, - []string{"ProgramData", "ALLUSERSPROFILE"}, - []string{filepath.Join(kf.systemDrive, "ProgramData")}, - ) - kf.userProfiles = pathutil.KnownFolder( - windows.FOLDERID_UserProfiles, - nil, - []string{filepath.Join(kf.systemDrive, "Users")}, - ) - kf.roamingAppData = pathutil.KnownFolder( - windows.FOLDERID_RoamingAppData, - []string{"APPDATA"}, - []string{filepath.Join(home, "AppData", "Roaming")}, - ) - kf.localAppData = pathutil.KnownFolder( - windows.FOLDERID_LocalAppData, - []string{"LOCALAPPDATA"}, - []string{filepath.Join(home, "AppData", "Local")}, - ) - kf.desktop = pathutil.KnownFolder( - windows.FOLDERID_Desktop, - nil, - []string{filepath.Join(home, "Desktop")}, - ) - kf.downloads = pathutil.KnownFolder( - windows.FOLDERID_Downloads, - nil, - []string{filepath.Join(home, "Downloads")}, - ) - kf.documents = pathutil.KnownFolder( - windows.FOLDERID_Documents, - nil, - []string{filepath.Join(home, "Documents")}, - ) - kf.music = pathutil.KnownFolder( - windows.FOLDERID_Music, - nil, - []string{filepath.Join(home, "Music")}, - ) - kf.pictures = pathutil.KnownFolder( - windows.FOLDERID_Pictures, - nil, - []string{filepath.Join(home, "Pictures")}, - ) - kf.videos = pathutil.KnownFolder( - windows.FOLDERID_Videos, - nil, - []string{filepath.Join(home, "Videos")}, - ) - kf.templates = pathutil.KnownFolder( - windows.FOLDERID_Templates, - nil, - []string{filepath.Join(kf.roamingAppData, "Microsoft", "Windows", "Templates")}, - ) - kf.public = pathutil.KnownFolder( - 
windows.FOLDERID_Public, - []string{"PUBLIC"}, - []string{filepath.Join(kf.userProfiles, "Public")}, - ) - kf.fonts = pathutil.KnownFolder( - windows.FOLDERID_Fonts, - nil, - []string{filepath.Join(kf.systemRoot, "Fonts")}, - ) - kf.programs = pathutil.KnownFolder( - windows.FOLDERID_Programs, - nil, - []string{filepath.Join(kf.roamingAppData, "Microsoft", "Windows", "Start Menu", "Programs")}, - ) - kf.commonPrograms = pathutil.KnownFolder( - windows.FOLDERID_CommonPrograms, - nil, - []string{filepath.Join(kf.programData, "Microsoft", "Windows", "Start Menu", "Programs")}, - ) - kf.programFiles = pathutil.KnownFolder( - windows.FOLDERID_ProgramFiles, - []string{"ProgramFiles"}, - []string{filepath.Join(kf.systemDrive, "Program Files")}, - ) - kf.programFilesCommon = pathutil.KnownFolder( - windows.FOLDERID_ProgramFilesCommon, - nil, - []string{filepath.Join(kf.programFiles, "Common Files")}, - ) - kf.userProgramFiles = pathutil.KnownFolder( - windows.FOLDERID_UserProgramFiles, - nil, - []string{ - filepath.Join(kf.localAppData, "Programs"), - }, - ) - kf.userProgramFilesCommon = pathutil.KnownFolder( - windows.FOLDERID_UserProgramFilesCommon, - nil, - []string{ - filepath.Join(kf.userProgramFiles, "Common"), - }, - ) - - return kf -} diff --git a/openshift/tools/vendor/github.com/adrg/xdg/xdg.go b/openshift/tools/vendor/github.com/adrg/xdg/xdg.go deleted file mode 100644 index f4f1d4fed5..0000000000 --- a/openshift/tools/vendor/github.com/adrg/xdg/xdg.go +++ /dev/null @@ -1,212 +0,0 @@ -package xdg - -import ( - "github.com/adrg/xdg/internal/pathutil" - "github.com/adrg/xdg/internal/userdirs" -) - -// UserDirectories defines the locations of well known user directories. -type UserDirectories = userdirs.Directories - -var ( - // Home contains the path of the user's home directory. - Home string - - // DataHome defines the base directory relative to which user-specific - // data files should be stored. This directory is defined by the - // $XDG_DATA_HOME environment variable. If the variable is not set, - // a default equal to $HOME/.local/share should be used. - DataHome string - - // DataDirs defines the preference-ordered set of base directories to - // search for data files in addition to the DataHome base directory. - // This set of directories is defined by the $XDG_DATA_DIRS environment - // variable. If the variable is not set, the default directories - // to be used are /usr/local/share and /usr/share, in that order. The - // DataHome directory is considered more important than any of the - // directories defined by DataDirs. Therefore, user data files should be - // written relative to the DataHome directory, if possible. - DataDirs []string - - // ConfigHome defines the base directory relative to which user-specific - // configuration files should be written. This directory is defined by - // the $XDG_CONFIG_HOME environment variable. If the variable is - // not set, a default equal to $HOME/.config should be used. - ConfigHome string - - // ConfigDirs defines the preference-ordered set of base directories to - // search for configuration files in addition to the ConfigHome base - // directory. This set of directories is defined by the $XDG_CONFIG_DIRS - // environment variable. If the variable is not set, a default equal - // to /etc/xdg should be used. The ConfigHome directory is considered - // more important than any of the directories defined by ConfigDirs. - // Therefore, user config files should be written relative to the - // ConfigHome directory, if possible. 
- ConfigDirs []string - - // StateHome defines the base directory relative to which user-specific - // state files should be stored. This directory is defined by the - // $XDG_STATE_HOME environment variable. If the variable is not set, - // a default equal to ~/.local/state should be used. - StateHome string - - // CacheHome defines the base directory relative to which user-specific - // non-essential (cached) data should be written. This directory is - // defined by the $XDG_CACHE_HOME environment variable. If the variable - // is not set, a default equal to $HOME/.cache should be used. - CacheHome string - - // RuntimeDir defines the base directory relative to which user-specific - // non-essential runtime files and other file objects (such as sockets, - // named pipes, etc.) should be stored. This directory is defined by the - // $XDG_RUNTIME_DIR environment variable. If the variable is not set, - // applications should fall back to a replacement directory with similar - // capabilities. Applications should use this directory for communication - // and synchronization purposes and should not place larger files in it, - // since it might reside in runtime memory and cannot necessarily be - // swapped out to disk. - RuntimeDir string - - // BinHome defines the base directory relative to which user-specific - // binary files should be written. This directory is defined by - // the non-standard $XDG_BIN_HOME environment variable. If the variable is - // not set, a default equal to $HOME/.local/bin should be used. - BinHome string - - // UserDirs defines the locations of well known user directories. - UserDirs UserDirectories - - // FontDirs defines the common locations where font files are stored. - FontDirs []string - - // ApplicationDirs defines the common locations of applications. - ApplicationDirs []string - - // baseDirs defines the locations of base directories. - baseDirs baseDirectories -) - -func init() { - Reload() -} - -// Reload refreshes base and user directories by reading the environment. -// Defaults are applied for XDG variables which are empty or not present -// in the environment. -func Reload() { - // Initialize home directory. - Home = pathutil.UserHomeDir() - - // Initialize base and user directories. - initDirs(Home) - - // Set standard directories. - DataHome = baseDirs.dataHome - DataDirs = baseDirs.data - ConfigHome = baseDirs.configHome - ConfigDirs = baseDirs.config - StateHome = baseDirs.stateHome - CacheHome = baseDirs.cacheHome - RuntimeDir = baseDirs.runtime - - // Set non-standard directories. - BinHome = baseDirs.binHome - FontDirs = baseDirs.fonts - ApplicationDirs = baseDirs.applications -} - -// DataFile returns a suitable location for the specified data file. -// The relPath parameter must contain the name of the data file, and -// optionally, a set of parent directories (e.g. appname/app.data). -// If the specified directories do not exist, they will be created relative -// to the base data directory. On failure, an error containing the -// attempted paths is returned. -func DataFile(relPath string) (string, error) { - return baseDirs.dataFile(relPath) -} - -// ConfigFile returns a suitable location for the specified config file. -// The relPath parameter must contain the name of the config file, and -// optionally, a set of parent directories (e.g. appname/app.yaml). -// If the specified directories do not exist, they will be created relative -// to the base config directory. On failure, an error containing the -// attempted paths is returned. 
-func ConfigFile(relPath string) (string, error) { - return baseDirs.configFile(relPath) -} - -// StateFile returns a suitable location for the specified state file. State -// files are usually volatile data files, not suitable to be stored relative -// to the $XDG_DATA_HOME directory. -// The relPath parameter must contain the name of the state file, and -// optionally, a set of parent directories (e.g. appname/app.state). -// If the specified directories do not exist, they will be created relative -// to the base state directory. On failure, an error containing the -// attempted paths is returned. -func StateFile(relPath string) (string, error) { - return baseDirs.stateFile(relPath) -} - -// CacheFile returns a suitable location for the specified cache file. -// The relPath parameter must contain the name of the cache file, and -// optionally, a set of parent directories (e.g. appname/app.cache). -// If the specified directories do not exist, they will be created relative -// to the base cache directory. On failure, an error containing the -// attempted paths is returned. -func CacheFile(relPath string) (string, error) { - return baseDirs.cacheFile(relPath) -} - -// RuntimeFile returns a suitable location for the specified runtime file. -// The relPath parameter must contain the name of the runtime file, and -// optionally, a set of parent directories (e.g. appname/app.pid). -// If the specified directories do not exist, they will be created relative -// to the base runtime directory. If the base runtime directory does not exist, -// the operating system's temporary directory is used as a fallback. On failure, -// an error containing the attempted paths is returned. -func RuntimeFile(relPath string) (string, error) { - return baseDirs.runtimeFile(relPath) -} - -// SearchDataFile searches for specified file in the data search paths. -// The relPath parameter must contain the name of the data file, and -// optionally, a set of parent directories (e.g. appname/app.data). If the -// file cannot be found, an error specifying the searched paths is returned. -func SearchDataFile(relPath string) (string, error) { - return baseDirs.searchDataFile(relPath) -} - -// SearchConfigFile searches for the specified file in config search paths. -// The relPath parameter must contain the name of the config file, and -// optionally, a set of parent directories (e.g. appname/app.yaml). If the -// file cannot be found, an error specifying the searched paths is returned. -func SearchConfigFile(relPath string) (string, error) { - return baseDirs.searchConfigFile(relPath) -} - -// SearchStateFile searches for the specified file in the state search path. -// The relPath parameter must contain the name of the state file, and -// optionally, a set of parent directories (e.g. appname/app.state). If the -// file cannot be found, an error specifying the searched path is returned. -func SearchStateFile(relPath string) (string, error) { - return baseDirs.searchStateFile(relPath) -} - -// SearchCacheFile searches for the specified file in the cache search path. -// The relPath parameter must contain the name of the cache file, and -// optionally, a set of parent directories (e.g. appname/app.cache). If the -// file cannot be found, an error specifying the searched path is returned. -func SearchCacheFile(relPath string) (string, error) { - return baseDirs.searchCacheFile(relPath) -} - -// SearchRuntimeFile searches for the specified file in the runtime search path. 
-// The relPath parameter must contain the name of the runtime file, and -// optionally, a set of parent directories (e.g. appname/app.pid). The runtime -// file is also searched in the operating system's temporary directory in order -// to cover cases in which the runtime base directory does not exist or is not -// accessible. If the file cannot be found, an error specifying the searched -// paths is returned. -func SearchRuntimeFile(relPath string) (string, error) { - return baseDirs.searchRuntimeFile(relPath) -} diff --git a/openshift/tools/vendor/github.com/beorn7/perks/LICENSE b/openshift/tools/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be66..0000000000 --- a/openshift/tools/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
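Stepping back to the xdg package removed above: its xdg.go documentation pairs each `Search*` helper (which fails with the searched paths when a file is absent) with a creation helper that returns a writable location, creating missing parent directories. A minimal sketch of that search-then-create pattern, using only the documented API; the `appname/app.data` path mirrors the doc-comment examples and is hypothetical:

```go
package main

import (
	"log"

	"github.com/adrg/xdg"
)

func main() {
	// Prefer an existing data file anywhere in the data search paths.
	path, err := xdg.SearchDataFile("appname/app.data")
	if err != nil {
		// Not found: ask for a writable location instead. DataFile creates
		// missing parent directories relative to the base data directory.
		if path, err = xdg.DataFile("appname/app.data"); err != nil {
			log.Fatal(err)
		}
	}
	log.Println("Using data file at:", path)
}
```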
diff --git a/openshift/tools/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/openshift/tools/vendor/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287d7c..0000000000
--- a/openshift/tools/vendor/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
[2,388 lines of integer sample data omitted]
diff --git a/openshift/tools/vendor/github.com/beorn7/perks/quantile/stream.go b/openshift/tools/vendor/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index d7d14f8eb6..0000000000
--- a/openshift/tools/vendor/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
-	"math"
-	"sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
-	Value float64 `json:",string"`
-	Width float64 `json:",string"`
-	Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int           { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * r
-	}
-	return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. 
-func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/openshift/tools/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
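For reference, a minimal sketch of how the quantile package removed above is typically consumed, assuming the upstream import path github.com/beorn7/perks/quantile; the target quantiles, error bounds, and data are illustrative:

package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median, 90th, and 99th percentiles with the per-quantile
	// absolute error bounds that NewTargeted documents.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})
	for i := 0; i < 1000; i++ {
		q.Insert(float64(i)) // illustrative observations
	}
	fmt.Println("p99:", q.Query(0.99))
}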
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/README.md b/openshift/tools/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 33c88305c4..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# xxhash - -[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) -[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. If desired, the `purego` build tag -opts into using the Go code even on those architectures. - -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) -- [Ristretto](https://github.com/dgraph-io/ristretto) -- [Badger](https://github.com/dgraph-io/badger) diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/testall.sh b/openshift/tools/vendor/github.com/cespare/xxhash/v2/testall.sh deleted file mode 100644 index 94b9c44398..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/testall.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -eu -o pipefail - -# Small convenience script for running the tests with various combinations of -# arch/tags. This assumes we're running on amd64 and have qemu available. 
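As a companion to the README removed above, a minimal sketch of the one-shot and streaming xxhash APIs it describes, assuming the upstream import path github.com/cespare/xxhash/v2; the inputs are illustrative:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming: Digest implements hash.Hash64.
	d := xxhash.New()
	_, _ = d.WriteString("hello, ")
	_, _ = d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}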
- -go test ./... -go test -tags purego ./... -GOARCH=arm64 go test -GOARCH=arm64 go test -tags purego diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash.go b/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index 78bddf1cee..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,243 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array for the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -// -// Note that a zero-valued Digest is not ready to receive writes. -// Call Reset or create a Digest using New before calling other methods. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest with a zero seed. -func New() *Digest { - return NewWithSeed(0) -} - -// NewWithSeed creates a new Digest with the given seed. -func NewWithSeed(seed uint64) *Digest { - var d Digest - d.ResetWithSeed(seed) - return &d -} - -// Reset clears the Digest's state so that it can be reused. -// It uses a seed value of zero. -func (d *Digest) Reset() { - d.ResetWithSeed(0) -} - -// ResetWithSeed clears the Digest's state so that it can be reused. -// It uses the given seed to initialize the state. -func (d *Digest) ResetWithSeed(seed uint64) { - d.v1 = seed + prime1 + prime2 - d.v2 = seed + prime2 - d.v3 = seed - d.v4 = seed - prime1 - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. 
-func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index 3e8b132579..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,209 +0,0 @@ -//go:build !appengine && gc && !purego -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. - MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). 
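	// With a zero seed this matches Digest.ResetWithSeed:
	// v1 = prime1 + prime2, v2 = prime2, v3 = 0, v4 = -prime1.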
- MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. - SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s deleted file mode 100644 index 7e3145a221..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s +++ /dev/null @@ -1,183 +0,0 @@ -//go:build !appengine && gc && !purego -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
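// LSR $5 computes nblocks = n / 32; each iteration loads two 16-byte
// pairs with LDP.P and feeds them through round(v1..v4) before CBNZ
// loops on the remaining block count.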
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go deleted file mode 100644 index 78f95f2561..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b with a zero seed. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 118e49e819..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego -// +build !amd64,!arm64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
-func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index 05f5e7dfe7..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build appengine -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s with a zero seed. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index cf9d42aed5..0000000000 --- a/openshift/tools/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build !appengine -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "unsafe" -) - -// In the future it's possible that compiler optimizations will make these -// XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. -// If that happens, even if we keep these functions they can be replaced with -// the trivial safe code. - -// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: -// -// var b []byte -// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) -// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data -// bh.Len = len(s) -// bh.Cap = len(s) -// -// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough -// weight to this sequence of expressions that any function that uses it will -// not be inlined. 
Instead, the functions below use a different unsafe -// conversion designed to minimize the inliner weight and allow both to be -// inlined. There is also a test (TestInlining) which verifies that these are -// inlined. -// -// See https://github.com/golang/go/issues/42739 for discussion. - -// Sum64String computes the 64-bit xxHash digest of s with a zero seed. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. -func (d *Digest) WriteString(s string) (n int, err error) { - d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) - // d.Write always returns len(s), nil. - // Ignoring the return output and returning these fixed values buys a - // savings of 6 in the inliner's cost model. - return len(s), nil -} - -// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout -// of the first two words is the same as the layout of a string. -type sliceHeader struct { - s string - cap int -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/LICENSE b/openshift/tools/vendor/github.com/cloudflare/circl/LICENSE deleted file mode 100644 index 67edaa90a0..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/LICENSE +++ /dev/null @@ -1,57 +0,0 @@ -Copyright (c) 2019 Cloudflare. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Cloudflare nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -======================================================================== - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve.go deleted file mode 100644 index f9057c2b86..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve.go +++ /dev/null @@ -1,96 +0,0 @@ -package x25519 - -import ( - fp "github.com/cloudflare/circl/math/fp25519" -) - -// ladderJoye calculates a fixed-point multiplication with the generator point. -// The algorithm is the right-to-left Joye's ladder as described -// in "How to precompute a ladder" in SAC'2017. -func ladderJoye(k *Key) { - w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. - fp.SetOne(&w[1]) // x1 = 1 - fp.SetOne(&w[2]) // z1 = 1 - w[3] = fp.Elt{ // x2 = G-S - 0xbd, 0xaa, 0x2f, 0xc8, 0xfe, 0xe1, 0x94, 0x7e, - 0xf8, 0xed, 0xb2, 0x14, 0xae, 0x95, 0xf0, 0xbb, - 0xe2, 0x48, 0x5d, 0x23, 0xb9, 0xa0, 0xc7, 0xad, - 0x34, 0xab, 0x7c, 0xe2, 0xee, 0xcd, 0xae, 0x1e, - } - fp.SetOne(&w[4]) // z2 = 1 - - const n = 255 - const h = 3 - swap := uint(1) - for s := 0; s < n-h; s++ { - i := (s + h) / 8 - j := (s + h) % 8 - bit := uint((k[i] >> uint(j)) & 1) - copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) - diffAdd(&w, swap^bit) - swap = bit - } - for s := 0; s < h; s++ { - double(&w[1], &w[2]) - } - toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) -} - -// ladderMontgomery calculates a generic scalar point multiplication -// The algorithm implemented is the left-to-right Montgomery's ladder. -func ladderMontgomery(k, xP *Key) { - w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. 
- w[0] = *(*fp.Elt)(xP) // x1 = xP - fp.SetOne(&w[1]) // x2 = 1 - w[3] = *(*fp.Elt)(xP) // x3 = xP - fp.SetOne(&w[4]) // z3 = 1 - - move := uint(0) - for s := 255 - 1; s >= 0; s-- { - i := s / 8 - j := s % 8 - bit := uint((k[i] >> uint(j)) & 1) - ladderStep(&w, move^bit) - move = bit - } - toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) -} - -func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { - fp.Inv(z, z) - fp.Mul(x, x, z) - _ = fp.ToBytes(k[:], x) -} - -var lowOrderPoints = [5]fp.Elt{ - { /* (0,_,1) point of order 2 on Curve25519 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - { /* (1,_,1) point of order 4 on Curve25519 */ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - { /* (x,_,1) first point of order 8 on Curve25519 */ - 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, - 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, - 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, - 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00, - }, - { /* (x,_,1) second point of order 8 on Curve25519 */ - 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, - 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, - 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, - 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57, - }, - { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */ - 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, - }, -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go deleted file mode 100644 index 8a3d54c570..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build amd64 && !purego -// +build amd64,!purego - -package x25519 - -import ( - fp "github.com/cloudflare/circl/math/fp25519" - "golang.org/x/sys/cpu" -) - -var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX - -var _ = hasBmi2Adx - -func double(x, z *fp.Elt) { doubleAmd64(x, z) } -func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } -func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } -func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } - -//go:noescape -func ladderStepAmd64(w *[5]fp.Elt, b uint) - -//go:noescape -func diffAddAmd64(w *[5]fp.Elt, b uint) - -//go:noescape -func doubleAmd64(x, z *fp.Elt) - -//go:noescape -func mulA24Amd64(z, x *fp.Elt) diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h deleted file mode 100644 index 8c1ae4d0fb..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h +++ /dev/null @@ -1,111 +0,0 @@ -#define ladderStepLeg \ - addSub(x2,z2) \ - addSub(x3,z3) \ - integerMulLeg(b0,x2,z3) \ - integerMulLeg(b1,x3,z2) \ - reduceFromDoubleLeg(t0,b0) \ - reduceFromDoubleLeg(t1,b1) \ - addSub(t0,t1) \ - cselect(x2,x3,regMove) \ - cselect(z2,z3,regMove) \ - integerSqrLeg(b0,t0) \ - integerSqrLeg(b1,t1) \ - reduceFromDoubleLeg(x3,b0) \ - reduceFromDoubleLeg(z3,b1) \ - integerMulLeg(b0,x1,z3) \ - reduceFromDoubleLeg(z3,b0) \ - 
integerSqrLeg(b0,x2) \ - integerSqrLeg(b1,z2) \ - reduceFromDoubleLeg(x2,b0) \ - reduceFromDoubleLeg(z2,b1) \ - subtraction(t0,x2,z2) \ - multiplyA24Leg(t1,t0) \ - additionLeg(t1,t1,z2) \ - integerMulLeg(b0,x2,z2) \ - integerMulLeg(b1,t0,t1) \ - reduceFromDoubleLeg(x2,b0) \ - reduceFromDoubleLeg(z2,b1) - -#define ladderStepBmi2Adx \ - addSub(x2,z2) \ - addSub(x3,z3) \ - integerMulAdx(b0,x2,z3) \ - integerMulAdx(b1,x3,z2) \ - reduceFromDoubleAdx(t0,b0) \ - reduceFromDoubleAdx(t1,b1) \ - addSub(t0,t1) \ - cselect(x2,x3,regMove) \ - cselect(z2,z3,regMove) \ - integerSqrAdx(b0,t0) \ - integerSqrAdx(b1,t1) \ - reduceFromDoubleAdx(x3,b0) \ - reduceFromDoubleAdx(z3,b1) \ - integerMulAdx(b0,x1,z3) \ - reduceFromDoubleAdx(z3,b0) \ - integerSqrAdx(b0,x2) \ - integerSqrAdx(b1,z2) \ - reduceFromDoubleAdx(x2,b0) \ - reduceFromDoubleAdx(z2,b1) \ - subtraction(t0,x2,z2) \ - multiplyA24Adx(t1,t0) \ - additionAdx(t1,t1,z2) \ - integerMulAdx(b0,x2,z2) \ - integerMulAdx(b1,t0,t1) \ - reduceFromDoubleAdx(x2,b0) \ - reduceFromDoubleAdx(z2,b1) - -#define difAddLeg \ - addSub(x1,z1) \ - integerMulLeg(b0,z1,ui) \ - reduceFromDoubleLeg(z1,b0) \ - addSub(x1,z1) \ - integerSqrLeg(b0,x1) \ - integerSqrLeg(b1,z1) \ - reduceFromDoubleLeg(x1,b0) \ - reduceFromDoubleLeg(z1,b1) \ - integerMulLeg(b0,x1,z2) \ - integerMulLeg(b1,z1,x2) \ - reduceFromDoubleLeg(x1,b0) \ - reduceFromDoubleLeg(z1,b1) - -#define difAddBmi2Adx \ - addSub(x1,z1) \ - integerMulAdx(b0,z1,ui) \ - reduceFromDoubleAdx(z1,b0) \ - addSub(x1,z1) \ - integerSqrAdx(b0,x1) \ - integerSqrAdx(b1,z1) \ - reduceFromDoubleAdx(x1,b0) \ - reduceFromDoubleAdx(z1,b1) \ - integerMulAdx(b0,x1,z2) \ - integerMulAdx(b1,z1,x2) \ - reduceFromDoubleAdx(x1,b0) \ - reduceFromDoubleAdx(z1,b1) - -#define doubleLeg \ - addSub(x1,z1) \ - integerSqrLeg(b0,x1) \ - integerSqrLeg(b1,z1) \ - reduceFromDoubleLeg(x1,b0) \ - reduceFromDoubleLeg(z1,b1) \ - subtraction(t0,x1,z1) \ - multiplyA24Leg(t1,t0) \ - additionLeg(t1,t1,z1) \ - integerMulLeg(b0,x1,z1) \ - integerMulLeg(b1,t0,t1) \ - reduceFromDoubleLeg(x1,b0) \ - reduceFromDoubleLeg(z1,b1) - -#define doubleBmi2Adx \ - addSub(x1,z1) \ - integerSqrAdx(b0,x1) \ - integerSqrAdx(b1,z1) \ - reduceFromDoubleAdx(x1,b0) \ - reduceFromDoubleAdx(z1,b1) \ - subtraction(t0,x1,z1) \ - multiplyA24Adx(t1,t0) \ - additionAdx(t1,t1,z1) \ - integerMulAdx(b0,x1,z1) \ - integerMulAdx(b1,t0,t1) \ - reduceFromDoubleAdx(x1,b0) \ - reduceFromDoubleAdx(z1,b1) diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s deleted file mode 100644 index ce9f062894..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s +++ /dev/null @@ -1,157 +0,0 @@ -//go:build amd64 && !purego -// +build amd64,!purego - -#include "textflag.h" - -// Depends on circl/math/fp25519 package -#include "../../math/fp25519/fp_amd64.h" -#include "curve_amd64.h" - -// CTE_A24 is (A+2)/4 from Curve25519 -#define CTE_A24 121666 - -#define Size 32 - -// multiplyA24Leg multiplies x times CTE_A24 and stores in z -// Uses: AX, DX, R8-R13, FLAGS -// Instr: x86_64, cmov -#define multiplyA24Leg(z,x) \ - MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \ - MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \ - MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \ - MOVL $CTE_A24, AX; MULQ 24+x; \ - ADDQ R12, R9; \ - ADCQ R13, R10; \ - ADCQ AX, R11; \ - ADCQ $0, DX; \ - MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ - IMULQ AX, DX; \ - 
ADDQ DX, R8; \ - ADCQ $0, R9; MOVQ R9, 8+z; \ - ADCQ $0, R10; MOVQ R10, 16+z; \ - ADCQ $0, R11; MOVQ R11, 24+z; \ - MOVQ $0, DX; \ - CMOVQCS AX, DX; \ - ADDQ DX, R8; MOVQ R8, 0+z; - -// multiplyA24Adx multiplies x times CTE_A24 and stores in z -// Uses: AX, DX, R8-R12, FLAGS -// Instr: x86_64, cmov, bmi2 -#define multiplyA24Adx(z,x) \ - MOVQ $CTE_A24, DX; \ - MULXQ 0+x, R8, R10; \ - MULXQ 8+x, R9, R11; ADDQ R10, R9; \ - MULXQ 16+x, R10, AX; ADCQ R11, R10; \ - MULXQ 24+x, R11, R12; ADCQ AX, R11; \ - ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \ - MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ - IMULQ DX, R12; \ - ADDQ R12, R8; \ - ADCQ $0, R9; MOVQ R9, 8+z; \ - ADCQ $0, R10; MOVQ R10, 16+z; \ - ADCQ $0, R11; MOVQ R11, 24+z; \ - MOVQ $0, R12; \ - CMOVQCS DX, R12; \ - ADDQ R12, R8; MOVQ R8, 0+z; - -#define mulA24Legacy \ - multiplyA24Leg(0(DI),0(SI)) -#define mulA24Bmi2Adx \ - multiplyA24Adx(0(DI),0(SI)) - -// func mulA24Amd64(z, x *fp255.Elt) -TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16 - MOVQ z+0(FP), DI - MOVQ x+8(FP), SI - CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx) - - -// func ladderStepAmd64(w *[5]fp255.Elt, b uint) -// ladderStepAmd64 calculates a point addition and doubling as follows: -// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-). -// work = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes. -// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and -// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. -TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16 - // Parameters - #define regWork DI - #define regMove SI - #define x1 0*Size(regWork) - #define x2 1*Size(regWork) - #define z2 2*Size(regWork) - #define x3 3*Size(regWork) - #define z3 4*Size(regWork) - // Local variables - #define t0 0*Size(SP) - #define t1 1*Size(SP) - #define b0 2*Size(SP) - #define b1 4*Size(SP) - MOVQ w+0(FP), regWork - MOVQ b+8(FP), regMove - CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx) - #undef regWork - #undef regMove - #undef x1 - #undef x2 - #undef z2 - #undef x3 - #undef z3 - #undef t0 - #undef t1 - #undef b0 - #undef b1 - -// func diffAddAmd64(w *[5]fp255.Elt, b uint) -// diffAddAmd64 calculates a differential point addition using a precomputed point. -// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2) -// w = (mu,x1,z1,x2,z2) are five fp.Elt, and -// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. -TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16 - // Parameters - #define regWork DI - #define regSwap SI - #define ui 0*Size(regWork) - #define x1 1*Size(regWork) - #define z1 2*Size(regWork) - #define x2 3*Size(regWork) - #define z2 4*Size(regWork) - // Local variables - #define b0 0*Size(SP) - #define b1 2*Size(SP) - MOVQ w+0(FP), regWork - MOVQ b+8(FP), regSwap - cswap(x1,x2,regSwap) - cswap(z1,z2,regSwap) - CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) - #undef regWork - #undef regSwap - #undef ui - #undef x1 - #undef z1 - #undef x2 - #undef z2 - #undef b0 - #undef b1 - -// func doubleAmd64(x, z *fp255.Elt) -// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). -// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and -// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. 
-TEXT ·doubleAmd64(SB),NOSPLIT,$192-16 - // Parameters - #define x1 0(DI) - #define z1 0(SI) - // Local variables - #define t0 0*Size(SP) - #define t1 1*Size(SP) - #define b0 2*Size(SP) - #define b1 4*Size(SP) - MOVQ x+0(FP), DI - MOVQ z+8(FP), SI - CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) - #undef x1 - #undef z1 - #undef t0 - #undef t1 - #undef b0 - #undef b1 diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go deleted file mode 100644 index dae67ea37d..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go +++ /dev/null @@ -1,85 +0,0 @@ -package x25519 - -import ( - "encoding/binary" - "math/bits" - - fp "github.com/cloudflare/circl/math/fp25519" -) - -func doubleGeneric(x, z *fp.Elt) { - t0, t1 := &fp.Elt{}, &fp.Elt{} - fp.AddSub(x, z) - fp.Sqr(x, x) - fp.Sqr(z, z) - fp.Sub(t0, x, z) - mulA24Generic(t1, t0) - fp.Add(t1, t1, z) - fp.Mul(x, x, z) - fp.Mul(z, t0, t1) -} - -func diffAddGeneric(w *[5]fp.Elt, b uint) { - mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] - fp.Cswap(x1, x2, b) - fp.Cswap(z1, z2, b) - fp.AddSub(x1, z1) - fp.Mul(z1, z1, mu) - fp.AddSub(x1, z1) - fp.Sqr(x1, x1) - fp.Sqr(z1, z1) - fp.Mul(x1, x1, z2) - fp.Mul(z1, z1, x2) -} - -func ladderStepGeneric(w *[5]fp.Elt, b uint) { - x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] - t0 := &fp.Elt{} - t1 := &fp.Elt{} - fp.AddSub(x2, z2) - fp.AddSub(x3, z3) - fp.Mul(t0, x2, z3) - fp.Mul(t1, x3, z2) - fp.AddSub(t0, t1) - fp.Cmov(x2, x3, b) - fp.Cmov(z2, z3, b) - fp.Sqr(x3, t0) - fp.Sqr(z3, t1) - fp.Mul(z3, x1, z3) - fp.Sqr(x2, x2) - fp.Sqr(z2, z2) - fp.Sub(t0, x2, z2) - mulA24Generic(t1, t0) - fp.Add(t1, t1, z2) - fp.Mul(x2, x2, z2) - fp.Mul(z2, t0, t1) -} - -func mulA24Generic(z, x *fp.Elt) { - const A24 = 121666 - const n = 8 - var xx [4]uint64 - for i := range xx { - xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) - } - - h0, l0 := bits.Mul64(xx[0], A24) - h1, l1 := bits.Mul64(xx[1], A24) - h2, l2 := bits.Mul64(xx[2], A24) - h3, l3 := bits.Mul64(xx[3], A24) - - var c3 uint64 - l1, c0 := bits.Add64(h0, l1, 0) - l2, c1 := bits.Add64(h1, l2, c0) - l3, c2 := bits.Add64(h2, l3, c1) - l4, _ := bits.Add64(h3, 0, c2) - _, l4 = bits.Mul64(l4, 38) - l0, c0 = bits.Add64(l0, l4, 0) - xx[1], c1 = bits.Add64(l1, 0, c0) - xx[2], c2 = bits.Add64(l2, 0, c1) - xx[3], c3 = bits.Add64(l3, 0, c2) - xx[0], _ = bits.Add64(l0, (-c3)&38, 0) - for i := range xx { - binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) - } -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go deleted file mode 100644 index 07fab97d2a..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !amd64 || purego -// +build !amd64 purego - -package x25519 - -import fp "github.com/cloudflare/circl/math/fp25519" - -func double(x, z *fp.Elt) { doubleGeneric(x, z) } -func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } -func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } -func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/doc.go deleted file mode 100644 index 3ce102d145..0000000000 --- 
a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package x25519 provides Diffie-Hellman functions as specified in RFC-7748. - -Validation of public keys. - -The Diffie-Hellman function, as described in RFC-7748 [1], works for any -public key. However, if a different protocol requires contributory -behaviour [2,3], then the public keys must be validated against low-order -points [3,4]. To do that, the Shared function performs this validation -internally and returns false when the public key is invalid (i.e., it -is a low-order point). - -References: - - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt) - - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html) - - [3] Bernstein (https://cr.yp.to/ecdh.html#validate) - - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526) -*/ -package x25519 diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/key.go deleted file mode 100644 index c76f72ac7f..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/key.go +++ /dev/null @@ -1,47 +0,0 @@ -package x25519 - -import ( - "crypto/subtle" - - fp "github.com/cloudflare/circl/math/fp25519" -) - -// Size is the length in bytes of a X25519 key. -const Size = 32 - -// Key represents a X25519 key. -type Key [Size]byte - -func (k *Key) clamp(in *Key) *Key { - *k = *in - k[0] &= 248 - k[31] = (k[31] & 127) | 64 - return k -} - -// isValidPubKey verifies if the public key is not a low-order point. -func (k *Key) isValidPubKey() bool { - fp.Modp((*fp.Elt)(k)) - var isLowOrder int - for _, P := range lowOrderPoints { - isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:]) - } - return isLowOrder == 0 -} - -// KeyGen obtains a public key given a secret key. -func KeyGen(public, secret *Key) { - ladderJoye(public.clamp(secret)) -} - -// Shared calculates Alice's shared key from Alice's secret key and Bob's -// public key returning true on success. A failure case happens when the public -// key is a low-order point, thus the shared key is all-zeros and the function -// returns false. -func Shared(shared, secret, public *Key) bool { - validPk := *public - validPk[31] &= (1 << (255 % 8)) - 1 - ok := validPk.isValidPubKey() - ladderMontgomery(shared.clamp(secret), &validPk) - return ok -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/table.go deleted file mode 100644 index 28c8c4ac03..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x25519/table.go +++ /dev/null @@ -1,268 +0,0 @@ -package x25519 - -import "github.com/cloudflare/circl/math/fp25519" - -// tableGenerator contains the set of points: -// -// t[i] = (xi+1)/(xi-1), -// -// where (xi,yi) = 2^iG and G is the generator point -// Size = (256)*(256/8) = 8192 bytes. 
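// ladderJoye consumes this table one 32-byte slab at a time: step s copies
// tableGenerator[s*Size:(s+1)*Size] into w[0] (mu) before calling diffAdd.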
-var tableGenerator = [256 * fp25519.Size]byte{ - /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f, - /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51, - /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b, - /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30, - /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17, - /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c, - /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29, - /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69, - /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a, - /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d, - /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d, - /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75, - /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75, - /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e, - /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f, - /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37, - /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 
0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41, - /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d, - /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20, - /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51, - /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a, - /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61, - /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29, - /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d, - /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31, - /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02, - /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f, - /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23, - /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18, - /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27, - /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60, - /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79, - /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13, - /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 
0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f, - /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74, - /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f, - /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23, - /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29, - /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40, - /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a, - /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26, - /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d, - /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39, - /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48, - /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07, - /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70, - /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62, - /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26, - /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37, - /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32, - /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 
0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d, - /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26, - /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a, - /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c, - /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f, - /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a, - /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25, - /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e, - /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75, - /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77, - /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b, - /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c, - /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61, - /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44, - /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a, - /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69, - /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49, - /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 
0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e, - /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59, - /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44, - /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79, - /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f, - /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e, - /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e, - /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56, - /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04, - /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22, - /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61, - /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29, - /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a, - /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f, - /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68, - /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c, - /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61, - /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 
0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14, - /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f, - /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34, - /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e, - /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01, - /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c, - /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23, - /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a, - /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b, - /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18, - /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c, - /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72, - /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b, - /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40, - /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00, - /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45, - /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e, - /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 
0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70, - /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37, - /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09, - /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62, - /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a, - /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13, - /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f, - /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66, - /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04, - /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47, - /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48, - /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31, - /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56, - /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22, - /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77, - /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a, - /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32, - /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 
0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25, - /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e, - /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67, - /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68, - /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e, - /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d, - /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48, - /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47, - /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a, - /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c, - /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26, - /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c, - /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72, - /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c, - /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06, - /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11, - /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10, - /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 
0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b, - /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b, - /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50, - /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15, - /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c, - /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35, - /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f, - /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61, - /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e, - /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66, - /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d, - /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a, - /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76, - /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02, - /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35, - /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d, - /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f, - /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 
0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52, - /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07, - /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a, - /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77, - /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34, - /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c, - /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d, - /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58, - /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30, - /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28, - /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a, - /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60, - /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74, - /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67, - /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48, - /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e, - /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58, - /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 
0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f, - /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27, - /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b, - /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74, - /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06, - /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63, - /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b, - /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a, - /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a, - /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f, - /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f, - /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29, - /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07, - /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f, - /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d, - /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44, - /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21, - /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 
0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e, - /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49, - /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56, - /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59, - /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c, - /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61, - /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d, - /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09, - /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e, - /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f, - /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c, - /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58, - /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53, - /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72, - /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58, - /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e, - /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d, - /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 
0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c, - /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07, - /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59, - /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e, - /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18, - /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e, - /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51, - /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54, - /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22, - /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43, - /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e, - /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43, - /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e, - /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69, - /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09, - /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42, - /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49, - /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 
0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53, - /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25, - /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51, - /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71, - /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48, - /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07, - /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c, - /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33, - /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27, - /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78, - /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50, - /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53, - /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a, - /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78, - /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48, - /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41, - /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16, - /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 
0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31, - /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05, - /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70, - /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e, - /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60, - /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52, - /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f, - /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25, - /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46, - /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f, - /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b, - /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31, - /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b, - /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d, - /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02, - /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05, - /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79, - /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 
0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b,
-	/* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f,
-}
diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve.go
deleted file mode 100644
index d59564e4b4..0000000000
--- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package x448
-
-import (
-	fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// ladderJoye calculates a fixed-point multiplication with the generator point.
-// The algorithm is the right-to-left Joye's ladder as described
-// in "How to precompute a ladder" in SAC'2017.
-func ladderJoye(k *Key) {
-	w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved.
-	w[1] = fp.Elt{ // x1 = S
-		0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-	}
-	fp.SetOne(&w[2]) // z1 = 1
-	w[3] = fp.Elt{ // x2 = G-S
-		0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac,
-		0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23,
-		0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1,
-		0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29,
-		0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda,
-		0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a,
-		0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0,
-	}
-	fp.SetOne(&w[4]) // z2 = 1
-
-	const n = 448
-	const h = 2
-	swap := uint(1)
-	for s := 0; s < n-h; s++ {
-		i := (s + h) / 8
-		j := (s + h) % 8
-		bit := uint((k[i] >> uint(j)) & 1)
-		copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
-		diffAdd(&w, swap^bit)
-		swap = bit
-	}
-	for s := 0; s < h; s++ {
-		double(&w[1], &w[2])
-	}
-	toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-// ladderMontgomery calculates a generic scalar point multiplication
-// The algorithm implemented is the left-to-right Montgomery's ladder.
-func ladderMontgomery(k, xP *Key) {
-	w := [5]fp.Elt{}      // [x1, x2, z2, x3, z3] order must be preserved.
-	w[0] = *(*fp.Elt)(xP) // x1 = xP
-	fp.SetOne(&w[1])      // x2 = 1
-	w[3] = *(*fp.Elt)(xP) // x3 = xP
-	fp.SetOne(&w[4])      // z3 = 1
-
-	move := uint(0)
-	for s := 448 - 1; s >= 0; s-- {
-		i := s / 8
-		j := s % 8
-		bit := uint((k[i] >> uint(j)) & 1)
-		ladderStep(&w, move^bit)
-		move = bit
-	}
-	toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
-	fp.Inv(z, z)
-	fp.Mul(x, x, z)
-	_ = fp.ToBytes(k[:], x)
-}
-
-var lowOrderPoints = [3]fp.Elt{
-	{ /* (0,_,1) point of order 2 on Curve448 */
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	},
-	{ /* (1,_,1) a point of order 4 on the twist of Curve448 */
-		0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	},
-	{ /* (-1,_,1) point of order 4 on Curve448 */
-		0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-	},
-}
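Worth pausing on before the files below: both ladders in curve.go xor the current scalar bit with the previous one (swap^bit, move^bit) and hand the result to a conditional swap, so the same fixed sequence of field operations runs for every bit and only data movement depends on the key. The fp448 package supplies the real constant-time primitives (Cswap, Cmov); the following standalone sketch only illustrates the masking idea, with invented names and a toy 7-limb type:

package main

import "fmt"

// ctSwap exchanges a and b when choice is 1 and leaves them unchanged
// when choice is 0, with no branch on choice.
func ctSwap(choice uint, a, b *[7]uint64) {
	mask := -uint64(choice & 1) // all ones when choice==1, zero when choice==0
	for i := range a {
		t := mask & (a[i] ^ b[i])
		a[i] ^= t
		b[i] ^= t
	}
}

func main() {
	x := [7]uint64{1, 2, 3, 4, 5, 6, 7}
	y := [7]uint64{8, 8, 8, 8, 8, 8, 8}
	prev := uint(0)
	for _, bit := range []uint{1, 1, 0, 1} { // example scalar bits, most significant first
		ctSwap(prev^bit, &x, &y) // swap only when consecutive bits differ
		// a real ladder would now perform one double plus differential add
		prev = bit
	}
	fmt.Println(x[0], y[0])
}

Branching on the key bit instead would leak it through timing and branch prediction, which is the whole reason the ladders are written this way.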
diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
deleted file mode 100644
index a062266613..0000000000
--- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package x448
-
-import (
-	fp "github.com/cloudflare/circl/math/fp448"
-	"golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func double(x, z *fp.Elt)             { doubleAmd64(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint)    { diffAddAmd64(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
-func mulA24(z, x *fp.Elt)             { mulA24Amd64(z, x) }
-
-//go:noescape
-func doubleAmd64(x, z *fp.Elt)
-
-//go:noescape
-func diffAddAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func ladderStepAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func mulA24Amd64(z, x *fp.Elt)
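That file pins the package to hand-written assembly on amd64 builds; the pure-Go fallbacks behind the inverse build tag appear in curve_noasm.go further down. The hasBmi2Adx flag feeds the CHECK_BMI2ADX macro inside the assembly, which selects a BMI2/ADX path at run time. A rough single-file sketch of the same detect-once, dispatch-thereafter idea, assuming only golang.org/x/sys/cpu (the function names here are invented for illustration):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// addGeneric stands in for portable Go code, addFast for the
// BMI2/ADX assembly; both are trivial stand-ins in this sketch.
func addGeneric(a, b uint64) uint64 { return a + b }
func addFast(a, b uint64) uint64    { return a + b }

// add is chosen once, at package init, from the CPU's feature bits.
var add = func() func(uint64, uint64) uint64 {
	if cpu.X86.HasBMI2 && cpu.X86.HasADX {
		return addFast
	}
	return addGeneric
}()

func main() {
	fmt.Println(add(2, 3)) // 5, via whichever path the CPU supports
}

circl itself splits this across build tags so the generic code is not even compiled on amd64, and performs the BMI2/ADX branch inside the assembly rather than through a Go function value.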
diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
deleted file mode 100644
index 8c1ae4d0fb..0000000000
--- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#define ladderStepLeg          \
-    addSub(x2,z2)              \
-    addSub(x3,z3)              \
-    integerMulLeg(b0,x2,z3)    \
-    integerMulLeg(b1,x3,z2)    \
-    reduceFromDoubleLeg(t0,b0) \
-    reduceFromDoubleLeg(t1,b1) \
-    addSub(t0,t1)              \
-    cselect(x2,x3,regMove)     \
-    cselect(z2,z3,regMove)     \
-    integerSqrLeg(b0,t0)       \
-    integerSqrLeg(b1,t1)       \
-    reduceFromDoubleLeg(x3,b0) \
-    reduceFromDoubleLeg(z3,b1) \
-    integerMulLeg(b0,x1,z3)    \
-    reduceFromDoubleLeg(z3,b0) \
-    integerSqrLeg(b0,x2)       \
-    integerSqrLeg(b1,z2)       \
-    reduceFromDoubleLeg(x2,b0) \
-    reduceFromDoubleLeg(z2,b1) \
-    subtraction(t0,x2,z2)      \
-    multiplyA24Leg(t1,t0)      \
-    additionLeg(t1,t1,z2)      \
-    integerMulLeg(b0,x2,z2)    \
-    integerMulLeg(b1,t0,t1)    \
-    reduceFromDoubleLeg(x2,b0) \
-    reduceFromDoubleLeg(z2,b1)
-
-#define ladderStepBmi2Adx      \
-    addSub(x2,z2)              \
-    addSub(x3,z3)              \
-    integerMulAdx(b0,x2,z3)    \
-    integerMulAdx(b1,x3,z2)    \
-    reduceFromDoubleAdx(t0,b0) \
-    reduceFromDoubleAdx(t1,b1) \
-    addSub(t0,t1)              \
-    cselect(x2,x3,regMove)     \
-    cselect(z2,z3,regMove)     \
-    integerSqrAdx(b0,t0)       \
-    integerSqrAdx(b1,t1)       \
-    reduceFromDoubleAdx(x3,b0) \
-    reduceFromDoubleAdx(z3,b1) \
-    integerMulAdx(b0,x1,z3)    \
-    reduceFromDoubleAdx(z3,b0) \
-    integerSqrAdx(b0,x2)       \
-    integerSqrAdx(b1,z2)       \
-    reduceFromDoubleAdx(x2,b0) \
-    reduceFromDoubleAdx(z2,b1) \
-    subtraction(t0,x2,z2)      \
-    multiplyA24Adx(t1,t0)      \
-    additionAdx(t1,t1,z2)      \
-    integerMulAdx(b0,x2,z2)    \
-    integerMulAdx(b1,t0,t1)    \
-    reduceFromDoubleAdx(x2,b0) \
-    reduceFromDoubleAdx(z2,b1)
-
-#define difAddLeg              \
-    addSub(x1,z1)              \
-    integerMulLeg(b0,z1,ui)    \
-    reduceFromDoubleLeg(z1,b0) \
-    addSub(x1,z1)              \
-    integerSqrLeg(b0,x1)       \
-    integerSqrLeg(b1,z1)       \
-    reduceFromDoubleLeg(x1,b0) \
-    reduceFromDoubleLeg(z1,b1) \
-    integerMulLeg(b0,x1,z2)    \
-    integerMulLeg(b1,z1,x2)    \
-    reduceFromDoubleLeg(x1,b0) \
-    reduceFromDoubleLeg(z1,b1)
-
-#define difAddBmi2Adx          \
-    addSub(x1,z1)              \
-    integerMulAdx(b0,z1,ui)    \
-    reduceFromDoubleAdx(z1,b0) \
-    addSub(x1,z1)              \
-    integerSqrAdx(b0,x1)       \
-    integerSqrAdx(b1,z1)       \
-    reduceFromDoubleAdx(x1,b0) \
-    reduceFromDoubleAdx(z1,b1) \
-    integerMulAdx(b0,x1,z2)    \
-    integerMulAdx(b1,z1,x2)    \
-    reduceFromDoubleAdx(x1,b0) \
-    reduceFromDoubleAdx(z1,b1)
-
-#define doubleLeg              \
-    addSub(x1,z1)              \
-    integerSqrLeg(b0,x1)       \
-    integerSqrLeg(b1,z1)       \
-    reduceFromDoubleLeg(x1,b0) \
-    reduceFromDoubleLeg(z1,b1) \
-    subtraction(t0,x1,z1)      \
-    multiplyA24Leg(t1,t0)      \
-    additionLeg(t1,t1,z1)      \
-    integerMulLeg(b0,x1,z1)    \
-    integerMulLeg(b1,t0,t1)    \
-    reduceFromDoubleLeg(x1,b0) \
-    reduceFromDoubleLeg(z1,b1)
-
-#define doubleBmi2Adx          \
-    addSub(x1,z1)              \
-    integerSqrAdx(b0,x1)       \
-    integerSqrAdx(b1,z1)       \
-    reduceFromDoubleAdx(x1,b0) \
-    reduceFromDoubleAdx(z1,b1) \
-    subtraction(t0,x1,z1)      \
-    multiplyA24Adx(t1,t0)      \
-    additionAdx(t1,t1,z1)      \
-    integerMulAdx(b0,x1,z1)    \
-    integerMulAdx(b1,t0,t1)    \
-    reduceFromDoubleAdx(x1,b0) \
-    reduceFromDoubleAdx(z1,b1)
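For orientation, the ladderStep macros above follow the usual Montgomery-curve x-only step. In RFC 7748's notation (stated here from the RFC, not copied from this file), with (x2,z2) the accumulator, (x3,z3) the companion point, x1 the fixed difference, and a24 = (A+2)/4 = 39082 for Curve448, one step computes:

    A  = x2 + z2        B  = x2 - z2
    C  = x3 + z3        D  = x3 - z3
    DA = D * A          CB = C * B
    x3' = (DA + CB)^2   z3' = x1 * (DA - CB)^2
    E  = A^2 - B^2
    x2' = A^2 * B^2     z2' = E * (B^2 + a24 * E)

Each addSub, integerMul/integerSqr, subtraction, multiplyA24, and addition line in the macros evaluates one of these operations; the Leg and Bmi2Adx variants differ only in which instruction set they use.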
diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
deleted file mode 100644
index ed33ba3d03..0000000000
--- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
+++ /dev/null
@@ -1,194 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-#include "textflag.h"
-
-// Depends on circl/math/fp448 package
-#include "../../math/fp448/fp_amd64.h"
-#include "curve_amd64.h"
-
-// CTE_A24 is (A+2)/4 from Curve448
-#define CTE_A24 39082
-
-#define Size 56
-
-// multiplyA24Leg multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, cmov, adx
-#define multiplyA24Leg(z,x) \
-    MOVQ $CTE_A24, R15; \
-    MOVQ  0+x, AX; MULQ R15; MOVQ AX,  R8; ;;;;;;;;;;;; MOVQ DX,  R9; \
-    MOVQ  8+x, AX; MULQ R15; ADDQ AX,  R9; ADCQ $0, DX; MOVQ DX, R10; \
-    MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
-    MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
-    MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
-    MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
-    MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
-    MOVQ DX, AX; \
-    SHLQ $32, AX; \
-    ADDQ DX, R8; MOVQ $0, DX; \
-    ADCQ $0, R9; \
-    ADCQ $0, R10; \
-    ADCQ AX, R11; \
-    ADCQ $0, R12; \
-    ADCQ $0, R13; \
-    ADCQ $0, R14; \
-    ADCQ $0, DX; \
-    MOVQ DX, AX; \
-    SHLQ $32, AX; \
-    ADDQ DX, R8; \
-    ADCQ $0, R9; \
-    ADCQ $0, R10; \
-    ADCQ AX, R11; \
-    ADCQ $0, R12; \
-    ADCQ $0, R13; \
-    ADCQ $0, R14; \
-    MOVQ R8,   0+z; \
-    MOVQ R9,   8+z; \
-    MOVQ R10, 16+z; \
-    MOVQ R11, 24+z; \
-    MOVQ R12, 32+z; \
-    MOVQ R13, 40+z; \
-    MOVQ R14, 48+z;
-
-// multiplyA24Adx multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64, bmi2
-#define multiplyA24Adx(z,x) \
-    MOVQ $CTE_A24, DX; \
-    MULXQ  0+x, R8,  R9; \
-    MULXQ  8+x, AX, R10; ADDQ AX,  R9; \
-    MULXQ 16+x, AX, R11; ADCQ AX, R10; \
-    MULXQ 24+x, AX, R12; ADCQ AX, R11; \
-    MULXQ 32+x, AX, R13; ADCQ AX, R12; \
-    MULXQ 40+x, AX, R14; ADCQ AX, R13; \
-    MULXQ 48+x, AX,  DX; ADCQ AX, R14; \
-    ;;;;;;;;;;;;;;;;;;;; ADCQ $0,  DX; \
-    MOVQ DX, AX; \
-    SHLQ $32, AX; \
-    ADDQ DX, R8; MOVQ $0, DX; \
-    ADCQ $0, R9; \
-    ADCQ $0, R10; \
-    ADCQ AX, R11; \
-    ADCQ $0, R12; \
-    ADCQ $0, R13; \
-    ADCQ $0, R14; \
-    ADCQ $0, DX; \
-    MOVQ DX, AX; \
-    SHLQ $32, AX; \
-    ADDQ DX, R8; \
-    ADCQ $0, R9; \
-    ADCQ $0, R10; \
-    ADCQ AX, R11; \
-    ADCQ $0, R12; \
-    ADCQ $0, R13; \
-    ADCQ $0, R14; \
-    MOVQ R8,   0+z; \
-    MOVQ R9,   8+z; \
-    MOVQ R10, 16+z; \
-    MOVQ R11, 24+z; \
-    MOVQ R12, 32+z; \
-    MOVQ R13, 40+z; \
-    MOVQ R14, 48+z;
-
-#define mulA24Legacy \
-    multiplyA24Leg(0(DI),0(SI))
-#define mulA24Bmi2Adx \
-    multiplyA24Adx(0(DI),0(SI))
-
-// func mulA24Amd64(z, x *fp448.Elt)
-TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
-    MOVQ z+0(FP), DI
-    MOVQ x+8(FP), SI
-    CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
-
-// func ladderStepAmd64(w *[5]fp448.Elt, b uint)
-// ladderStepAmd64 calculates a point addition and doubling as follows:
-// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
-// w = {x1,x2,z2,x3,z3} are five fp448.Elt of 56 bytes.
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16
-    // Parameters
-    #define regWork DI
-    #define regMove SI
-    #define x1 0*Size(regWork)
-    #define x2 1*Size(regWork)
-    #define z2 2*Size(regWork)
-    #define x3 3*Size(regWork)
-    #define z3 4*Size(regWork)
-    // Local variables
-    #define t0 0*Size(SP)
-    #define t1 1*Size(SP)
-    #define b0 2*Size(SP)
-    #define b1 4*Size(SP)
-    MOVQ w+0(FP), regWork
-    MOVQ b+8(FP), regMove
-    CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
-    #undef regWork
-    #undef regMove
-    #undef x1
-    #undef x2
-    #undef z2
-    #undef x3
-    #undef z3
-    #undef t0
-    #undef t1
-    #undef b0
-    #undef b1
-
-// func diffAddAmd64(work *[5]fp.Elt, swap uint)
-// diffAddAmd64 calculates a differential point addition using a precomputed point.
-// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
-// work = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and
-// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-// This is Equation 7 at https://eprint.iacr.org/2017/264.
-TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16
-    // Parameters
-    #define regWork DI
-    #define regSwap SI
-    #define ui 0*Size(regWork)
-    #define x1 1*Size(regWork)
-    #define z1 2*Size(regWork)
-    #define x2 3*Size(regWork)
-    #define z2 4*Size(regWork)
-    // Local variables
-    #define b0 0*Size(SP)
-    #define b1 2*Size(SP)
-    MOVQ w+0(FP), regWork
-    MOVQ b+8(FP), regSwap
-    cswap(x1,x2,regSwap)
-    cswap(z1,z2,regSwap)
-    CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
-    #undef regWork
-    #undef regSwap
-    #undef ui
-    #undef x1
-    #undef z1
-    #undef x2
-    #undef z2
-    #undef b0
-    #undef b1
-
-// func doubleAmd64(x, z *fp448.Elt)
-// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·doubleAmd64(SB),NOSPLIT,$336-16
-    // Parameters
-    #define x1 0(DI)
-    #define z1 0(SI)
-    // Local variables
-    #define t0 0*Size(SP)
-    #define t1 1*Size(SP)
-    #define b0 2*Size(SP)
-    #define b1 4*Size(SP)
-    MOVQ x+0(FP), DI
-    MOVQ z+8(FP), SI
-    CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
-    #undef x1
-    #undef z1
-    #undef t0
-    #undef t1
-    #undef b0
-    #undef b1
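A note on the carry handling just above: fp448's modulus is the Goldilocks prime p = 2^448 - 2^224 - 1, so 2^448 ≡ 2^224 + 1 (mod p), and a carry c escaping bit 448 folds back in as +c at bit 0 and +c at bit 224. Bit 224 is bit 32 of the fourth 64-bit limb, which is why both multiplyA24 macros do SHLQ $32, AX and then ADCQ AX, R11 (R11 holding limb 3), twice, to absorb the second-order carry as well. The identity is easy to confirm with math/big:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)

	// p = 2^448 - 2^224 - 1, the modulus implemented by fp448.
	p := new(big.Int).Lsh(one, 448)
	p.Sub(p, new(big.Int).Lsh(one, 224))
	p.Sub(p, one)

	// 2^448 mod p should equal 2^224 + 1, justifying the carry fold.
	lhs := new(big.Int).Mod(new(big.Int).Lsh(one, 448), p)
	rhs := new(big.Int).Add(new(big.Int).Lsh(one, 224), one)
	fmt.Println(lhs.Cmp(rhs) == 0) // true
}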
diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
deleted file mode 100644
index b0b65ccf7e..0000000000
--- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package x448
-
-import (
-	"encoding/binary"
-	"math/bits"
-
-	"github.com/cloudflare/circl/math/fp448"
-)
-
-func doubleGeneric(x, z *fp448.Elt) {
-	t0, t1 := &fp448.Elt{}, &fp448.Elt{}
-	fp448.AddSub(x, z)
-	fp448.Sqr(x, x)
-	fp448.Sqr(z, z)
-	fp448.Sub(t0, x, z)
-	mulA24Generic(t1, t0)
-	fp448.Add(t1, t1, z)
-	fp448.Mul(x, x, z)
-	fp448.Mul(z, t0, t1)
-}
-
-func diffAddGeneric(w *[5]fp448.Elt, b uint) {
-	mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
-	fp448.Cswap(x1, x2, b)
-	fp448.Cswap(z1, z2, b)
-	fp448.AddSub(x1, z1)
-	fp448.Mul(z1, z1, mu)
-	fp448.AddSub(x1, z1)
-	fp448.Sqr(x1, x1)
-	fp448.Sqr(z1, z1)
-	fp448.Mul(x1, x1, z2)
-	fp448.Mul(z1, z1, x2)
-}
-
-func ladderStepGeneric(w *[5]fp448.Elt, b uint) {
-	x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
-	t0 := &fp448.Elt{}
-	t1 := &fp448.Elt{}
-	fp448.AddSub(x2, z2)
-	fp448.AddSub(x3, z3)
-	fp448.Mul(t0, x2, z3)
-	fp448.Mul(t1, x3, z2)
-	fp448.AddSub(t0, t1)
-	fp448.Cmov(x2, x3, b)
-	fp448.Cmov(z2, z3, b)
-	fp448.Sqr(x3, t0)
-	fp448.Sqr(z3, t1)
-	fp448.Mul(z3, x1, z3)
-	fp448.Sqr(x2, x2)
-	fp448.Sqr(z2, z2)
-	fp448.Sub(t0, x2, z2)
-	mulA24Generic(t1, t0)
-	fp448.Add(t1, t1, z2)
-	fp448.Mul(x2, x2, z2)
-	fp448.Mul(z2, t0, t1)
-}
-
-func mulA24Generic(z, x *fp448.Elt) {
-	const A24 = 39082
-	const n = 8
-	var xx [7]uint64
-	for i := range xx {
-		xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
-	}
-	h0, l0 := bits.Mul64(xx[0], A24)
-	h1, l1 := bits.Mul64(xx[1], A24)
-	h2, l2 := bits.Mul64(xx[2], A24)
-	h3, l3 := bits.Mul64(xx[3], A24)
-	h4, l4 := bits.Mul64(xx[4], A24)
-	h5, l5 := bits.Mul64(xx[5], A24)
-	h6, l6 := bits.Mul64(xx[6], A24)
-
-	l1, c0 := bits.Add64(h0, l1, 0)
-	l2, c1 := bits.Add64(h1, l2, c0)
-	l3, c2 := bits.Add64(h2, l3, c1)
-	l4, c3 := bits.Add64(h3, l4, c2)
-	l5, c4 := bits.Add64(h4, l5, c3)
-	l6, c5 := bits.Add64(h5, l6, c4)
-	l7, _ := bits.Add64(h6, 0, c5)
-
-	l0, c0 = bits.Add64(l0, l7, 0)
-	l1, c1 = bits.Add64(l1, 0, c0)
-	l2, c2 = bits.Add64(l2, 0, c1)
-	l3, c3 = bits.Add64(l3, l7<<32, c2)
-	l4, c4 = bits.Add64(l4, 0, c3)
-	l5, c5 = bits.Add64(l5, 0, c4)
-	l6, l7 = bits.Add64(l6, 0, c5)
-
-	xx[0], c0 = bits.Add64(l0, l7, 0)
-	xx[1], c1 = bits.Add64(l1, 0, c0)
-	xx[2], c2 = bits.Add64(l2, 0, c1)
-	xx[3], c3 = bits.Add64(l3, l7<<32, c2)
-	xx[4], c4 = bits.Add64(l4, 0, c3)
-	xx[5], c5 = bits.Add64(l5, 0, c4)
-	xx[6], _ = bits.Add64(l6, 0, c5)
-
-	for i := range xx {
-		binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
-	}
-}
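mulA24Generic above is a textbook limb-by-limb multiply by a small constant: bits.Mul64 produces the 128-bit partial products and bits.Add64 chains the carries, before the same 2^448 ≡ 2^224 + 1 fold seen in the assembly. A cut-down, runnable version of just the multiply-and-carry part, on a hypothetical 4-limb value with no modular reduction:

package main

import (
	"fmt"
	"math/bits"
)

// mulSmall multiplies a 4-limb little-endian value by a small constant,
// returning 4 limbs plus the overflow limb. It has the same shape as the
// bits.Mul64/bits.Add64 chain in mulA24Generic, minus the fold.
func mulSmall(x [4]uint64, c uint64) (z [4]uint64, carry uint64) {
	h0, l0 := bits.Mul64(x[0], c)
	h1, l1 := bits.Mul64(x[1], c)
	h2, l2 := bits.Mul64(x[2], c)
	h3, l3 := bits.Mul64(x[3], c)

	var cc uint64
	z[0] = l0
	z[1], cc = bits.Add64(h0, l1, 0) // high half of limb i lands in limb i+1
	z[2], cc = bits.Add64(h1, l2, cc)
	z[3], cc = bits.Add64(h2, l3, cc)
	carry, _ = bits.Add64(h3, 0, cc)
	return z, carry
}

func main() {
	z, c := mulSmall([4]uint64{^uint64(0), ^uint64(0), 0, 0}, 39082)
	fmt.Println(z, c)
}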
-func mulA24Generic(z, x *fp448.Elt) { - const A24 = 39082 - const n = 8 - var xx [7]uint64 - for i := range xx { - xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) - } - h0, l0 := bits.Mul64(xx[0], A24) - h1, l1 := bits.Mul64(xx[1], A24) - h2, l2 := bits.Mul64(xx[2], A24) - h3, l3 := bits.Mul64(xx[3], A24) - h4, l4 := bits.Mul64(xx[4], A24) - h5, l5 := bits.Mul64(xx[5], A24) - h6, l6 := bits.Mul64(xx[6], A24) - - l1, c0 := bits.Add64(h0, l1, 0) - l2, c1 := bits.Add64(h1, l2, c0) - l3, c2 := bits.Add64(h2, l3, c1) - l4, c3 := bits.Add64(h3, l4, c2) - l5, c4 := bits.Add64(h4, l5, c3) - l6, c5 := bits.Add64(h5, l6, c4) - l7, _ := bits.Add64(h6, 0, c5) - - l0, c0 = bits.Add64(l0, l7, 0) - l1, c1 = bits.Add64(l1, 0, c0) - l2, c2 = bits.Add64(l2, 0, c1) - l3, c3 = bits.Add64(l3, l7<<32, c2) - l4, c4 = bits.Add64(l4, 0, c3) - l5, c5 = bits.Add64(l5, 0, c4) - l6, l7 = bits.Add64(l6, 0, c5) - - xx[0], c0 = bits.Add64(l0, l7, 0) - xx[1], c1 = bits.Add64(l1, 0, c0) - xx[2], c2 = bits.Add64(l2, 0, c1) - xx[3], c3 = bits.Add64(l3, l7<<32, c2) - xx[4], c4 = bits.Add64(l4, 0, c3) - xx[5], c5 = bits.Add64(l5, 0, c4) - xx[6], _ = bits.Add64(l6, 0, c5) - - for i := range xx { - binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) - } -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go deleted file mode 100644 index 3755b7c83b..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !amd64 || purego -// +build !amd64 purego - -package x448 - -import fp "github.com/cloudflare/circl/math/fp448" - -func double(x, z *fp.Elt) { doubleGeneric(x, z) } -func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } -func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } -func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/doc.go deleted file mode 100644 index c02904feda..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package x448 provides Diffie-Hellman functions as specified in RFC-7748. - -Validation of public keys. - -The Diffie-Hellman function, as described in RFC-7748 [1], works for any -public key. However, if a different protocol requires contributory -behaviour [2,3], then the public keys must be validated against low-order -points [3,4]. To do that, the Shared function performs this validation -internally and returns false when the public key is invalid (i.e., it -is a low-order point). - -References: - - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt) - - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html) - - [3] Bernstein (https://cr.yp.to/ecdh.html#validate) - - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526) -*/ -package x448 diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/key.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/key.go deleted file mode 100644 index 2fdde51168..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/key.go +++ /dev/null @@ -1,46 +0,0 @@ -package x448 - -import ( - "crypto/subtle" - - fp "github.com/cloudflare/circl/math/fp448" -) - -// Size is the length in bytes of an X448 key. -const Size = 56 - -// Key represents an X448 key. -type Key [Size]byte - -func (k *Key) clamp(in *Key) *Key { - *k = *in - k[0] &= 252 - k[55] |= 128 - return k -} - -// isValidPubKey verifies if the public key is not a low-order point. -func (k *Key) isValidPubKey() bool { - fp.Modp((*fp.Elt)(k)) - var isLowOrder int - for _, P := range lowOrderPoints { - isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:]) - } - return isLowOrder == 0 -} - -// KeyGen obtains a public key given a secret key. -func KeyGen(public, secret *Key) { - ladderJoye(public.clamp(secret)) -} - -// Shared calculates Alice's shared key from Alice's secret key and Bob's -// public key, returning true on success. A failure case happens when the public -// key is a low-order point, thus the shared key is all-zeros and the function -// returns false.
-func Shared(shared, secret, public *Key) bool { - validPk := *public - ok := validPk.isValidPubKey() - ladderMontgomery(shared.clamp(secret), &validPk) - return ok -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/table.go b/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/table.go deleted file mode 100644 index eef53c30f8..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/dh/x448/table.go +++ /dev/null @@ -1,460 +0,0 @@ -package x448 - -import fp "github.com/cloudflare/circl/math/fp448" - -// tableGenerator contains the set of points: -// -// t[i] = (xi+1)/(xi-1), -// -// where (xi,yi) = 2^iG and G is the generator point -// Size = (448)*(448/8) = 25088 bytes. -var tableGenerator = [448 * fp.Size]byte{ - /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, - /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28, - /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8, - /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03, - /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa, - /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9, - /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67, - /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 
0x84, 0x85, 0xcb, 0x4f, 0x1e, - /* (2^ 8)P */ 0x9d, 0x04, 0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7, - /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a, - /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d, - /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60, - /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a, - /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae, - /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95, - /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b, - /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34, - /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 
0x06, - /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 0x9c, 0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17, - /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d, - /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc, - /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde, - /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11, - /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98, - /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c, - /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed, - /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9, - /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f, - /* (2^ 28)P */ 
0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 0xd9, 0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75, - /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7, - /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c, - /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b, - /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e, - /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28, - /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2, - /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9, - /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08, - /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8, - /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 
0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 0x77, 0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20, - /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29, - /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e, - /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57, - /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1, - /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d, - /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59, - /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba, - /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee, - /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1, - /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 
0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 0x49, 0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe, - /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae, - /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb, - /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71, - /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f, - /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41, - /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc, - /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef, - /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5, - /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14, - /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 
0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 0xb6, 0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91, - /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a, - /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4, - /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08, - /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74, - /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9, - /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95, - /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa, - /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25, - /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc, - /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 
0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 0x4a, 0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b, - /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85, - /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51, - /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc, - /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85, - /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5, - /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9, - /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b, - /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82, - /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99, - /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 
0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 0x56, 0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb, - /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1, - /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0, - /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42, - /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b, - /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb, - /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42, - /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd, - /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57, - /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e, - /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 
0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 0x1e, 0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59, - /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81, - /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54, - /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b, - /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68, - /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9, - /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77, - /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb, - /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3, - /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0, - /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 
0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 0x43, 0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c, - /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01, - /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7, - /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1, - /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8, - /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda, - /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec, - /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90, - /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29, - /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20, - /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 
0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 0xd8, 0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8, - /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d, - /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d, - /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6, - /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32, - /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47, - /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb, - /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e, - /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7, - /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f, - /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 
0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 0x8d, 0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5, - /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c, - /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94, - /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf, - /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1, - /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f, - /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8, - /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e, - /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24, - /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac, - /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 
0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 0x44, 0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc, - /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69, - /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7, - /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51, - /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68, - /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8, - /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12, - /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29, - /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9, - /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd, - /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 
0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 0x59, 0xa2, 0x43, - /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9, - /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02, - /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70, - /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32, - /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10, - /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74, - /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f, - /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda, - /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf, - /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 
0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0, - /* (2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b, - /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe, - /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e, - /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45, - /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9, - /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33, - /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8, - /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed, - /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43, - /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 
0xdc, 0x7c, 0xf9, 0x5a, - /* (2^159)P */ 0x94, 0xad, 0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1, - /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a, - /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9, - /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a, - /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29, - /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52, - /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45, - /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3, - /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04, - /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73, 
- /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4, - /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd, - /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa, - /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2, - /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed, - /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0, - /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22, - /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69, - /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea, - /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f, - /* (2^179)P */ 0x84, 
0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91, - /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf, - /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a, - /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12, - /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54, - /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad, - /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9, - /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2, - /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3, - /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2, - /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 
0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1, - /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b, - /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27, - /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7, - /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d, - /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9, - /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52, - /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a, - /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c, - /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5, - /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 
0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74, - /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92, - /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4, - /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7, - /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1, - /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d, - /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69, - /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe, - /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5, - /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d, - /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 
0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e, - /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6, - /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e, - /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed, - /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51, - /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48, - /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b, - /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94, - /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c, - /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8, - /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 
0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7, - /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79, - /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2, - /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70, - /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82, - /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f, - /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa, - /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c, - /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02, - /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16, - /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 
0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76, - /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3, - /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c, - /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65, - /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f, - /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b, - /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6, - /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda, - /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b, - /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1, - /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 
0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1, - /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee, - /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22, - /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95, - /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f, - /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd, - /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5, - /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f, - /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6, - /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6, - /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 
0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b, - /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80, - /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4, - /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0, - /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95, - /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82, - /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18, - /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82, - /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53, - /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9, - /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 
0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8, - /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82, - /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7, - /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8, - /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3, - /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5, - /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87, - /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56, - /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65, - /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce, - /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 
0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a, - /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1, - /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0, - /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e, - /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45, - /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9, - /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63, - /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36, - /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55, - /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3, - /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 
0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc, - /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9, - /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35, - /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb, - /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33, - /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1, - /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48, - /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08, - /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7, - /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17, - /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 
0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 0x35, 0x16, - /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec, - /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92, - /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c, - /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b, - /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3, - /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8, - /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe, - /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60, - /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61, - /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 
0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b, - /* (2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2, - /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23, - /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf, - /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96, - /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6, - /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb, - /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2, - /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7, - /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03, - /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 
0xfa, 0xf4, 0x57, - /* (2^310)P */ 0x51, 0x45, 0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89, - /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f, - /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56, - /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27, - /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37, - /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5, - /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc, - /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5, - /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12, - /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28, - /* 
(2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2, - /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97, - /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0, - /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14, - /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8, - /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64, - /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0, - /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28, - /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2, - /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9, - /* (2^330)P */ 0x50, 0x9e, 
0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7, - /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b, - /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca, - /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86, - /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28, - /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02, - /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c, - /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc, - /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f, - /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00, - /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 
0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c, - /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96, - /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3, - /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75, - /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a, - /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4, - /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04, - /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1, - /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0, - /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08, - /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 
0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f, - /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18, - /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a, - /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38, - /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8, - /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35, - /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf, - /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d, - /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98, - /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e, - /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 
0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed, - /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad, - /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc, - /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1, - /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7, - /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b, - /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48, - /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93, - /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06, - /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4, - /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 
0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5, - /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5, - /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa, - /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9, - /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5, - /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1, - /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24, - /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda, - /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba, - /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7, - /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 
0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a, - /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10, - /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59, - /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d, - /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21, - /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5, - /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0, - /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc, - /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f, - /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0, - /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 
0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9, - /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7, - /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6, - /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f, - /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb, - /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb, - /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93, - /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c, - /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2, - /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a, - /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 
0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f, - /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5, - /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75, - /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f, - /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad, - /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f, - /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7, - /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e, - /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95, - /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0, - /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 
0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35, - /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9, - /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67, - /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd, - /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7, - /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce, - /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd, - /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a, - /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96, - /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24, - /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 
0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46, - /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7, - /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29, - /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87, - /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec, - /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3, - /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4, - /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2, - /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83, - /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67, - /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 
0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 0xac, 0xad, 0x46, 0x8a, 0x26, 0x25, - /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d, - /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91, - /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9, - /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb, - /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87, - /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12, - /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2, - /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32, - /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc, - /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 
0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 0x53, 0x2a, - /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36, - /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2, - /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e, - /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b, - /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd, - /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d, - /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67, -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go deleted file mode 100644 index b6b236e5d3..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go +++ /dev/null @@ -1,71 +0,0 @@ -package goldilocks - -import fp "github.com/cloudflare/circl/math/fp448" - -var ( - // genX is the x-coordinate of the generator of Goldilocks curve. - genX = fp.Elt{ - 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26, - 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43, - 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12, - 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea, - 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e, - 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22, - 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f, - } - // genY is the y-coordinate of the generator of Goldilocks curve. 
- genY = fp.Elt{ - 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98, - 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd, - 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a, - 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87, - 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b, - 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88, - 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69, - } - // paramD is -39081 in Fp. - paramD = fp.Elt{ - 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - } - // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d, - // which is the number of points in the prime subgroup. - order = Scalar{ - 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23, - 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21, - 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4, - 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, - } - // residue448 is 2^448 mod order. - residue448 = [4]uint64{ - 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058, - } - // invFour is 1/4 mod order. - invFour = Scalar{ - 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48, - 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08, - 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71, - 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, - } - // paramDTwist is -39082 in Fp. The D parameter of the twist curve. - paramDTwist = fp.Elt{ - 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - } -) diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go deleted file mode 100644 index 1f165141a9..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go +++ /dev/null @@ -1,84 +0,0 @@ -// Package goldilocks provides elliptic curve operations over the goldilocks curve. -package goldilocks - -import fp "github.com/cloudflare/circl/math/fp448" - -// Curve is the Goldilocks curve x^2+y^2=z^2-39081x^2y^2. -type Curve struct{} - -// Identity returns the identity point. -func (Curve) Identity() *Point { - return &Point{ - y: fp.One(), - z: fp.One(), - } -} - -// IsOnCurve returns true if the point lies on the curve. 
-func (Curve) IsOnCurve(P *Point) bool { - x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} - rhs, lhs := &fp.Elt{}, &fp.Elt{} - // Check z != 0 - eq0 := !fp.IsZero(&P.z) - - fp.Mul(t, &P.ta, &P.tb) // t = ta*tb - fp.Sqr(x2, &P.x) // x^2 - fp.Sqr(y2, &P.y) // y^2 - fp.Sqr(z2, &P.z) // z^2 - fp.Sqr(t2, t) // t^2 - fp.Add(lhs, x2, y2) // x^2 + y^2 - fp.Mul(rhs, t2, &paramD) // dt^2 - fp.Add(rhs, rhs, z2) // z^2 + dt^2 - fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2) - eq1 := fp.IsZero(lhs) - - fp.Mul(lhs, &P.x, &P.y) // xy - fp.Mul(rhs, t, &P.z) // tz - fp.Sub(lhs, lhs, rhs) // xy - tz - eq2 := fp.IsZero(lhs) - - return eq0 && eq1 && eq2 -} - -// Generator returns the generator point. -func (Curve) Generator() *Point { - return &Point{ - x: genX, - y: genY, - z: fp.One(), - ta: genX, - tb: genY, - } -} - -// Order returns the number of points in the prime subgroup. -func (Curve) Order() Scalar { return order } - -// Double returns 2P. -func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R } - -// Add returns P+Q. -func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R } - -// ScalarMult returns kP. This function runs in constant time. -func (e Curve) ScalarMult(k *Scalar, P *Point) *Point { - k4 := &Scalar{} - k4.divBy4(k) - return e.pull(twistCurve{}.ScalarMult(k4, e.push(P))) -} - -// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time. -func (e Curve) ScalarBaseMult(k *Scalar) *Point { - k4 := &Scalar{} - k4.divBy4(k) - return e.pull(twistCurve{}.ScalarBaseMult(k4)) -} - -// CombinedMult returns mG+nP, where G is the generator point. This function is non-constant time. -func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point { - m4 := &Scalar{} - n4 := &Scalar{} - m4.divBy4(m) - n4.divBy4(n) - return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P))) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go deleted file mode 100644 index b1daab851c..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go +++ /dev/null @@ -1,52 +0,0 @@ -package goldilocks - -import fp "github.com/cloudflare/circl/math/fp448" - -func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) } -func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) } - -// push sends a point on the Goldilocks curve to a point on the twist curve. -func (Curve) push(P *Point) *twistPoint { - Q := &twistPoint{} - Px, Py, Pz := &P.x, &P.y, &P.z - a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb - fp.Add(e, Px, Py) // x+y - fp.Sqr(a, Px) // A = x^2 - fp.Sqr(b, Py) // B = y^2 - fp.Sqr(c, Pz) // z^2 - fp.Add(c, c, c) // C = 2*z^2 - *d = *a // D = A - fp.Sqr(e, e) // (x+y)^2 - fp.Sub(e, e, a) // (x+y)^2-A - fp.Sub(e, e, b) // E = (x+y)^2-A-B - fp.Add(h, b, d) // H = B+D - fp.Sub(g, b, d) // G = B-D - fp.Sub(f, c, h) // F = C-H - fp.Mul(&Q.z, f, g) // Z = F * G - fp.Mul(&Q.x, e, f) // X = E * F - fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H - return Q -} - -// push sends a point on the twist curve to a point on the Goldilocks curve.
-func (twistCurve) push(P *twistPoint) *Point { - Q := &Point{} - Px, Py, Pz := &P.x, &P.y, &P.z - a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb - fp.Add(e, Px, Py) // x+y - fp.Sqr(a, Px) // A = x^2 - fp.Sqr(b, Py) // B = y^2 - fp.Sqr(c, Pz) // z^2 - fp.Add(c, c, c) // C = 2*z^2 - fp.Neg(d, a) // D = -A - fp.Sqr(e, e) // (x+y)^2 - fp.Sub(e, e, a) // (x+y)^2-A - fp.Sub(e, e, b) // E = (x+y)^2-A-B - fp.Add(h, b, d) // H = B+D - fp.Sub(g, b, d) // G = B-D - fp.Sub(f, c, h) // F = C-H - fp.Mul(&Q.z, f, g) // Z = F * G - fp.Mul(&Q.x, e, f) // X = E * F - fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H - return Q -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go deleted file mode 100644 index 11f73de054..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go +++ /dev/null @@ -1,171 +0,0 @@ -package goldilocks - -import ( - "errors" - "fmt" - - fp "github.com/cloudflare/circl/math/fp448" -) - -// Point is a point on the Goldilocks Curve. -type Point struct{ x, y, z, ta, tb fp.Elt } - -func (P Point) String() string { - return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) -} - -// FromAffine creates a point from affine coordinates. -func FromAffine(x, y *fp.Elt) (*Point, error) { - P := &Point{ - x: *x, - y: *y, - z: fp.One(), - ta: *x, - tb: *y, - } - if !(Curve{}).IsOnCurve(P) { - return P, errors.New("point not on curve") - } - return P, nil -} - -// isLessThan returns true if 0 <= x < y, and assumes that slices are of the -// same length and are interpreted in little-endian order. -func isLessThan(x, y []byte) bool { - i := len(x) - 1 - for i > 0 && x[i] == y[i] { - i-- - } - return x[i] < y[i] -} - -// FromBytes returns a point from the input buffer. -func FromBytes(in []byte) (*Point, error) { - if len(in) < fp.Size+1 { - return nil, errors.New("wrong input length") - } - err := errors.New("invalid decoding") - P := &Point{} - signX := in[fp.Size] >> 7 - copy(P.y[:], in[:fp.Size]) - p := fp.P() - if !isLessThan(P.y[:], p[:]) { - return nil, err - } - - u, v := &fp.Elt{}, &fp.Elt{} - one := fp.One() - fp.Sqr(u, &P.y) // u = y^2 - fp.Mul(v, u, &paramD) // v = dy^2 - fp.Sub(u, u, &one) // u = y^2-1 - fp.Sub(v, v, &one) // v = dy^2-1 - isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) - if !isQR { - return nil, err - } - fp.Modp(&P.x) // x = x mod p - if fp.IsZero(&P.x) && signX == 1 { - return nil, err - } - if signX != (P.x[0] & 1) { - fp.Neg(&P.x, &P.x) - } - P.ta = P.x - P.tb = P.y - P.z = fp.One() - return P, nil -} - -// IsIdentity returns true if P is the identity Point. -func (P *Point) IsIdentity() bool { - return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z -} - -// IsEqual returns true if P is equivalent to Q. -func (P *Point) IsEqual(Q *Point) bool { - l, r := &fp.Elt{}, &fp.Elt{} - fp.Mul(l, &P.x, &Q.z) - fp.Mul(r, &Q.x, &P.z) - fp.Sub(l, l, r) - b := fp.IsZero(l) - fp.Mul(l, &P.y, &Q.z) - fp.Mul(r, &Q.y, &P.z) - fp.Sub(l, l, r) - b = b && fp.IsZero(l) - fp.Mul(l, &P.ta, &P.tb) - fp.Mul(l, l, &Q.z) - fp.Mul(r, &Q.ta, &Q.tb) - fp.Mul(r, r, &P.z) - fp.Sub(l, l, r) - b = b && fp.IsZero(l) - return b -} - -// Neg obtains the inverse of the Point. -func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) } - -// ToAffine returns the x,y affine coordinates of P.
-func (P *Point) ToAffine() (x, y fp.Elt) { - fp.Inv(&P.z, &P.z) // 1/z - fp.Mul(&P.x, &P.x, &P.z) // x/z - fp.Mul(&P.y, &P.y, &P.z) // y/z - fp.Modp(&P.x) - fp.Modp(&P.y) - fp.SetOne(&P.z) - P.ta = P.x - P.tb = P.y - return P.x, P.y -} - -// ToBytes stores P into a slice of bytes. -func (P *Point) ToBytes(out []byte) error { - if len(out) < fp.Size+1 { - return errors.New("invalid decoding") - } - x, y := P.ToAffine() - out[fp.Size] = (x[0] & 1) << 7 - return fp.ToBytes(out[:fp.Size], &y) -} - -// MarshalBinary encodes the receiver into a binary form and returns the result. -func (P *Point) MarshalBinary() (data []byte, err error) { - data = make([]byte, fp.Size+1) - err = P.ToBytes(data[:fp.Size+1]) - return data, err -} - -// UnmarshalBinary must be able to decode the form generated by MarshalBinary. -func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); *P = *Q; return err } - -// Double sets P = 2P. -func (P *Point) Double() { P.Add(P) } - -// Add sets P = P+Q. -func (P *Point) Add(Q *Point) { - // This is formula (5) from "Twisted Edwards Curves Revisited" by - // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) - // https://doi.org/10.1007/978-3-540-89255-7_20 - x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb - x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb - x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb - A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} - t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{} - fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1 - fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2 - fp.Mul(A, x1, x2) // A = x1*x2 - fp.Mul(B, y1, y2) // B = y1*y2 - fp.Mul(C, t1, t2) // t1*t2 - fp.Mul(C, C, &paramD) // C = d*t1*t2 - fp.Mul(D, z1, z2) // D = z1*z2 - fp.Add(F, x1, y1) // x1+y1 - fp.Add(E, x2, y2) // x2+y2 - fp.Mul(E, E, F) // (x1+y1)*(x2+y2) - fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A - fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B - fp.Sub(F, D, C) // F = D-C - fp.Add(G, D, C) // G = D+C - fp.Sub(H, B, A) // H = B-A - fp.Mul(z3, F, G) // Z = F * G - fp.Mul(x3, E, F) // X = E * F - fp.Mul(y3, G, H) // Y = G * H, T = E * H -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go deleted file mode 100644 index f98117b252..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go +++ /dev/null @@ -1,203 +0,0 @@ -package goldilocks - -import ( - "encoding/binary" - "math/bits" -) - -// ScalarSize is the size (in bytes) of scalars. -const ScalarSize = 56 // 448 / 8 - -// _N is the number of 64-bit words to store scalars. -const _N = 7 // 448 / 64 - -// Scalar represents a positive integer stored in little-endian order.
-type Scalar [ScalarSize]byte - -type scalar64 [_N]uint64 - -func (z *scalar64) fromScalar(x *Scalar) { - z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8]) - z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8]) - z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8]) - z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8]) - z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8]) - z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8]) - z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8]) -} - -func (z *scalar64) toScalar(x *Scalar) { - binary.LittleEndian.PutUint64(x[0*8:1*8], z[0]) - binary.LittleEndian.PutUint64(x[1*8:2*8], z[1]) - binary.LittleEndian.PutUint64(x[2*8:3*8], z[2]) - binary.LittleEndian.PutUint64(x[3*8:4*8], z[3]) - binary.LittleEndian.PutUint64(x[4*8:5*8], z[4]) - binary.LittleEndian.PutUint64(x[5*8:6*8], z[5]) - binary.LittleEndian.PutUint64(x[6*8:7*8], z[6]) -} - -// add calculates z = x + y. Assumes len(z) > max(len(x),len(y)). -func add(z, x, y []uint64) uint64 { - l, L, zz := len(x), len(y), y - if l > L { - l, L, zz = L, l, x - } - c := uint64(0) - for i := 0; i < l; i++ { - z[i], c = bits.Add64(x[i], y[i], c) - } - for i := l; i < L; i++ { - z[i], c = bits.Add64(zz[i], 0, c) - } - return c -} - -// sub calculates z = x - y. Assumes len(z) > max(len(x),len(y)). -func sub(z, x, y []uint64) uint64 { - l, L, zz := len(x), len(y), y - if l > L { - l, L, zz = L, l, x - } - c := uint64(0) - for i := 0; i < l; i++ { - z[i], c = bits.Sub64(x[i], y[i], c) - } - for i := l; i < L; i++ { - z[i], c = bits.Sub64(zz[i], 0, c) - } - return c -} - -// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1. -func mulWord(z, x []uint64, y uint64) { - for i := range z { - z[i] = 0 - } - carry := uint64(0) - for i := range x { - hi, lo := bits.Mul64(x[i], y) - lo, cc := bits.Add64(lo, z[i], 0) - hi, _ = bits.Add64(hi, 0, cc) - z[i], cc = bits.Add64(lo, carry, 0) - carry, _ = bits.Add64(hi, 0, cc) - } - z[len(x)] = carry -} - -// Cmov moves x into z if b=1. -func (z *scalar64) Cmov(b uint64, x *scalar64) { - m := uint64(0) - b - for i := range z { - z[i] = (z[i] &^ m) | (x[i] & m) - } -} - -// leftShift shifts to the left the words of z returning the more significant word. -func (z *scalar64) leftShift(low uint64) uint64 { - high := z[_N-1] - for i := _N - 1; i > 0; i-- { - z[i] = z[i-1] - } - z[0] = low - return high -} - -// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar. -func (z *scalar64) reduceOneWord(x uint64) { - prod := (&scalar64{})[:] - mulWord(prod, residue448[:], x) - cc := add(z[:], z[:], prod) - mulWord(prod, residue448[:], cc) - add(z[:], z[:], prod) -} - -// modOrder reduces z mod order. -func (z *scalar64) modOrder() { - var o64, x scalar64 - o64.fromScalar(&order) - // Performs: while (z >= order) { z = z-order } - // At most 8 (eight) iterations reduce 3 bits by subtracting. - for i := 0; i < 8; i++ { - c := sub(x[:], z[:], o64[:]) // (c || x) = z-order - z.Cmov(1-c, &x) // if c != 0 { z = x } - } -} - -// FromBytes stores z = x mod order, where x is a number stored in little-endian order. -func (z *Scalar) FromBytes(x []byte) { - n := len(x) - nCeil := (n + 7) >> 3 - for i := range z { - z[i] = 0 - } - if nCeil < _N { - copy(z[:], x) - return - } - copy(z[:], x[8*(nCeil-_N):]) - var z64 scalar64 - z64.fromScalar(z) - for i := nCeil - _N - 1; i >= 0; i-- { - low := binary.LittleEndian.Uint64(x[8*i:]) - high := z64.leftShift(low) - z64.reduceOneWord(high) - } - z64.modOrder() - z64.toScalar(z) -} - -// divBy4 calculates z = x/4 mod order. 
-func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) } - -// Red reduces z mod order. -func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) } - -// Neg calculates z = -z mod order. -func (z *Scalar) Neg() { z.Sub(&order, z) } - -// Add calculates z = x+y mod order. -func (z *Scalar) Add(x, y *Scalar) { - var z64, x64, y64, t scalar64 - x64.fromScalar(x) - y64.fromScalar(y) - c := add(z64[:], x64[:], y64[:]) - add(t[:], z64[:], residue448[:]) - z64.Cmov(c, &t) - z64.modOrder() - z64.toScalar(z) -} - -// Sub calculates z = x-y mod order. -func (z *Scalar) Sub(x, y *Scalar) { - var z64, x64, y64, t scalar64 - x64.fromScalar(x) - y64.fromScalar(y) - c := sub(z64[:], x64[:], y64[:]) - sub(t[:], z64[:], residue448[:]) - z64.Cmov(c, &t) - z64.modOrder() - z64.toScalar(z) -} - -// Mul calculates z = x*y mod order. -func (z *Scalar) Mul(x, y *Scalar) { - var z64, x64, y64 scalar64 - prod := (&[_N + 1]uint64{})[:] - x64.fromScalar(x) - y64.fromScalar(y) - mulWord(prod, x64[:], y64[_N-1]) - copy(z64[:], prod[:_N]) - z64.reduceOneWord(prod[_N]) - for i := _N - 2; i >= 0; i-- { - h := z64.leftShift(0) - z64.reduceOneWord(h) - mulWord(prod, x64[:], y64[i]) - c := add(z64[:], z64[:], prod[:_N]) - z64.reduceOneWord(prod[_N] + c) - } - z64.modOrder() - z64.toScalar(z) -} - -// IsZero returns true if z=0. -func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} } diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go deleted file mode 100644 index 83d7cdadd3..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go +++ /dev/null @@ -1,138 +0,0 @@ -package goldilocks - -import ( - "crypto/subtle" - "math/bits" - - "github.com/cloudflare/circl/internal/conv" - "github.com/cloudflare/circl/math" - fp "github.com/cloudflare/circl/math/fp448" -) - -// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks. -type twistCurve struct{} - -// Identity returns the identity point. -func (twistCurve) Identity() *twistPoint { - return &twistPoint{ - y: fp.One(), - z: fp.One(), - } -} - -// subYDiv16 update x = (x - y) / 16. -func subYDiv16(x *scalar64, y int64) { - s := uint64(y >> 63) - x0, b0 := bits.Sub64((*x)[0], uint64(y), 0) - x1, b1 := bits.Sub64((*x)[1], s, b0) - x2, b2 := bits.Sub64((*x)[2], s, b1) - x3, b3 := bits.Sub64((*x)[3], s, b2) - x4, b4 := bits.Sub64((*x)[4], s, b3) - x5, b5 := bits.Sub64((*x)[5], s, b4) - x6, _ := bits.Sub64((*x)[6], s, b5) - x[0] = (x0 >> 4) | (x1 << 60) - x[1] = (x1 >> 4) | (x2 << 60) - x[2] = (x2 >> 4) | (x3 << 60) - x[3] = (x3 >> 4) | (x4 << 60) - x[4] = (x4 >> 4) | (x5 << 60) - x[5] = (x5 >> 4) | (x6 << 60) - x[6] = (x6 >> 4) -} - -func recodeScalar(d *[113]int8, k *Scalar) { - var k64 scalar64 - k64.fromScalar(k) - for i := 0; i < 112; i++ { - d[i] = int8((k64[0] & 0x1f) - 16) - subYDiv16(&k64, int64(d[i])) - } - d[112] = int8(k64[0]) -} - -// ScalarMult returns kP. 
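
Editor's note: recodeScalar and subYDiv16 above rewrite an odd 448-bit scalar as 113 signed odd base-16 digits, d[i] in [-15, 15], which is what lets ScalarMult, next, pair four doublings per digit with a table of odd multiples only. A round-trip of the same recoding rule on math/big, purely illustrative and with no constant-time claims:

// Editorial sketch: round-trip of the signed-digit recoding implemented by
// recodeScalar/subYDiv16. An odd k becomes 113 odd digits d[i] in [-15, 15]
// with k = sum(d[i] * 16^i). Toy big.Int version, no constant-time claims.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	k, _ := new(big.Int).SetString("123456789123456789123456789123456789123456789", 10)
	k.SetBit(k, 0, 1) // force k odd; the real code instead negates even scalars

	var digits [113]int64
	t := new(big.Int).Set(k)
	for i := 0; i < 112; i++ {
		d := new(big.Int).And(t, big.NewInt(31)).Int64() - 16 // (t mod 32) - 16, always odd
		digits[i] = d
		t.Sub(t, big.NewInt(d))
		t.Rsh(t, 4) // exact division by 16, since t-d ≡ 0 (mod 16)
	}
	digits[112] = t.Int64() // top digit absorbs what remains

	// Reconstruct sum(d[i] * 16^i) with Horner's rule and compare.
	sum := new(big.Int)
	for i := 112; i >= 0; i-- {
		sum.Lsh(sum, 4)
		sum.Add(sum, big.NewInt(digits[i]))
	}
	fmt.Println(sum.Cmp(k) == 0) // true
}
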
-func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint { - var TabP [8]preTwistPointProy - var S preTwistPointProy - var d [113]int8 - - var isZero int - if k.IsZero() { - isZero = 1 - } - subtle.ConstantTimeCopy(isZero, k[:], order[:]) - - minusK := *k - isEven := 1 - int(k[0]&0x1) - minusK.Neg() - subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) - recodeScalar(&d, k) - - P.oddMultiples(TabP[:]) - Q := e.Identity() - for i := 112; i >= 0; i-- { - Q.Double() - Q.Double() - Q.Double() - Q.Double() - mask := d[i] >> 7 - absDi := (d[i] + mask) ^ mask - inx := int32((absDi - 1) >> 1) - sig := int((d[i] >> 7) & 0x1) - for j := range TabP { - S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j)))) - } - S.cneg(sig) - Q.mixAdd(&S) - } - Q.cneg(uint(isEven)) - return Q -} - -const ( - omegaFix = 7 - omegaVar = 5 -) - -// CombinedMult returns mG+nP. -func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint { - nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix) - nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar) - - if len(nafFix) > len(nafVar) { - nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) - } else if len(nafFix) < len(nafVar) { - nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) - } - - var TabQ [1 << (omegaVar - 2)]preTwistPointProy - P.oddMultiples(TabQ[:]) - Q := e.Identity() - for i := len(nafFix) - 1; i >= 0; i-- { - Q.Double() - // Generator point - if nafFix[i] != 0 { - idxM := absolute(nafFix[i]) >> 1 - R := tabVerif[idxM] - if nafFix[i] < 0 { - R.neg() - } - Q.mixAddZ1(&R) - } - // Variable input point - if nafVar[i] != 0 { - idxN := absolute(nafVar[i]) >> 1 - S := TabQ[idxN] - if nafVar[i] < 0 { - S.neg() - } - Q.mixAdd(&S) - } - } - return Q -} - -// absolute returns always a positive value. -func absolute(x int32) int32 { - mask := x >> 31 - return (x + mask) ^ mask -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go deleted file mode 100644 index c55db77b06..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go +++ /dev/null @@ -1,135 +0,0 @@ -package goldilocks - -import ( - "fmt" - - fp "github.com/cloudflare/circl/math/fp448" -) - -type twistPoint struct{ x, y, z, ta, tb fp.Elt } - -type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt } - -type preTwistPointProy struct { - preTwistPointAffine - z2 fp.Elt -} - -func (P *twistPoint) String() string { - return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) -} - -// cneg conditionally negates the point if b=1. -func (P *twistPoint) cneg(b uint) { - t := &fp.Elt{} - fp.Neg(t, &P.x) - fp.Cmov(&P.x, t, b) - fp.Neg(t, &P.ta) - fp.Cmov(&P.ta, t, b) -} - -// Double updates P with 2P. -func (P *twistPoint) Double() { - // This is formula (7) from "Twisted Edwards Curves Revisited" by - // Hisil H., Wong K.KH., Carter G., Dawson E. 
(2008) - // https://doi.org/10.1007/978-3-540-89255-7_20 - Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb - a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb - fp.Add(e, Px, Py) // x+y - fp.Sqr(a, Px) // A = x^2 - fp.Sqr(b, Py) // B = y^2 - fp.Sqr(c, Pz) // z^2 - fp.Add(c, c, c) // C = 2*z^2 - fp.Add(h, a, b) // H = A+B - fp.Sqr(e, e) // (x+y)^2 - fp.Sub(e, e, h) // E = (x+y)^2-A-B - fp.Sub(g, b, a) // G = B-A - fp.Sub(f, c, g) // F = C-G - fp.Mul(Pz, f, g) // Z = F * G - fp.Mul(Px, e, f) // X = E * F - fp.Mul(Py, g, h) // Y = G * H, T = E * H -} - -// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q = 1. -func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) { - fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1) - P.coreAddition(Q) -} - -// coreAddition calculates P=P+Q for curves with A=-1. -func (P *twistPoint) coreAddition(Q *preTwistPointAffine) { - // This is the formula following (5) from "Twisted Edwards Curves Revisited" by - // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) - // https://doi.org/10.1007/978-3-540-89255-7_20 - Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb - addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 - a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb - fp.Mul(c, Pta, Ptb) // t1 = ta*tb - fp.Sub(h, Py, Px) // y1-x1 - fp.Add(b, Py, Px) // y1+x1 - fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) - fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) - fp.Mul(c, c, dt2) // C = 2*D*t1*t2 - fp.Sub(e, b, a) // E = B-A - fp.Add(h, b, a) // H = B+A - fp.Sub(f, d, c) // F = D-C - fp.Add(g, d, c) // G = D+C - fp.Mul(Pz, f, g) // Z = F * G - fp.Mul(Px, e, f) // X = E * F - fp.Mul(Py, g, h) // Y = G * H, T = E * H -} - -func (P *preTwistPointAffine) neg() { - P.addYX, P.subYX = P.subYX, P.addYX - fp.Neg(&P.dt2, &P.dt2) -} - -func (P *preTwistPointAffine) cneg(b int) { - t := &fp.Elt{} - fp.Cswap(&P.addYX, &P.subYX, uint(b)) - fp.Neg(t, &P.dt2) - fp.Cmov(&P.dt2, t, uint(b)) -} - -func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) { - fp.Cmov(&P.addYX, &Q.addYX, b) - fp.Cmov(&P.subYX, &Q.subYX, b) - fp.Cmov(&P.dt2, &Q.dt2, b) -} - -// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q != 1. -func (P *twistPoint) mixAdd(Q *preTwistPointProy) { - fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 - P.coreAddition(&Q.preTwistPointAffine) -} - -// oddMultiples calculates T[i] = (2*i-1)P for 0 < i < len(T). -func (P *twistPoint) oddMultiples(T []preTwistPointProy) { - if n := len(T); n > 0 { - T[0].FromTwistPoint(P) - _2P := *P - _2P.Double() - R := &preTwistPointProy{} - R.FromTwistPoint(&_2P) - for i := 1; i < n; i++ { - P.mixAdd(R) - T[i].FromTwistPoint(P) - } - } -} - -// cmov conditionally moves Q into P if b=1. -func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) { - P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b) - fp.Cmov(&P.z2, &Q.z2, b) -} - -// FromTwistPoint precomputes some coordinates of Q for missed addition. 
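
Editor's note: CombinedMult above recodes both scalars with omega-NAF (width omegaFix = 7 for the fixed generator served by tabVerif, omegaVar = 5 for the variable point), so nonzero digits are odd and bounded by 2^(w-1), and only 2^(w-2) odd multiples need precomputing. A minimal sketch of the textbook omega-NAF rule; circl's math.OmegaNAF works on big integers, and this int64 toy only demonstrates the digit set:

// Editorial sketch: textbook omega-NAF recoding, the digit form consumed by
// CombinedMult. Nonzero digits are odd with |d| < 2^(w-1), and any w
// consecutive digits hold at most one nonzero entry, so 2^(w-2) odd
// multiples suffice as a table. Toy int64 version for illustration only.
package main

import "fmt"

func omegaNAF(k int64, w uint) []int64 {
	var digits []int64
	for k != 0 {
		var d int64
		if k&1 == 1 {
			m := int64(1) << w
			d = k & (m - 1) // k mod 2^w
			if d >= m>>1 {
				d -= m // center into (-2^(w-1), 2^(w-1))
			}
			k -= d
		}
		digits = append(digits, d)
		k >>= 1
	}
	return digits
}

func main() {
	k := int64(314159265358979)
	naf := omegaNAF(k, 5)

	var sum int64
	for i := len(naf) - 1; i >= 0; i-- { // reconstruct sum(naf[i] * 2^i)
		sum = sum<<1 + naf[i]
	}
	fmt.Println(sum == k) // true
}
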
-func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) { - fp.Add(&P.addYX, &Q.y, &Q.x) // addYX = X + Y - fp.Sub(&P.subYX, &Q.y, &Q.x) // subYX = Y - X - fp.Mul(&P.dt2, &Q.ta, &Q.tb) // T = ta*tb - fp.Mul(&P.dt2, &P.dt2, &paramDTwist) // D*T - fp.Add(&P.dt2, &P.dt2, &P.dt2) // dt2 = 2*D*T - fp.Add(&P.z2, &Q.z, &Q.z) // z2 = 2*Z -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go deleted file mode 100644 index ed432e02c7..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go +++ /dev/null @@ -1,216 +0,0 @@ -package goldilocks - -import fp "github.com/cloudflare/circl/math/fp448" - -var tabFixMult = [fxV][fx2w1]preTwistPointAffine{ - { - { - addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, - subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, - dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, - }, - { - addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab}, - subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70}, - dt2: fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c}, - }, - { - addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb}, - subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04,
0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 0xdb, 0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d}, - dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde}, - }, - { - addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33}, - subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab}, - dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4}, - }, - }, - { - { - addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d}, - subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3}, - dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58}, - }, - { - addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf}, - subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49}, - dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 
0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 0x1d, 0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68}, - }, - { - addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb}, - subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c}, - dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb}, - }, - { - addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66}, - subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd}, - dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff}, - }, - }, -} - -// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where -// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a -// 4-degree isogeny. 
-var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{ - { /* 1P*/ - addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, - subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, - dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, - }, - { /* 3P*/ - addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5}, - subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4}, - dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d}, - }, - { /* 5P*/ - addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f}, - subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38}, - dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82}, - }, - { /* 7P*/ - addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 
0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82}, - subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9}, - dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11}, - }, - { /* 9P*/ - addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac}, - subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90}, - dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10}, - }, - { /* 11P*/ - addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3}, - subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63}, - dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3}, - }, - { /* 13P*/ - addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b}, - subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 
0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c}, - dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7}, - }, - { /* 15P*/ - addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b}, - subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0}, - dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78}, - }, - { /* 17P*/ - addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97}, - subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6}, - dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5}, - }, - { /* 19P*/ - addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed}, - subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8}, - dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 
0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48}, - }, - { /* 21P*/ - addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02}, - subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62}, - dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f}, - }, - { /* 23P*/ - addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c}, - subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3}, - dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1}, - }, - { /* 25P*/ - addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82}, - subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06}, - dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44}, - }, - { /* 27P*/ - addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 
0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a}, - subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c}, - dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee}, - }, - { /* 29P*/ - addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f}, - subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82}, - dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e}, - }, - { /* 31P*/ - addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1}, - subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae}, - dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81}, - }, - { /* 33P*/ - addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a}, - subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 
0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34}, - dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2}, - }, - { /* 35P*/ - addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a}, - subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a}, - dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e}, - }, - { /* 37P*/ - addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17}, - subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98}, - dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7}, - }, - { /* 39P*/ - addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb}, - subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2}, - dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 
0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec}, - }, - { /* 41P*/ - addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15}, - subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51}, - dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49}, - }, - { /* 43P*/ - addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa}, - subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32}, - dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0}, - }, - { /* 45P*/ - addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89}, - subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4}, - dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1}, - }, - { /* 47P*/ - addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 
0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a}, - subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91}, - dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f}, - }, - { /* 49P*/ - addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09}, - subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89}, - dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b}, - }, - { /* 51P*/ - addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0}, - subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83}, - dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50}, - }, - { /* 53P*/ - addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3}, - subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 
0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63}, - dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18}, - }, - { /* 55P*/ - addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d}, - subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0}, - dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5}, - }, - { /* 57P*/ - addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b}, - subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88}, - dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb}, - }, - { /* 59P*/ - addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33}, - subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb}, - dt2: fp.Elt{0x6a, 0xa2, 0xb1, 
0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10}, - }, - { /* 61P*/ - addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04}, - subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13}, - dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75}, - }, - { /* 63P*/ - addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76}, - subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01}, - dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0}, - }, -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go deleted file mode 100644 index f6ac5edbbb..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go +++ /dev/null @@ -1,62 +0,0 @@ -package goldilocks - -import ( - "crypto/subtle" - - mlsb "github.com/cloudflare/circl/math/mlsbset" -) - -const ( - // MLSBRecoding parameters - fxT = 448 - fxV = 2 - fxW = 3 - fx2w1 = 1 << (uint(fxW) - 1) -) - -// ScalarBaseMult returns kG where G is the generator point. 
-func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint { - m, err := mlsb.New(fxT, fxV, fxW) - if err != nil { - panic(err) - } - if m.IsExtended() { - panic("not extended") - } - - var isZero int - if k.IsZero() { - isZero = 1 - } - subtle.ConstantTimeCopy(isZero, k[:], order[:]) - - minusK := *k - isEven := 1 - int(k[0]&0x1) - minusK.Neg() - subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) - c, err := m.Encode(k[:]) - if err != nil { - panic(err) - } - - gP := c.Exp(groupMLSB{}) - P := gP.(*twistPoint) - P.cneg(uint(isEven)) - return P -} - -type groupMLSB struct{} - -func (e groupMLSB) ExtendedEltP() mlsb.EltP { return nil } -func (e groupMLSB) Sqr(x mlsb.EltG) { x.(*twistPoint).Double() } -func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) } -func (e groupMLSB) Identity() mlsb.EltG { return twistCurve{}.Identity() } -func (e groupMLSB) NewEltP() mlsb.EltP { return &preTwistPointAffine{} } -func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) { - Tabj := &tabFixMult[v] - P := a.(*preTwistPointAffine) - for k := range Tabj { - P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u))) - } - P.cneg(int(s >> 31)) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/conv/conv.go deleted file mode 100644 index 3fd0df496f..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/conv/conv.go +++ /dev/null @@ -1,173 +0,0 @@ -package conv - -import ( - "encoding/binary" - "fmt" - "math/big" - "strings" - - "golang.org/x/crypto/cryptobyte" -) - -// BytesLe2Hex returns an hexadecimal string of a number stored in a -// little-endian order slice x. -func BytesLe2Hex(x []byte) string { - b := &strings.Builder{} - b.Grow(2*len(x) + 2) - fmt.Fprint(b, "0x") - if len(x) == 0 { - fmt.Fprint(b, "00") - } - for i := len(x) - 1; i >= 0; i-- { - fmt.Fprintf(b, "%02x", x[i]) - } - return b.String() -} - -// BytesLe2BigInt converts a little-endian slice x into a big-endian -// math/big.Int. -func BytesLe2BigInt(x []byte) *big.Int { - n := len(x) - b := new(big.Int) - if len(x) > 0 { - y := make([]byte, n) - for i := 0; i < n; i++ { - y[n-1-i] = x[i] - } - b.SetBytes(y) - } - return b -} - -// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64. -func BytesBe2Uint64Le(x []byte) []uint64 { - l := len(x) - z := make([]uint64, (l+7)/8) - blocks := l / 8 - for i := 0; i < blocks; i++ { - z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):]) - } - remBytes := l % 8 - for i := 0; i < remBytes; i++ { - z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i) - } - return z -} - -// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z. -// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros). -// If x does not fit in the slice or is negative, z is not modified. -func BigInt2BytesLe(z []byte, x *big.Int) { - xLen := (x.BitLen() + 7) >> 3 - zLen := len(z) - if zLen >= xLen && x.Sign() >= 0 { - y := x.Bytes() - for i := 0; i < xLen; i++ { - z[i] = y[xLen-1-i] - } - for i := xLen; i < zLen; i++ { - z[i] = 0 - } - } -} - -// Uint64Le2BigInt converts a little-endian slice x into a big number. -func Uint64Le2BigInt(x []uint64) *big.Int { - n := len(x) - b := new(big.Int) - var bi big.Int - for i := n - 1; i >= 0; i-- { - bi.SetUint64(x[i]) - b.Lsh(b, 64) - b.Add(b, &bi) - } - return b -} - -// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes. 
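
Editor's note: ScalarMult and groupMLSB.Lookup above never index a table by a secret digit; they scan every entry with subtle.ConstantTimeEq and conditionally move the match, so the memory access pattern is fixed. The same pattern in a self-contained form (the [8]byte element type is an arbitrary stand-in for the precomputed points):

// Editorial sketch: the secret-index table lookup pattern used by ScalarMult
// and groupMLSB.Lookup, written against crypto/subtle for a generic table.
// Every entry is touched, so memory access does not depend on the index u.
package main

import (
	"crypto/subtle"
	"fmt"
)

func ctLookup(table [][8]byte, u int32) [8]byte {
	var r [8]byte
	for j := range table {
		v := subtle.ConstantTimeEq(int32(j), u) // 1 iff j == u
		for i := range r {
			r[i] = byte(subtle.ConstantTimeSelect(v, int(table[j][i]), int(r[i])))
		}
	}
	return r
}

func main() {
	table := [][8]byte{{1}, {2}, {3}, {4}}
	fmt.Println(ctLookup(table, 2)) // [3 0 0 0 0 0 0 0]
}
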
-func Uint64Le2BytesLe(x []uint64) []byte { - b := make([]byte, 8*len(x)) - n := len(x) - for i := 0; i < n; i++ { - binary.LittleEndian.PutUint64(b[i*8:], x[i]) - } - return b -} - -// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes. -func Uint64Le2BytesBe(x []uint64) []byte { - b := make([]byte, 8*len(x)) - n := len(x) - for i := 0; i < n; i++ { - binary.BigEndian.PutUint64(b[i*8:], x[n-1-i]) - } - return b -} - -// Uint64Le2Hex returns an hexadecimal string of a number stored in a -// little-endian order slice x. -func Uint64Le2Hex(x []uint64) string { - b := new(strings.Builder) - b.Grow(16*len(x) + 2) - fmt.Fprint(b, "0x") - if len(x) == 0 { - fmt.Fprint(b, "00") - } - for i := len(x) - 1; i >= 0; i-- { - fmt.Fprintf(b, "%016x", x[i]) - } - return b.String() -} - -// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z. -// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros). -// If x does not fit in the slice or is negative, z is not modified. -func BigInt2Uint64Le(z []uint64, x *big.Int) { - xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words - zLen := len(z) - if zLen >= xLen && x.Sign() > 0 { - var y, yi big.Int - y.Set(x) - two64 := big.NewInt(1) - two64.Lsh(two64, 64).Sub(two64, big.NewInt(1)) - for i := 0; i < xLen; i++ { - yi.And(&y, two64) - z[i] = yi.Uint64() - y.Rsh(&y, 64) - } - } - for i := xLen; i < zLen; i++ { - z[i] = 0 - } -} - -// MarshalBinary encodes a value into a byte array in a format readable by UnmarshalBinary. -func MarshalBinary(v cryptobyte.MarshalingValue) ([]byte, error) { - const DefaultSize = 32 - b := cryptobyte.NewBuilder(make([]byte, 0, DefaultSize)) - b.AddValue(v) - return b.Bytes() -} - -// MarshalBinaryLen encodes a value into an array of n bytes in a format readable by UnmarshalBinary. -func MarshalBinaryLen(v cryptobyte.MarshalingValue, length uint) ([]byte, error) { - b := cryptobyte.NewFixedBuilder(make([]byte, 0, length)) - b.AddValue(v) - return b.Bytes() -} - -// A UnmarshalingValue decodes itself from a cryptobyte.String and advances the pointer. -// It reports whether the read was successful. -type UnmarshalingValue interface { - Unmarshal(*cryptobyte.String) bool -} - -// UnmarshalBinary recovers a value from a byte array. -// It returns an error if the read was unsuccessful. -func UnmarshalBinary(v UnmarshalingValue, data []byte) (err error) { - s := cryptobyte.String(data) - if data == nil || !v.Unmarshal(&s) || !s.Empty() { - err = fmt.Errorf("cannot read %T from input string", v) - } - return -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/doc.go deleted file mode 100644 index 7e02309070..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha3 implements the SHA-3 fixed-output-length hash functions and -// the SHAKE variable-output-length hash functions defined by FIPS-202. -// -// Both types of hash function use the "sponge" construction and the Keccak -// permutation. For a detailed specification see http://keccak.noekeon.org/ -// -// # Guidance -// -// If you aren't sure what function you need, use SHAKE256 with at least 64 -// bytes of output. 
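That guidance translates directly into code. The vendored copy here is internal to circl, but golang.org/x/crypto/sha3 exposes the equivalent one-shot and streaming APIs; a minimal sketch, assuming that public package:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("hello")

	// One-shot: ask for 64 bytes of SHAKE256 output for 256-bit security.
	out := make([]byte, 64)
	sha3.ShakeSum256(out, msg)
	fmt.Printf("%x\n", out[:8])

	// Streaming: absorb with Write, then squeeze any amount with Read.
	h := sha3.NewShake256()
	_, _ = h.Write(msg)
	squeezed := make([]byte, 64)
	_, _ = h.Read(squeezed)
	fmt.Printf("%x\n", squeezed[:8]) // same prefix as the one-shot output
}
```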
The SHAKE instances are faster than the SHA3 instances; -// the latter have to allocate memory to conform to the hash.Hash interface. -// -// If you need a secret-key MAC (message authentication code), prepend the -// secret key to the input, hash with SHAKE256 and read at least 32 bytes of -// output. -// -// # Security strengths -// -// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security -// strength against preimage attacks of x bits. Since they only produce "x" -// bits of output, their collision-resistance is only "x/2" bits. -// -// The SHAKE-256 and -128 functions have a generic security strength of 256 and -// 128 bits against all attacks, provided that at least 2x bits of their output -// is used. Requesting more than 64 or 32 bytes of output, respectively, does -// not increase the collision-resistance of the SHAKE functions. -// -// # The sponge construction -// -// A sponge builds a pseudo-random function from a public pseudo-random -// permutation, by applying the permutation to a state of "rate + capacity" -// bytes, but hiding "capacity" of the bytes. -// -// A sponge starts out with a zero state. To hash an input using a sponge, up -// to "rate" bytes of the input are XORed into the sponge's state. The sponge -// is then "full" and the permutation is applied to "empty" it. This process is -// repeated until all the input has been "absorbed". The input is then padded. -// The digest is "squeezed" from the sponge in the same way, except that output -// is copied out instead of input being XORed in. -// -// A sponge is parameterized by its generic security strength, which is equal -// to half its capacity; capacity + rate is equal to the permutation's width. -// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means -// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. -// -// # Recommendations -// -// The SHAKE functions are recommended for most new uses. They can produce -// output of arbitrary length. SHAKE256, with an output length of at least -// 64 bytes, provides 256-bit security against all attacks. The Keccak team -// recommends it for most applications upgrading from SHA2-512. (NIST chose a -// much stronger, but much slower, sponge instance for SHA3-512.) -// -// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. -// They produce output of the same length, with the same security strengths -// against all attacks. This means, in particular, that SHA3-256 only has -// 128-bit collision resistance, because its output length is 32 bytes. -package sha3 diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go deleted file mode 100644 index 7d2365a76e..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// This file provides functions for creating instances of the SHA-3 -// and SHAKE hash functions, as well as utility functions for hashing -// bytes. - -// New224 creates a new SHA3-224 hash. -// Its generic security strength is 224 bits against preimage attacks, -// and 112 bits against collision attacks. -func New224() State { - return State{rate: 144, outputLen: 28, dsbyte: 0x06} -} - -// New256 creates a new SHA3-256 hash. 
-// Its generic security strength is 256 bits against preimage attacks, -// and 128 bits against collision attacks. -func New256() State { - return State{rate: 136, outputLen: 32, dsbyte: 0x06} -} - -// New384 creates a new SHA3-384 hash. -// Its generic security strength is 384 bits against preimage attacks, -// and 192 bits against collision attacks. -func New384() State { - return State{rate: 104, outputLen: 48, dsbyte: 0x06} -} - -// New512 creates a new SHA3-512 hash. -// Its generic security strength is 512 bits against preimage attacks, -// and 256 bits against collision attacks. -func New512() State { - return State{rate: 72, outputLen: 64, dsbyte: 0x06} -} - -// Sum224 returns the SHA3-224 digest of the data. -func Sum224(data []byte) (digest [28]byte) { - h := New224() - _, _ = h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum256 returns the SHA3-256 digest of the data. -func Sum256(data []byte) (digest [32]byte) { - h := New256() - _, _ = h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum384 returns the SHA3-384 digest of the data. -func Sum384(data []byte) (digest [48]byte) { - h := New384() - _, _ = h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum512 returns the SHA3-512 digest of the data. -func Sum512(data []byte) (digest [64]byte) { - h := New512() - _, _ = h.Write(data) - h.Sum(digest[:0]) - return -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go deleted file mode 100644 index 1755fd1e6d..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// KeccakF1600 applies the Keccak permutation to a 1600b-wide -// state represented as a slice of 25 uint64s. -// If turbo is true, applies the 12-round variant instead of the -// regular 24-round variant. -// nolint:funlen -func KeccakF1600(a *[25]uint64, turbo bool) { - // Implementation translated from Keccak-inplace.c - // in the keccak reference code. - var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 - - i := 0 - - if turbo { - i = 12 - } - - for ; i < 24; i += 4 { - // Combines the 5 steps in each round into 2 steps. - // Unrolls 4 rounds per loop and spreads some steps across rounds. 
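Every `t<<r | t>>(64-r)` pair in the unrolled rounds that follow is a 64-bit left rotation (the Keccak rho step, with a fixed offset per lane); Go compilers lower both that pattern and math/bits.RotateLeft64 to a single rotate instruction. A quick check of the equivalence:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	t := uint64(0x0123456789abcdef)
	const r = 44 // one of the rho rotation offsets used in the rounds
	manual := t<<r | t>>(64-r)
	fmt.Println(manual == bits.RotateLeft64(t, r)) // true
}
```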
- - // Round 1 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[6] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[12] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[18] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[24] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i] - a[6] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[16] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[22] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[3] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[10] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[1] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[7] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[19] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[20] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[11] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[23] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[4] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[5] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[2] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[8] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[14] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[15] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - // Round 2 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[16] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[7] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[23] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[14] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1] - a[16] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[11] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[2] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[18] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[20] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[6] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[22] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[4] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[15] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ 
bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[1] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[8] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[24] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[10] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[12] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[3] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[19] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[5] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - // Round 3 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[11] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[22] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[8] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[19] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2] - a[11] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[1] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[12] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[23] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[15] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[16] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[2] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[24] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[5] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[6] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[3] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[14] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[20] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[7] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[18] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[4] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[10] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - // Round 4 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[1] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[2] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[3] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[4] 
^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3] - a[1] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[6] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[7] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[8] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[5] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[11] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[12] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[14] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[10] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[16] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[18] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[19] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[15] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[22] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[23] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[24] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[20] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - } -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/rc.go deleted file mode 100644 index 6a3df42f30..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/rc.go +++ /dev/null @@ -1,29 +0,0 @@ -package sha3 - -// RC stores the round constants for use in the ι step. -var RC = [24]uint64{ - 0x0000000000000001, - 0x0000000000008082, - 0x800000000000808A, - 0x8000000080008000, - 0x000000000000808B, - 0x0000000080000001, - 0x8000000080008081, - 0x8000000000008009, - 0x000000000000008A, - 0x0000000000000088, - 0x0000000080008009, - 0x000000008000000A, - 0x000000008000808B, - 0x800000000000008B, - 0x8000000000008089, - 0x8000000000008003, - 0x8000000000008002, - 0x8000000000000080, - 0x000000000000800A, - 0x800000008000000A, - 0x8000000080008081, - 0x8000000000008080, - 0x0000000080000001, - 0x8000000080008008, -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go deleted file mode 100644 index a0df5aa6c5..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// spongeDirection indicates the direction bytes are flowing through the sponge. -type spongeDirection int - -const ( - // spongeAbsorbing indicates that the sponge is absorbing input. - spongeAbsorbing spongeDirection = iota - // spongeSqueezing indicates that the sponge is being squeezed. - spongeSqueezing -) - -const ( - // maxRate is the maximum size of the internal buffer. SHAKE-256 - // currently needs the largest buffer. 
- maxRate = 168 -) - -func (d *State) buf() []byte { - return d.storage.asBytes()[d.bufo:d.bufe] -} - -type State struct { - // Generic sponge components. - a [25]uint64 // main state of the hash - rate int // the number of bytes of state to use - - bufo int // offset of buffer in storage - bufe int // end of buffer in storage - - // dsbyte contains the "domain separation" bits and the first bit of - // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the - // SHA-3 and SHAKE functions by appending bitstrings to the message. - // Using a little-endian bit-ordering convention, these are "01" for SHA-3 - // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the - // padding rule from section 5.1 is applied to pad the message to a multiple - // of the rate, which involves adding a "1" bit, zero or more "0" bits, and - // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, - // giving 00000110b (0x06) and 00011111b (0x1f). - // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf - // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and - // Extendable-Output Functions (May 2014)" - dsbyte byte - - storage storageBuf - - // Specific to SHA-3 and SHAKE. - outputLen int // the default output size in bytes - state spongeDirection // whether the sponge is absorbing or squeezing - turbo bool // Whether we're using 12 rounds instead of 24 -} - -// BlockSize returns the rate of sponge underlying this hash function. -func (d *State) BlockSize() int { return d.rate } - -// Size returns the output size of the hash function in bytes. -func (d *State) Size() int { return d.outputLen } - -// Reset clears the internal state by zeroing the sponge state and -// the byte buffer, and setting Sponge.state to absorbing. -func (d *State) Reset() { - // Zero the permutation's state. - for i := range d.a { - d.a[i] = 0 - } - d.state = spongeAbsorbing - d.bufo = 0 - d.bufe = 0 -} - -func (d *State) clone() *State { - ret := *d - return &ret -} - -// permute applies the KeccakF-1600 permutation. It handles -// any input-output buffering. -func (d *State) permute() { - switch d.state { - case spongeAbsorbing: - // If we're absorbing, we need to xor the input into the state - // before applying the permutation. - xorIn(d, d.buf()) - d.bufe = 0 - d.bufo = 0 - KeccakF1600(&d.a, d.turbo) - case spongeSqueezing: - // If we're squeezing, we need to apply the permutation before - // copying more output. - KeccakF1600(&d.a, d.turbo) - d.bufe = d.rate - d.bufo = 0 - copyOut(d, d.buf()) - } -} - -// pads appends the domain separation bits in dsbyte, applies -// the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *State) padAndPermute(dsbyte byte) { - // Pad with this instance's domain-separator bits. We know that there's - // at least one byte of space in d.buf() because, if it were full, - // permute would have been called to empty it. dsbyte also contains the - // first one bit for the padding. See the comment in the state struct. - zerosStart := d.bufe + 1 - d.bufe = d.rate - buf := d.buf() - buf[zerosStart-1] = dsbyte - for i := zerosStart; i < d.rate; i++ { - buf[i] = 0 - } - // This adds the final one bit for the padding. Because of the way that - // bits are numbered from the LSB upwards, the final bit is the MSB of - // the last byte. - buf[d.rate-1] ^= 0x80 - // Apply the permutation - d.permute() - d.state = spongeSqueezing - d.bufe = d.rate - copyOut(d, buf) -} - -// Write absorbs more data into the hash's state. 
It produces an error -// if more data is written to the ShakeHash after writing -func (d *State) Write(p []byte) (written int, err error) { - if d.state != spongeAbsorbing { - panic("sha3: write to sponge after read") - } - written = len(p) - - for len(p) > 0 { - bufl := d.bufe - d.bufo - if bufl == 0 && len(p) >= d.rate { - // The fast path; absorb a full "rate" bytes of input and apply the permutation. - xorIn(d, p[:d.rate]) - p = p[d.rate:] - KeccakF1600(&d.a, d.turbo) - } else { - // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - bufl - if todo > len(p) { - todo = len(p) - } - d.bufe += todo - buf := d.buf() - copy(buf[bufl:], p[:todo]) - p = p[todo:] - - // If the sponge is full, apply the permutation. - if d.bufe == d.rate { - d.permute() - } - } - } - - return written, nil -} - -// Read squeezes an arbitrary number of bytes from the sponge. -func (d *State) Read(out []byte) (n int, err error) { - // If we're still absorbing, pad and apply the permutation. - if d.state == spongeAbsorbing { - d.padAndPermute(d.dsbyte) - } - - n = len(out) - - // Now, do the squeezing. - for len(out) > 0 { - buf := d.buf() - n := copy(out, buf) - d.bufo += n - out = out[n:] - - // Apply the permutation if we've squeezed the sponge dry. - if d.bufo == d.bufe { - d.permute() - } - } - - return -} - -// Sum applies padding to the hash state and then squeezes out the desired -// number of output bytes. -func (d *State) Sum(in []byte) []byte { - // Make a copy of the original hash so that caller can keep writing - // and summing. - dup := d.clone() - hash := make([]byte, dup.outputLen) - _, _ = dup.Read(hash) - return append(in, hash...) -} - -func (d *State) IsAbsorbing() bool { - return d.state == spongeAbsorbing -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s deleted file mode 100644 index 8a4458f63f..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!appengine - -#include "textflag.h" - -// func kimd(function code, chain *[200]byte, src []byte) -TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 - MOVD function+0(FP), R0 - MOVD chain+8(FP), R1 - LMG src+16(FP), R2, R3 // R2=base, R3=len - -continue: - WORD $0xB93E0002 // KIMD --, R2 - BVS continue // continue if interrupted - MOVD $0, R0 // reset R0 for pre-go1.8 compilers - RET - -// func klmd(function code, chain *[200]byte, dst, src []byte) -TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 - // TODO: SHAKE support - MOVD function+0(FP), R0 - MOVD chain+8(FP), R1 - LMG dst+16(FP), R2, R3 // R2=base, R3=len - LMG src+40(FP), R4, R5 // R4=base, R5=len - -continue: - WORD $0xB93F0024 // KLMD R2, R4 - BVS continue // continue if interrupted - MOVD $0, R0 // reset R0 for pre-go1.8 compilers - RET diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/shake.go deleted file mode 100644 index 77817f758c..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/shake.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// This file defines the ShakeHash interface, and provides -// functions for creating SHAKE and cSHAKE instances, as well as utility -// functions for hashing bytes to arbitrary-length output. -// -// -// SHAKE implementation is based on FIPS PUB 202 [1] -// cSHAKE implementations is based on NIST SP 800-185 [2] -// -// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf -// [2] https://doi.org/10.6028/NIST.SP.800-185 - -import ( - "io" -) - -// ShakeHash defines the interface to hash functions that -// support arbitrary-length output. -type ShakeHash interface { - // Write absorbs more data into the hash's state. It panics if input is - // written to it after output has been read from it. - io.Writer - - // Read reads more output from the hash; reading affects the hash's - // state. (ShakeHash.Read is thus very different from Hash.Sum) - // It never returns an error. - io.Reader - - // Clone returns a copy of the ShakeHash in its current state. - Clone() ShakeHash - - // Reset resets the ShakeHash to its initial state. - Reset() -} - -// Consts for configuring initial SHA-3 state -const ( - dsbyteShake = 0x1f - rate128 = 168 - rate256 = 136 -) - -// Clone returns copy of SHAKE context within its current state. -func (d *State) Clone() ShakeHash { - return d.clone() -} - -// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. -// Its generic security strength is 128 bits against all attacks if at -// least 32 bytes of its output are used. -func NewShake128() State { - return State{rate: rate128, dsbyte: dsbyteShake} -} - -// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash. -// Its generic security strength is 128 bits against all attacks if at -// least 32 bytes of its output are used. -// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. -func NewTurboShake128(D byte) State { - if D == 0 || D > 0x7f { - panic("turboshake: D out of range") - } - return State{rate: rate128, dsbyte: D, turbo: true} -} - -// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. -// Its generic security strength is 256 bits against all attacks if -// at least 64 bytes of its output are used. -func NewShake256() State { - return State{rate: rate256, dsbyte: dsbyteShake} -} - -// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash. -// Its generic security strength is 256 bits against all attacks if -// at least 64 bytes of its output are used. -// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. -func NewTurboShake256(D byte) State { - if D == 0 || D > 0x7f { - panic("turboshake: D out of range") - } - return State{rate: rate256, dsbyte: D, turbo: true} -} - -// ShakeSum128 writes an arbitrary-length digest of data into hash. -func ShakeSum128(hash, data []byte) { - h := NewShake128() - _, _ = h.Write(data) - _, _ = h.Read(hash) -} - -// ShakeSum256 writes an arbitrary-length digest of data into hash. -func ShakeSum256(hash, data []byte) { - h := NewShake256() - _, _ = h.Write(data) - _, _ = h.Read(hash) -} - -// TurboShakeSum128 writes an arbitrary-length digest of data into hash. -func TurboShakeSum128(hash, data []byte, D byte) { - h := NewTurboShake128(D) - _, _ = h.Write(data) - _, _ = h.Read(hash) -} - -// TurboShakeSum256 writes an arbitrary-length digest of data into hash. 
-func TurboShakeSum256(hash, data []byte, D byte) { - h := NewTurboShake256(D) - _, _ = h.Write(data) - _, _ = h.Read(hash) -} - -func (d *State) SwitchDS(D byte) { - d.dsbyte = D -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor.go deleted file mode 100644 index 1e21337454..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 && !386 && !ppc64le) || appengine -// +build !amd64,!386,!ppc64le appengine - -package sha3 - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate]byte - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(b) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go deleted file mode 100644 index 2b0c661790..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine) -// +build !amd64 appengine -// +build !386 appengine -// +build !ppc64le appengine - -package sha3 - -import "encoding/binary" - -// xorIn xors the bytes in buf into the state; it -// makes no non-portable assumptions about memory layout -// or alignment. -func xorIn(d *State, buf []byte) { - n := len(buf) / 8 - - for i := 0; i < n; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } -} - -// copyOut copies ulint64s to a byte buffer. -func copyOut(d *State, b []byte) { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go deleted file mode 100644 index 052fc8d32d..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (amd64 || 386 || ppc64le) && !appengine -// +build amd64 386 ppc64le -// +build !appengine - -package sha3 - -import "unsafe" - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate / 8]uint64 - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(unsafe.Pointer(b)) -} - -// xorInuses unaligned reads and writes to update d.a to contain d.a -// XOR buf. 
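Both xorIn variants fold the input into the sponge state one 64-bit little-endian lane at a time; the portable one above goes through encoding/binary, while the unaligned one below reinterprets the buffer as words. A stand-alone sketch of the lane-wise absorb step (xorLanes is an illustrative name, not the package's):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// xorLanes XORs len(buf)/8 little-endian 64-bit lanes of buf into state,
// mirroring what the portable xorIn does with d.a.
func xorLanes(state []uint64, buf []byte) {
	for i := 0; i+8 <= len(buf); i += 8 {
		state[i/8] ^= binary.LittleEndian.Uint64(buf[i:])
	}
}

func main() {
	state := make([]uint64, 25) // Keccak-f[1600]: 25 lanes of 64 bits
	buf := make([]byte, 136)    // SHA3-256 / SHAKE256 rate in bytes
	buf[0] = 0x01
	xorLanes(state, buf)
	fmt.Printf("%#x\n", state[0]) // 0x1
}
```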
-func xorIn(d *State, buf []byte) { - n := len(buf) - bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] - if n >= 72 { - d.a[0] ^= bw[0] - d.a[1] ^= bw[1] - d.a[2] ^= bw[2] - d.a[3] ^= bw[3] - d.a[4] ^= bw[4] - d.a[5] ^= bw[5] - d.a[6] ^= bw[6] - d.a[7] ^= bw[7] - d.a[8] ^= bw[8] - } - if n >= 104 { - d.a[9] ^= bw[9] - d.a[10] ^= bw[10] - d.a[11] ^= bw[11] - d.a[12] ^= bw[12] - } - if n >= 136 { - d.a[13] ^= bw[13] - d.a[14] ^= bw[14] - d.a[15] ^= bw[15] - d.a[16] ^= bw[16] - } - if n >= 144 { - d.a[17] ^= bw[17] - } - if n >= 168 { - d.a[18] ^= bw[18] - d.a[19] ^= bw[19] - d.a[20] ^= bw[20] - } -} - -func copyOut(d *State, buf []byte) { - ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) - copy(buf, ab[:]) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp.go deleted file mode 100644 index 57a50ff5e9..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp.go +++ /dev/null @@ -1,205 +0,0 @@ -// Package fp25519 provides prime field arithmetic over GF(2^255-19). -package fp25519 - -import ( - "errors" - - "github.com/cloudflare/circl/internal/conv" -) - -// Size in bytes of an element. -const Size = 32 - -// Elt is a prime field element. -type Elt [Size]byte - -func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } - -// p is the prime modulus 2^255-19. -var p = Elt{ - 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, -} - -// P returns the prime modulus 2^255-19. -func P() Elt { return p } - -// ToBytes stores in b the little-endian byte representation of x. -func ToBytes(b []byte, x *Elt) error { - if len(b) != Size { - return errors.New("wrong size") - } - Modp(x) - copy(b, x[:]) - return nil -} - -// IsZero returns true if x is equal to 0. -func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } - -// SetOne assigns x=1. -func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 } - -// Neg calculates z = -x. -func Neg(z, x *Elt) { Sub(z, &p, x) } - -// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is -// indicated by returning isQR = true. Otherwise, when x/y is a quadratic -// non-residue, z will have an undetermined value and isQR = false. 
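The quadratic-residue decision that InvSqrt makes (its implementation follows) comes down to Euler's criterion: x is a square mod p exactly when x^((p-1)/2) == 1 (mod p). A variable-time math/big sketch of that test for p = 2^255-19, purely illustrative since the vendored code stays in constant time:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	// p = 2^255 - 19
	p := new(big.Int).Lsh(one, 255)
	p.Sub(p, big.NewInt(19))

	// Euler's criterion: x is a quadratic residue mod p
	// iff x^((p-1)/2) == 1 (mod p).
	exp := new(big.Int).Rsh(new(big.Int).Sub(p, one), 1)
	x := big.NewInt(4) // 4 = 2^2 is trivially a residue
	r := new(big.Int).Exp(x, exp, p)
	fmt.Println(r.Cmp(one) == 0) // true
}
```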
-func InvSqrt(z, x, y *Elt) (isQR bool) { - sqrtMinusOne := &Elt{ - 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4, - 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f, - 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b, - 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b, - } - t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{} - - Mul(t0, x, y) // t0 = u*v - Sqr(t1, y) // t1 = v^2 - Mul(t2, t0, t1) // t2 = u*v^3 - Sqr(t0, t1) // t0 = v^4 - Mul(t1, t0, t2) // t1 = u*v^7 - - var Tab [4]*Elt - Tab[0] = &Elt{} - Tab[1] = &Elt{} - Tab[2] = t3 - Tab[3] = t1 - - *Tab[0] = *t1 - Sqr(Tab[0], Tab[0]) - Sqr(Tab[1], Tab[0]) - Sqr(Tab[1], Tab[1]) - Mul(Tab[1], Tab[1], Tab[3]) - Mul(Tab[0], Tab[0], Tab[1]) - Sqr(Tab[0], Tab[0]) - Mul(Tab[0], Tab[0], Tab[1]) - Sqr(Tab[1], Tab[0]) - for i := 0; i < 4; i++ { - Sqr(Tab[1], Tab[1]) - } - Mul(Tab[1], Tab[1], Tab[0]) - Sqr(Tab[2], Tab[1]) - for i := 0; i < 4; i++ { - Sqr(Tab[2], Tab[2]) - } - Mul(Tab[2], Tab[2], Tab[0]) - Sqr(Tab[1], Tab[2]) - for i := 0; i < 14; i++ { - Sqr(Tab[1], Tab[1]) - } - Mul(Tab[1], Tab[1], Tab[2]) - Sqr(Tab[2], Tab[1]) - for i := 0; i < 29; i++ { - Sqr(Tab[2], Tab[2]) - } - Mul(Tab[2], Tab[2], Tab[1]) - Sqr(Tab[1], Tab[2]) - for i := 0; i < 59; i++ { - Sqr(Tab[1], Tab[1]) - } - Mul(Tab[1], Tab[1], Tab[2]) - for i := 0; i < 5; i++ { - Sqr(Tab[1], Tab[1]) - } - Mul(Tab[1], Tab[1], Tab[0]) - Sqr(Tab[2], Tab[1]) - for i := 0; i < 124; i++ { - Sqr(Tab[2], Tab[2]) - } - Mul(Tab[2], Tab[2], Tab[1]) - Sqr(Tab[2], Tab[2]) - Sqr(Tab[2], Tab[2]) - Mul(Tab[2], Tab[2], Tab[3]) - - Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8 - // Checking whether y z^2 == x - Sqr(t0, z) // t0 = z^2 - Mul(t0, t0, y) // t0 = yz^2 - Sub(t1, t0, x) // t1 = t0-u - Add(t2, t0, x) // t2 = t0+u - if IsZero(t1) { - return true - } else if IsZero(t2) { - Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1) - return true - } else { - return false - } -} - -// Inv calculates z = 1/x mod p. -func Inv(z, x *Elt) { - x0, x1, x2 := &Elt{}, &Elt{}, &Elt{} - Sqr(x1, x) - Sqr(x0, x1) - Sqr(x0, x0) - Mul(x0, x0, x) - Mul(z, x0, x1) - Sqr(x1, z) - Mul(x0, x0, x1) - Sqr(x1, x0) - for i := 0; i < 4; i++ { - Sqr(x1, x1) - } - Mul(x0, x0, x1) - Sqr(x1, x0) - for i := 0; i < 9; i++ { - Sqr(x1, x1) - } - Mul(x1, x1, x0) - Sqr(x2, x1) - for i := 0; i < 19; i++ { - Sqr(x2, x2) - } - Mul(x2, x2, x1) - for i := 0; i < 10; i++ { - Sqr(x2, x2) - } - Mul(x2, x2, x0) - Sqr(x0, x2) - for i := 0; i < 49; i++ { - Sqr(x0, x0) - } - Mul(x0, x0, x2) - Sqr(x1, x0) - for i := 0; i < 99; i++ { - Sqr(x1, x1) - } - Mul(x1, x1, x0) - for i := 0; i < 50; i++ { - Sqr(x1, x1) - } - Mul(x1, x1, x2) - for i := 0; i < 5; i++ { - Sqr(x1, x1) - } - Mul(z, z, x1) -} - -// Cmov assigns y to x if n is 1. -func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } - -// Cswap interchanges x and y if n is 1. -func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } - -// Add calculates z = x+y mod p. -func Add(z, x, y *Elt) { add(z, x, y) } - -// Sub calculates z = x-y mod p. -func Sub(z, x, y *Elt) { sub(z, x, y) } - -// AddSub calculates (x,y) = (x+y mod p, x-y mod p). -func AddSub(x, y *Elt) { addsub(x, y) } - -// Mul calculates z = x*y mod p. -func Mul(z, x, y *Elt) { mul(z, x, y) } - -// Sqr calculates z = x^2 mod p. -func Sqr(z, x *Elt) { sqr(z, x) } - -// Modp ensures that z is between [0,p-1]. 
-func Modp(z *Elt) { modp(z) } diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go deleted file mode 100644 index 057f0d2803..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build amd64 && !purego -// +build amd64,!purego - -package fp25519 - -import ( - "golang.org/x/sys/cpu" -) - -var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX - -var _ = hasBmi2Adx - -func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } -func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } -func add(z, x, y *Elt) { addAmd64(z, x, y) } -func sub(z, x, y *Elt) { subAmd64(z, x, y) } -func addsub(x, y *Elt) { addsubAmd64(x, y) } -func mul(z, x, y *Elt) { mulAmd64(z, x, y) } -func sqr(z, x *Elt) { sqrAmd64(z, x) } -func modp(z *Elt) { modpAmd64(z) } - -//go:noescape -func cmovAmd64(x, y *Elt, n uint) - -//go:noescape -func cswapAmd64(x, y *Elt, n uint) - -//go:noescape -func addAmd64(z, x, y *Elt) - -//go:noescape -func subAmd64(z, x, y *Elt) - -//go:noescape -func addsubAmd64(x, y *Elt) - -//go:noescape -func mulAmd64(z, x, y *Elt) - -//go:noescape -func sqrAmd64(z, x *Elt) - -//go:noescape -func modpAmd64(z *Elt) diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h deleted file mode 100644 index b884b584ab..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h +++ /dev/null @@ -1,351 +0,0 @@ -// This code was imported from https://github.com/armfazh/rfc7748_precomputed - -// CHECK_BMI2ADX triggers bmi2adx if supported, -// otherwise it fallbacks to legacy code. -#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ - CMPB ·hasBmi2Adx(SB), $0 \ - JE label \ - bmi2adx \ - RET \ - label: \ - legacy \ - RET - -// cselect is a conditional move -// if b=1: it copies y into x; -// if b=0: x remains with the same value; -// if b<> 0,1: undefined. -// Uses: AX, DX, FLAGS -// Instr: x86_64, cmov -#define cselect(x,y,b) \ - TESTQ b, b \ - MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ - MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ - MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ - MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; - -// cswap is a conditional swap -// if b=1: x,y <- y,x; -// if b=0: x,y remain with the same values; -// if b<> 0,1: undefined. 
-// Uses: AX, DX, R8, FLAGS -// Instr: x86_64, cmov -#define cswap(x,y,b) \ - TESTQ b, b \ - MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ - MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ - MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ - MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; - -// additionLeg adds x and y and stores in z -// Uses: AX, DX, R8-R11, FLAGS -// Instr: x86_64, cmov -#define additionLeg(z,x,y) \ - MOVL $38, AX; \ - MOVL $0, DX; \ - MOVQ 0+x, R8; ADDQ 0+y, R8; \ - MOVQ 8+x, R9; ADCQ 8+y, R9; \ - MOVQ 16+x, R10; ADCQ 16+y, R10; \ - MOVQ 24+x, R11; ADCQ 24+y, R11; \ - CMOVQCS AX, DX; \ - ADDQ DX, R8; \ - ADCQ $0, R9; MOVQ R9, 8+z; \ - ADCQ $0, R10; MOVQ R10, 16+z; \ - ADCQ $0, R11; MOVQ R11, 24+z; \ - MOVL $0, DX; \ - CMOVQCS AX, DX; \ - ADDQ DX, R8; MOVQ R8, 0+z; - -// additionAdx adds x and y and stores in z -// Uses: AX, DX, R8-R11, FLAGS -// Instr: x86_64, cmov, adx -#define additionAdx(z,x,y) \ - MOVL $38, AX; \ - XORL DX, DX; \ - MOVQ 0+x, R8; ADCXQ 0+y, R8; \ - MOVQ 8+x, R9; ADCXQ 8+y, R9; \ - MOVQ 16+x, R10; ADCXQ 16+y, R10; \ - MOVQ 24+x, R11; ADCXQ 24+y, R11; \ - CMOVQCS AX, DX ; \ - XORL AX, AX; \ - ADCXQ DX, R8; \ - ADCXQ AX, R9; MOVQ R9, 8+z; \ - ADCXQ AX, R10; MOVQ R10, 16+z; \ - ADCXQ AX, R11; MOVQ R11, 24+z; \ - MOVL $38, DX; \ - CMOVQCS DX, AX; \ - ADDQ AX, R8; MOVQ R8, 0+z; - -// subtraction subtracts y from x and stores in z -// Uses: AX, DX, R8-R11, FLAGS -// Instr: x86_64, cmov -#define subtraction(z,x,y) \ - MOVL $38, AX; \ - MOVQ 0+x, R8; SUBQ 0+y, R8; \ - MOVQ 8+x, R9; SBBQ 8+y, R9; \ - MOVQ 16+x, R10; SBBQ 16+y, R10; \ - MOVQ 24+x, R11; SBBQ 24+y, R11; \ - MOVL $0, DX; \ - CMOVQCS AX, DX; \ - SUBQ DX, R8; \ - SBBQ $0, R9; MOVQ R9, 8+z; \ - SBBQ $0, R10; MOVQ R10, 16+z; \ - SBBQ $0, R11; MOVQ R11, 24+z; \ - MOVL $0, DX; \ - CMOVQCS AX, DX; \ - SUBQ DX, R8; MOVQ R8, 0+z; - -// integerMulAdx multiplies x and y and stores in z -// Uses: AX, DX, R8-R15, FLAGS -// Instr: x86_64, bmi2, adx -#define integerMulAdx(z,x,y) \ - MOVL $0,R15; \ - MOVQ 0+y, DX; XORL AX, AX; \ - MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \ - MULXQ 8+x, AX, R9; ADCXQ AX, R8; \ - MULXQ 16+x, AX, R10; ADCXQ AX, R9; \ - MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ - MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \ - MOVQ 8+y, DX; XORL AX, AX; \ - MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \ - MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \ - MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \ - MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \ - MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \ - MOVQ 16+y, DX; XORL AX, AX; \ - MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \ - MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \ - MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \ - MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \ - MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \ - MOVQ 24+y, DX; XORL AX, AX; \ - MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \ - MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \ - MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \ - MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \ - MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z; - -// integerMulLeg multiplies x and y and stores in z -// Uses: AX, DX, R8-R15, FLAGS -// Instr: x86_64 -#define 
integerMulLeg(z,x,y) \ - MOVQ 0+y, R8; \ - MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \ - MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ - MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ - MOVQ 24+x, AX; MULQ R8; \ - ADDQ R13, R15; \ - ADCQ R14, R10; MOVQ R10, 16+z; \ - ADCQ AX, R11; MOVQ R11, 24+z; \ - ADCQ $0, DX; MOVQ DX, 32+z; \ - MOVQ 8+y, R8; \ - MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ - MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ - MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ - MOVQ 24+x, AX; MULQ R8; \ - ADDQ R12, R15; MOVQ R15, 8+z; \ - ADCQ R13, R9; \ - ADCQ R14, R10; \ - ADCQ AX, R11; \ - ADCQ $0, DX; \ - ADCQ 16+z, R9; MOVQ R9, R15; \ - ADCQ 24+z, R10; MOVQ R10, 24+z; \ - ADCQ 32+z, R11; MOVQ R11, 32+z; \ - ADCQ $0, DX; MOVQ DX, 40+z; \ - MOVQ 16+y, R8; \ - MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ - MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ - MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ - MOVQ 24+x, AX; MULQ R8; \ - ADDQ R12, R15; MOVQ R15, 16+z; \ - ADCQ R13, R9; \ - ADCQ R14, R10; \ - ADCQ AX, R11; \ - ADCQ $0, DX; \ - ADCQ 24+z, R9; MOVQ R9, R15; \ - ADCQ 32+z, R10; MOVQ R10, 32+z; \ - ADCQ 40+z, R11; MOVQ R11, 40+z; \ - ADCQ $0, DX; MOVQ DX, 48+z; \ - MOVQ 24+y, R8; \ - MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ - MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ - MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ - MOVQ 24+x, AX; MULQ R8; \ - ADDQ R12, R15; MOVQ R15, 24+z; \ - ADCQ R13, R9; \ - ADCQ R14, R10; \ - ADCQ AX, R11; \ - ADCQ $0, DX; \ - ADCQ 32+z, R9; MOVQ R9, 32+z; \ - ADCQ 40+z, R10; MOVQ R10, 40+z; \ - ADCQ 48+z, R11; MOVQ R11, 48+z; \ - ADCQ $0, DX; MOVQ DX, 56+z; - -// integerSqrLeg squares x and stores in z -// Uses: AX, CX, DX, R8-R15, FLAGS -// Instr: x86_64 -#define integerSqrLeg(z,x) \ - MOVQ 0+x, R8; \ - MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \ - MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \ - MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \ - MOVQ 24+x, R8; \ - MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \ - MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \ - \ - ADDQ R14, R10;\ - ADCQ R15, R11; MOVL $0, R15;\ - ADCQ CX, R12;\ - ADCQ AX, R13;\ - ADCQ $0, DX; MOVQ DX, R14;\ - MOVQ 8+x, AX; MULQ 16+x;\ - \ - ADDQ AX, R11;\ - ADCQ DX, R12;\ - ADCQ $0, R13;\ - ADCQ $0, R14;\ - ADCQ $0, R15;\ - \ - SHLQ $1, R14, R15; MOVQ R15, 56+z;\ - SHLQ $1, R13, R14; MOVQ R14, 48+z;\ - SHLQ $1, R12, R13; MOVQ R13, 40+z;\ - SHLQ $1, R11, R12; MOVQ R12, 32+z;\ - SHLQ $1, R10, R11; MOVQ R11, 24+z;\ - SHLQ $1, R9, R10; MOVQ R10, 16+z;\ - SHLQ $1, R9; MOVQ R9, 8+z;\ - \ - MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\ - MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\ - MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\ - MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\ - \ - ADDQ 8+z, R9; MOVQ R9, 8+z;\ - ADCQ 16+z, R10; MOVQ R10, 16+z;\ - ADCQ 24+z, R11; MOVQ R11, 24+z;\ - ADCQ 32+z, R12; MOVQ R12, 32+z;\ - ADCQ 40+z, R13; MOVQ R13, 40+z;\ - ADCQ 48+z, R14; MOVQ R14, 48+z;\ - ADCQ 56+z, R15; MOVQ R15, 56+z; - -// integerSqrAdx squares x and stores in z -// Uses: AX, CX, DX, R8-R15, FLAGS -// Instr: x86_64, bmi2, adx -#define integerSqrAdx(z,x) \ - MOVQ 0+x, DX; /* A[0] */ \ - MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \ - MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \ - MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \ - MOVQ 24+x, DX; /* A[3] */ \ - MULXQ 8+x, 
R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \ - MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \ - MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \ - MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \ - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \ - XORL R15, R15; \ - ADOXQ AX, R10; ADCXQ R8, R8; \ - ADOXQ CX, R11; ADCXQ R9, R9; \ - ADOXQ R15, R12; ADCXQ R10, R10; \ - ADOXQ R15, R13; ADCXQ R11, R11; \ - ADOXQ R15, R14; ADCXQ R12, R12; \ - ;;;;;;;;;;;;;;; ADCXQ R13, R13; \ - ;;;;;;;;;;;;;;; ADCXQ R14, R14; \ - MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \ - ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \ - ADDQ CX, R8; MOVQ R8, 8+z; \ - MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \ - ADCQ AX, R9; MOVQ R9, 16+z; \ - ADCQ CX, R10; MOVQ R10, 24+z; \ - MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \ - ADCQ AX, R11; MOVQ R11, 32+z; \ - ADCQ CX, R12; MOVQ R12, 40+z; \ - MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \ - ADCQ AX, R13; MOVQ R13, 48+z; \ - ADCQ CX, R14; MOVQ R14, 56+z; - -// reduceFromDouble finds z congruent to x modulo p such that 0> 63) - // PUT BIT 255 IN CARRY FLAG AND CLEAR - x3 &^= 1 << 63 - - x0, c0 := bits.Add64(x0, cx, 0) - x1, c1 := bits.Add64(x1, 0, c0) - x2, c2 := bits.Add64(x2, 0, c1) - x3, _ = bits.Add64(x3, 0, c2) - - // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19 - // cx = C[255] ? 0 : 19 - cx = uint64(19) &^ (-(x3 >> 63)) - // CLEAR BIT 255 - x3 &^= 1 << 63 - - x0, c0 = bits.Sub64(x0, cx, 0) - x1, c1 = bits.Sub64(x1, 0, c0) - x2, c2 = bits.Sub64(x2, 0, c1) - x3, _ = bits.Sub64(x3, 0, c2) - - binary.LittleEndian.PutUint64(x[0*8:1*8], x0) - binary.LittleEndian.PutUint64(x[1*8:2*8], x1) - binary.LittleEndian.PutUint64(x[2*8:3*8], x2) - binary.LittleEndian.PutUint64(x[3*8:4*8], x3) -} - -func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) { - h0, l0 := bits.Mul64(x4, 38) - h1, l1 := bits.Mul64(x5, 38) - h2, l2 := bits.Mul64(x6, 38) - h3, l3 := bits.Mul64(x7, 38) - - l1, c0 := bits.Add64(h0, l1, 0) - l2, c1 := bits.Add64(h1, l2, c0) - l3, c2 := bits.Add64(h2, l3, c1) - l4, _ := bits.Add64(h3, 0, c2) - - l0, c0 = bits.Add64(l0, x0, 0) - l1, c1 = bits.Add64(l1, x1, c0) - l2, c2 = bits.Add64(l2, x2, c1) - l3, c3 := bits.Add64(l3, x3, c2) - l4, _ = bits.Add64(l4, 0, c3) - - _, l4 = bits.Mul64(l4, 38) - l0, c0 = bits.Add64(l0, l4, 0) - z1, c1 := bits.Add64(l1, 0, c0) - z2, c2 := bits.Add64(l2, 0, c1) - z3, c3 := bits.Add64(l3, 0, c2) - z0, _ := bits.Add64(l0, (-c3)&38, 0) - - binary.LittleEndian.PutUint64(z[0*8:1*8], z0) - binary.LittleEndian.PutUint64(z[1*8:2*8], z1) - binary.LittleEndian.PutUint64(z[2*8:3*8], z2) - binary.LittleEndian.PutUint64(z[3*8:4*8], z3) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go deleted file mode 100644 index 26ca4d01b7..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !amd64 || purego -// +build !amd64 purego - -package fp25519 - -func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } -func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } -func add(z, x, y *Elt) { addGeneric(z, x, y) } -func sub(z, x, y *Elt) { subGeneric(z, x, y) } -func addsub(x, y *Elt) { addsubGeneric(x, y) } -func mul(z, x, y *Elt) { mulGeneric(z, x, y) } -func sqr(z, x *Elt) { sqrGeneric(z, x) } -func modp(z *Elt) { modpGeneric(z) } diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp.go 
b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp.go deleted file mode 100644 index a5e36600bb..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1). -package fp448 - -import ( - "errors" - - "github.com/cloudflare/circl/internal/conv" -) - -// Size in bytes of an element. -const Size = 56 - -// Elt is a prime field element. -type Elt [Size]byte - -func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } - -// p is the prime modulus 2^448-2^224-1. -var p = Elt{ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -} - -// P returns the prime modulus 2^448-2^224-1. -func P() Elt { return p } - -// ToBytes stores in b the little-endian byte representation of x. -func ToBytes(b []byte, x *Elt) error { - if len(b) != Size { - return errors.New("wrong size") - } - Modp(x) - copy(b, x[:]) - return nil -} - -// IsZero returns true if x is equal to 0. -func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } - -// IsOne returns true if x is equal to 1. -func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} } - -// SetOne assigns x=1. -func SetOne(x *Elt) { *x = Elt{1} } - -// One returns the 1 element. -func One() (x Elt) { x = Elt{1}; return } - -// Neg calculates z = -x. -func Neg(z, x *Elt) { Sub(z, &p, x) } - -// Modp ensures that z is between [0,p-1]. -func Modp(z *Elt) { Sub(z, z, &p) } - -// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue. If so, -// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue, -// and z = sqrt(-x/y). -func InvSqrt(z, x, y *Elt) (isQR bool) { - // First note that x^(2(k+1)) = x^(p-1)/2 * x = legendre(x) * x - // so that's x if x is a quadratic residue and -x otherwise. - // Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y). - // So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y). - // Thus if x and y are quadratic residues, then z is indeed sqrt(x/y). - t0, t1 := &Elt{}, &Elt{} - Mul(t0, x, y) // x*y - Sqr(t1, y) // y^2 - Mul(t1, t0, t1) // x*y^3 - powPminus3div4(z, t1) // (x*y^3)^k - Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1) - - // Check if x/y is a quadratic residue - Sqr(t0, z) // z^2 - Mul(t0, t0, y) // y*z^2 - Sub(t0, t0, x) // y*z^2-x - return IsZero(t0) -} - -// Inv calculates z = 1/x mod p. -func Inv(z, x *Elt) { - // Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4. - t := &Elt{} - powPminus3div4(t, x) // t = x^k - Sqr(t, t) // t = x^2k - Sqr(t, t) // t = x^4k - Mul(z, t, x) // z = x^(4k+1) -} - -// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4. 
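Inv above is Fermat's little theorem in disguise: for prime p and x not divisible by p, x^(p-2) == x^(-1) (mod p), and powPminus3div4 (which follows) supplies most of that exponentiation as a fixed addition chain. A quick variable-time check of the identity for the Goldilocks prime, using math/big:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	// p = 2^448 - 2^224 - 1, the Goldilocks prime.
	p := new(big.Int).Lsh(one, 448)
	p.Sub(p, new(big.Int).Lsh(one, 224))
	p.Sub(p, one)

	x := big.NewInt(12345)
	exp := new(big.Int).Sub(p, big.NewInt(2))
	inv := new(big.Int).Exp(x, exp, p) // x^(p-2) mod p
	check := new(big.Int).Mul(x, inv)
	check.Mod(check, p)
	fmt.Println(check.Cmp(one) == 0) // x * x^(p-2) == 1 (mod p): true
}
```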
-func powPminus3div4(z, x *Elt) { - x0, x1 := &Elt{}, &Elt{} - Sqr(z, x) - Mul(z, z, x) - Sqr(x0, z) - Mul(x0, x0, x) - Sqr(z, x0) - Sqr(z, z) - Sqr(z, z) - Mul(z, z, x0) - Sqr(x1, z) - for i := 0; i < 5; i++ { - Sqr(x1, x1) - } - Mul(x1, x1, z) - Sqr(z, x1) - for i := 0; i < 11; i++ { - Sqr(z, z) - } - Mul(z, z, x1) - Sqr(z, z) - Sqr(z, z) - Sqr(z, z) - Mul(z, z, x0) - Sqr(x1, z) - for i := 0; i < 26; i++ { - Sqr(x1, x1) - } - Mul(x1, x1, z) - Sqr(z, x1) - for i := 0; i < 53; i++ { - Sqr(z, z) - } - Mul(z, z, x1) - Sqr(z, z) - Sqr(z, z) - Sqr(z, z) - Mul(z, z, x0) - Sqr(x1, z) - for i := 0; i < 110; i++ { - Sqr(x1, x1) - } - Mul(x1, x1, z) - Sqr(z, x1) - Mul(z, z, x) - for i := 0; i < 223; i++ { - Sqr(z, z) - } - Mul(z, z, x1) -} - -// Cmov assigns y to x if n is 1. -func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } - -// Cswap interchanges x and y if n is 1. -func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } - -// Add calculates z = x+y mod p. -func Add(z, x, y *Elt) { add(z, x, y) } - -// Sub calculates z = x-y mod p. -func Sub(z, x, y *Elt) { sub(z, x, y) } - -// AddSub calculates (x,y) = (x+y mod p, x-y mod p). -func AddSub(x, y *Elt) { addsub(x, y) } - -// Mul calculates z = x*y mod p. -func Mul(z, x, y *Elt) { mul(z, x, y) } - -// Sqr calculates z = x^2 mod p. -func Sqr(z, x *Elt) { sqr(z, x) } diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go deleted file mode 100644 index 6a12209a70..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build amd64 && !purego -// +build amd64,!purego - -package fp448 - -import ( - "golang.org/x/sys/cpu" -) - -var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX - -var _ = hasBmi2Adx - -func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } -func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } -func add(z, x, y *Elt) { addAmd64(z, x, y) } -func sub(z, x, y *Elt) { subAmd64(z, x, y) } -func addsub(x, y *Elt) { addsubAmd64(x, y) } -func mul(z, x, y *Elt) { mulAmd64(z, x, y) } -func sqr(z, x *Elt) { sqrAmd64(z, x) } - -/* Functions defined in fp_amd64.s */ - -//go:noescape -func cmovAmd64(x, y *Elt, n uint) - -//go:noescape -func cswapAmd64(x, y *Elt, n uint) - -//go:noescape -func addAmd64(z, x, y *Elt) - -//go:noescape -func subAmd64(z, x, y *Elt) - -//go:noescape -func addsubAmd64(x, y *Elt) - -//go:noescape -func mulAmd64(z, x, y *Elt) - -//go:noescape -func sqrAmd64(z, x *Elt) diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h deleted file mode 100644 index 536fe5bdfe..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h +++ /dev/null @@ -1,591 +0,0 @@ -// This code was imported from https://github.com/armfazh/rfc7748_precomputed - -// CHECK_BMI2ADX triggers bmi2adx if supported, -// otherwise it fallbacks to legacy code. -#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ - CMPB ·hasBmi2Adx(SB), $0 \ - JE label \ - bmi2adx \ - RET \ - label: \ - legacy \ - RET - -// cselect is a conditional move -// if b=1: it copies y into x; -// if b=0: x remains with the same value; -// if b<> 0,1: undefined. 
-// Uses: AX, DX, FLAGS -// Instr: x86_64, cmov -#define cselect(x,y,b) \ - TESTQ b, b \ - MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ - MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ - MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ - MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \ - MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \ - MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \ - MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x; - -// cswap is a conditional swap -// if b=1: x,y <- y,x; -// if b=0: x,y remain with the same values; -// if b<> 0,1: undefined. -// Uses: AX, DX, R8, FLAGS -// Instr: x86_64, cmov -#define cswap(x,y,b) \ - TESTQ b, b \ - MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ - MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ - MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ - MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \ - MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \ - MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \ - MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y; - -// additionLeg adds x and y and stores in z -// Uses: AX, DX, R8-R14, FLAGS -// Instr: x86_64 -#define additionLeg(z,x,y) \ - MOVQ 0+x, R8; ADDQ 0+y, R8; \ - MOVQ 8+x, R9; ADCQ 8+y, R9; \ - MOVQ 16+x, R10; ADCQ 16+y, R10; \ - MOVQ 24+x, R11; ADCQ 24+y, R11; \ - MOVQ 32+x, R12; ADCQ 32+y, R12; \ - MOVQ 40+x, R13; ADCQ 40+y, R13; \ - MOVQ 48+x, R14; ADCQ 48+y, R14; \ - MOVQ $0, AX; ADCQ $0, AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - ADDQ AX, R8; MOVQ $0, AX; \ - ADCQ $0, R9; \ - ADCQ $0, R10; \ - ADCQ DX, R11; \ - ADCQ $0, R12; \ - ADCQ $0, R13; \ - ADCQ $0, R14; \ - ADCQ $0, AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - ADDQ AX, R8; MOVQ R8, 0+z; \ - ADCQ $0, R9; MOVQ R9, 8+z; \ - ADCQ $0, R10; MOVQ R10, 16+z; \ - ADCQ DX, R11; MOVQ R11, 24+z; \ - ADCQ $0, R12; MOVQ R12, 32+z; \ - ADCQ $0, R13; MOVQ R13, 40+z; \ - ADCQ $0, R14; MOVQ R14, 48+z; - - -// additionAdx adds x and y and stores in z -// Uses: AX, DX, R8-R15, FLAGS -// Instr: x86_64, adx -#define additionAdx(z,x,y) \ - MOVL $32, R15; \ - XORL DX, DX; \ - MOVQ 0+x, R8; ADCXQ 0+y, R8; \ - MOVQ 8+x, R9; ADCXQ 8+y, R9; \ - MOVQ 16+x, R10; ADCXQ 16+y, R10; \ - MOVQ 24+x, R11; ADCXQ 24+y, R11; \ - MOVQ 32+x, R12; ADCXQ 32+y, R12; \ - MOVQ 40+x, R13; ADCXQ 40+y, R13; \ - MOVQ 48+x, R14; ADCXQ 48+y, R14; \ - ;;;;;;;;;;;;;;; ADCXQ DX, DX; \ - XORL AX, AX; \ - ADCXQ DX, R8; SHLXQ R15, DX, DX; \ - ADCXQ AX, R9; \ - ADCXQ AX, R10; \ - ADCXQ DX, R11; \ - ADCXQ AX, R12; \ - ADCXQ AX, R13; \ - ADCXQ AX, R14; \ - ADCXQ AX, AX; \ - XORL DX, DX; \ - ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \ - ADCXQ DX, R9; MOVQ R9, 8+z; \ - ADCXQ DX, R10; MOVQ R10, 16+z; \ - ADCXQ AX, R11; MOVQ R11, 24+z; \ - ADCXQ DX, R12; MOVQ R12, 32+z; \ - ADCXQ DX, R13; MOVQ R13, 40+z; \ - ADCXQ DX, R14; MOVQ R14, 48+z; - -// subtraction subtracts y from x and stores in z -// Uses: AX, DX, R8-R14, FLAGS -// Instr: x86_64 -#define subtraction(z,x,y) \ - MOVQ 0+x, R8; SUBQ 0+y, R8; \ - MOVQ 8+x, R9; SBBQ 8+y, R9; \ - MOVQ 16+x, R10; SBBQ 16+y, R10; \ - MOVQ 24+x, R11; SBBQ 24+y, R11; \ - MOVQ 
32+x, R12; SBBQ 32+y, R12; \ - MOVQ 40+x, R13; SBBQ 40+y, R13; \ - MOVQ 48+x, R14; SBBQ 48+y, R14; \ - MOVQ $0, AX; SETCS AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - SUBQ AX, R8; MOVQ $0, AX; \ - SBBQ $0, R9; \ - SBBQ $0, R10; \ - SBBQ DX, R11; \ - SBBQ $0, R12; \ - SBBQ $0, R13; \ - SBBQ $0, R14; \ - SETCS AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - SUBQ AX, R8; MOVQ R8, 0+z; \ - SBBQ $0, R9; MOVQ R9, 8+z; \ - SBBQ $0, R10; MOVQ R10, 16+z; \ - SBBQ DX, R11; MOVQ R11, 24+z; \ - SBBQ $0, R12; MOVQ R12, 32+z; \ - SBBQ $0, R13; MOVQ R13, 40+z; \ - SBBQ $0, R14; MOVQ R14, 48+z; - -// maddBmi2Adx multiplies x and y and accumulates in z -// Uses: AX, DX, R15, FLAGS -// Instr: x86_64, bmi2, adx -#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \ - MOVQ i+y, DX; XORL AX, AX; \ - MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \ - MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \ - MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \ - MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \ - MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \ - MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \ - MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \ - ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0; - -// integerMulAdx multiplies x and y and stores in z -// Uses: AX, DX, R8-R15, FLAGS -// Instr: x86_64, bmi2, adx -#define integerMulAdx(z,x,y) \ - MOVL $0,R15; \ - MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \ - MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \ - MULXQ 8+x, AX, R10; ADCXQ AX, R9; \ - MULXQ 16+x, AX, R11; ADCXQ AX, R10; \ - MULXQ 24+x, AX, R12; ADCXQ AX, R11; \ - MULXQ 32+x, AX, R13; ADCXQ AX, R12; \ - MULXQ 40+x, AX, R14; ADCXQ AX, R13; \ - MULXQ 48+x, AX, R15; ADCXQ AX, R14; \ - ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \ - maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \ - maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \ - maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \ - maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \ - maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \ - maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \ - MOVQ R15, 56+z; \ - MOVQ R9, 64+z; \ - MOVQ R10, 72+z; \ - MOVQ R11, 80+z; \ - MOVQ R12, 88+z; \ - MOVQ R13, 96+z; \ - MOVQ R14, 104+z; - -// maddLegacy multiplies x and y and accumulates in z -// Uses: AX, DX, R15, FLAGS -// Instr: x86_64 -#define maddLegacy(z,x,y,i) \ - MOVQ i+y, R15; \ - MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ - MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ - MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ - MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ - MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ - MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ - MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ - ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \ - ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \ - ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \ - ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \ - ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \ - ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \ - ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \ - ADCQ $0, DX; MOVQ DX, 56+i+z; - -// integerMulLeg multiplies x and y and stores in z -// Uses: AX, DX, R8-R15, FLAGS -// Instr: x86_64 -#define integerMulLeg(z,x,y) \ - MOVQ 0+y, R15; \ - MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \ - MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ - MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \ - MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ 
$0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \ - MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \ - MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \ - MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \ - maddLegacy(z,x,y, 8) \ - maddLegacy(z,x,y,16) \ - maddLegacy(z,x,y,24) \ - maddLegacy(z,x,y,32) \ - maddLegacy(z,x,y,40) \ - maddLegacy(z,x,y,48) - -// integerSqrLeg squares x and stores in z -// Uses: AX, CX, DX, R8-R15, FLAGS -// Instr: x86_64 -#define integerSqrLeg(z,x) \ - XORL R15, R15; \ - MOVQ 0+x, CX; \ - MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \ - ADDQ CX, CX; ADCQ $0, R15; \ - MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ - MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ - MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ - MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ - MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ - MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ - \ - MOVQ 8+x, CX; \ - MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ - ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \ - MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ - ADDQ 8+x, CX; ADCQ $0, R15; \ - MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \ - MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \ - MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \ - MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ - MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \ - \ - MOVQ 16+x, CX; \ - MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ - ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \ - MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ - ADDQ 16+x, CX; ADCQ $0, R15; \ - MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \ - MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ - MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \ - MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \ - \ - MOVQ 24+x, CX; \ - MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ - ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \ - MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ - ADDQ 24+x, CX; ADCQ $0, R15; \ - MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \ - MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \ - MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \ - \ - MOVQ 32+x, CX; \ - MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ - ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \ - MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ - ADDQ 32+x, CX; ADCQ $0, R15; \ - MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \ - MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, 
DX; MOVQ DX,R12; \ - \ - XORL R13, R13; \ - XORL R14, R14; \ - MOVQ 40+x, CX; \ - MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ - ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \ - MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ - ADDQ 40+x, CX; ADCQ $0, R15; \ - MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \ - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \ - \ - XORL R9, R9; \ - MOVQ 48+x, CX; \ - MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ - ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \ - MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ - ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z; - - -// integerSqrAdx squares x and stores in z -// Uses: AX, CX, DX, R8-R15, FLAGS -// Instr: x86_64, bmi2, adx -#define integerSqrAdx(z,x) \ - XORL R15, R15; \ - MOVQ 0+x, DX; \ - ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \ - ADDQ DX, DX; ADCQ $0, R15; CLC; \ - MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \ - MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\ - MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ - MULXQ 32+x, AX, R12; ADCXQ AX, R11; \ - MULXQ 40+x, AX, R13; ADCXQ AX, R12; \ - MULXQ 48+x, AX, R14; ADCXQ AX, R13; \ - ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \ - \ - MOVQ 8+x, DX; \ - MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ - MULXQ AX, AX, CX; \ - MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \ - ADDQ AX, R9; MOVQ R9, 16+z; \ - ADCQ CX, R8; \ - ADCQ $0, R11; \ - ADDQ 8+x, DX; \ - ADCQ $0, R15; \ - XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ - MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \ - MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \ - MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \ - MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \ - MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ - ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \ - \ - MOVQ 16+x, DX; \ - MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ - MULXQ AX, AX, CX; \ - MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \ - ADDQ AX, R11; MOVQ R11, 32+z; \ - ADCQ CX, R8; \ - ADCQ $0, R13; \ - ADDQ 16+x, DX; \ - ADCQ $0, R15; \ - XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ - MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \ - MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \ - MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ - MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \ - ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \ - \ - MOVQ 24+x, DX; \ - MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ - MULXQ AX, AX, CX; \ - MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \ - ADDQ AX, R13; MOVQ R13, 48+z; \ - ADCQ CX, R8; \ - ADCQ $0, R9; \ - ADDQ 24+x, DX; \ - ADCQ $0, R15; \ - XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \ - MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \ - MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \ - MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \ - ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \ - \ - MOVQ 32+x, DX; \ - MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ - MULXQ AX, AX, CX; \ - MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \ - ADDQ AX, R9; MOVQ R9, 64+z; \ - ADCQ CX, R8; \ - ADCQ $0, R11; \ - ADDQ 32+x, DX; \ - ADCQ $0, R15; \ - XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ - MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ 
R10, 72+z; \ - MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \ - ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \ - \ - MOVQ 40+x, DX; \ - MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ - MULXQ AX, AX, CX; \ - MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \ - ADDQ AX, R11; MOVQ R11, 80+z; \ - ADCQ CX, R8; \ - ADCQ $0, R13; \ - ADDQ 40+x, DX; \ - ADCQ $0, R15; \ - XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ - MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \ - ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \ - \ - MOVQ 48+x, DX; \ - MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ - MULXQ AX, AX, CX; \ - MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \ - XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \ - ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \ - ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z; - -// reduceFromDoubleLeg finds a z=x modulo p such that z<2^448 and stores in z -// Uses: AX, R8-R15, FLAGS -// Instr: x86_64 -#define reduceFromDoubleLeg(z,x) \ - /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ - /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ - MOVQ 80+x,AX; MOVQ AX,R10; \ - MOVQ $0xFFFFFFFF00000000, R8; \ - ANDQ R8,R10; \ - \ - MOVQ $0,R14; \ - MOVQ 104+x,R13; SHLQ $1,R13,R14; \ - MOVQ 96+x,R12; SHLQ $1,R12,R13; \ - MOVQ 88+x,R11; SHLQ $1,R11,R12; \ - MOVQ 72+x, R9; SHLQ $1,R10,R11; \ - MOVQ 64+x, R8; SHLQ $1,R10; \ - MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ - MOVQ 56+x,R15; \ - \ - ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ - ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ - ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \ - ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ - ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ - ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ - ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ - ADCQ $0,R14; \ - /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ - /* ( r9, r8, r15, r13, r12, r11, r10) */ \ - MOVQ R10, AX; \ - SHRQ $32,R11,R10; \ - SHRQ $32,R12,R11; \ - SHRQ $32,R13,R12; \ - SHRQ $32,R15,R13; \ - SHRQ $32, R8,R15; \ - SHRQ $32, R9, R8; \ - SHRQ $32, AX, R9; \ - \ - ADDQ 0+z,R10; \ - ADCQ 8+z,R11; \ - ADCQ 16+z,R12; \ - ADCQ 24+z,R13; \ - ADCQ 32+z,R15; \ - ADCQ 40+z, R8; \ - ADCQ 48+z, R9; \ - ADCQ $0,R14; \ - /* ( c7) + (c6,...,c0) */ \ - /* (r14) */ \ - MOVQ R14, AX; SHLQ $32, AX; \ - ADDQ R14,R10; MOVQ $0,R14; \ - ADCQ $0,R11; \ - ADCQ $0,R12; \ - ADCQ AX,R13; \ - ADCQ $0,R15; \ - ADCQ $0, R8; \ - ADCQ $0, R9; \ - ADCQ $0,R14; \ - /* ( c7) + (c6,...,c0) */ \ - /* (r14) */ \ - MOVQ R14, AX; SHLQ $32,AX; \ - ADDQ R14,R10; MOVQ R10, 0+z; \ - ADCQ $0,R11; MOVQ R11, 8+z; \ - ADCQ $0,R12; MOVQ R12,16+z; \ - ADCQ AX,R13; MOVQ R13,24+z; \ - ADCQ $0,R15; MOVQ R15,32+z; \ - ADCQ $0, R8; MOVQ R8,40+z; \ - ADCQ $0, R9; MOVQ R9,48+z; - -// reduceFromDoubleAdx finds a z=x modulo p such that z<2^448 and stores in z -// Uses: AX, R8-R15, FLAGS -// Instr: x86_64, adx -#define reduceFromDoubleAdx(z,x) \ - /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ - /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ - MOVQ 80+x,AX; MOVQ AX,R10; \ - MOVQ $0xFFFFFFFF00000000, R8; \ - ANDQ R8,R10; \ - \ - MOVQ $0,R14; \ - MOVQ 104+x,R13; SHLQ $1,R13,R14; \ - MOVQ 96+x,R12; SHLQ $1,R12,R13; \ - MOVQ 88+x,R11; SHLQ $1,R11,R12; \ - MOVQ 72+x, R9; SHLQ $1,R10,R11; \ - MOVQ 64+x, R8; SHLQ $1,R10; \ - MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ - MOVQ 56+x,R15; \ - \ - XORL AX,AX; \ - ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ - ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ - ADCXQ 16+x, R9; MOVQ R9,16+z; 
MOVQ 72+x, R9; \ - ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ - ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ - ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ - ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ - ADCXQ AX,R14; \ - /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ - /* ( r9, r8, r15, r13, r12, r11, r10) */ \ - MOVQ R10, AX; \ - SHRQ $32,R11,R10; \ - SHRQ $32,R12,R11; \ - SHRQ $32,R13,R12; \ - SHRQ $32,R15,R13; \ - SHRQ $32, R8,R15; \ - SHRQ $32, R9, R8; \ - SHRQ $32, AX, R9; \ - \ - XORL AX,AX; \ - ADCXQ 0+z,R10; \ - ADCXQ 8+z,R11; \ - ADCXQ 16+z,R12; \ - ADCXQ 24+z,R13; \ - ADCXQ 32+z,R15; \ - ADCXQ 40+z, R8; \ - ADCXQ 48+z, R9; \ - ADCXQ AX,R14; \ - /* ( c7) + (c6,...,c0) */ \ - /* (r14) */ \ - MOVQ R14, AX; SHLQ $32, AX; \ - CLC; \ - ADCXQ R14,R10; MOVQ $0,R14; \ - ADCXQ R14,R11; \ - ADCXQ R14,R12; \ - ADCXQ AX,R13; \ - ADCXQ R14,R15; \ - ADCXQ R14, R8; \ - ADCXQ R14, R9; \ - ADCXQ R14,R14; \ - /* ( c7) + (c6,...,c0) */ \ - /* (r14) */ \ - MOVQ R14, AX; SHLQ $32, AX; \ - CLC; \ - ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \ - ADCXQ R14,R11; MOVQ R11, 8+z; \ - ADCXQ R14,R12; MOVQ R12,16+z; \ - ADCXQ AX,R13; MOVQ R13,24+z; \ - ADCXQ R14,R15; MOVQ R15,32+z; \ - ADCXQ R14, R8; MOVQ R8,40+z; \ - ADCXQ R14, R9; MOVQ R9,48+z; - -// addSub calculates two operations: x,y = x+y,x-y -// Uses: AX, DX, R8-R15, FLAGS -#define addSub(x,y) \ - MOVQ 0+x, R8; ADDQ 0+y, R8; \ - MOVQ 8+x, R9; ADCQ 8+y, R9; \ - MOVQ 16+x, R10; ADCQ 16+y, R10; \ - MOVQ 24+x, R11; ADCQ 24+y, R11; \ - MOVQ 32+x, R12; ADCQ 32+y, R12; \ - MOVQ 40+x, R13; ADCQ 40+y, R13; \ - MOVQ 48+x, R14; ADCQ 48+y, R14; \ - MOVQ $0, AX; ADCQ $0, AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - ADDQ AX, R8; MOVQ $0, AX; \ - ADCQ $0, R9; \ - ADCQ $0, R10; \ - ADCQ DX, R11; \ - ADCQ $0, R12; \ - ADCQ $0, R13; \ - ADCQ $0, R14; \ - ADCQ $0, AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \ - ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \ - ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \ - ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \ - ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \ - ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \ - ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \ - SUBQ 0+y, R8; \ - SBBQ 8+y, R9; \ - SBBQ 16+y, R10; \ - SBBQ 24+y, R11; \ - SBBQ 32+y, R12; \ - SBBQ 40+y, R13; \ - SBBQ 48+y, R14; \ - MOVQ $0, AX; SETCS AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - SUBQ AX, R8; MOVQ $0, AX; \ - SBBQ $0, R9; \ - SBBQ $0, R10; \ - SBBQ DX, R11; \ - SBBQ $0, R12; \ - SBBQ $0, R13; \ - SBBQ $0, R14; \ - SETCS AX; \ - MOVQ AX, DX; \ - SHLQ $32, DX; \ - SUBQ AX, R8; MOVQ R8, 0+y; \ - SBBQ $0, R9; MOVQ R9, 8+y; \ - SBBQ $0, R10; MOVQ R10, 16+y; \ - SBBQ DX, R11; MOVQ R11, 24+y; \ - SBBQ $0, R12; MOVQ R12, 32+y; \ - SBBQ $0, R13; MOVQ R13, 40+y; \ - SBBQ $0, R14; MOVQ R14, 48+y; diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s deleted file mode 100644 index 3f1f07c986..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s +++ /dev/null @@ -1,75 +0,0 @@ -//go:build amd64 && !purego -// +build amd64,!purego - -#include "textflag.h" -#include "fp_amd64.h" - -// func cmovAmd64(x, y *Elt, n uint) -TEXT ·cmovAmd64(SB),NOSPLIT,$0-24 - MOVQ x+0(FP), DI - MOVQ y+8(FP), SI - MOVQ n+16(FP), BX - cselect(0(DI),0(SI),BX) - RET - -// func cswapAmd64(x, y *Elt, n 
uint) -TEXT ·cswapAmd64(SB),NOSPLIT,$0-24 - MOVQ x+0(FP), DI - MOVQ y+8(FP), SI - MOVQ n+16(FP), BX - cswap(0(DI),0(SI),BX) - RET - -// func subAmd64(z, x, y *Elt) -TEXT ·subAmd64(SB),NOSPLIT,$0-24 - MOVQ z+0(FP), DI - MOVQ x+8(FP), SI - MOVQ y+16(FP), BX - subtraction(0(DI),0(SI),0(BX)) - RET - -// func addsubAmd64(x, y *Elt) -TEXT ·addsubAmd64(SB),NOSPLIT,$0-16 - MOVQ x+0(FP), DI - MOVQ y+8(FP), SI - addSub(0(DI),0(SI)) - RET - -#define addLegacy \ - additionLeg(0(DI),0(SI),0(BX)) -#define addBmi2Adx \ - additionAdx(0(DI),0(SI),0(BX)) - -#define mulLegacy \ - integerMulLeg(0(SP),0(SI),0(BX)) \ - reduceFromDoubleLeg(0(DI),0(SP)) -#define mulBmi2Adx \ - integerMulAdx(0(SP),0(SI),0(BX)) \ - reduceFromDoubleAdx(0(DI),0(SP)) - -#define sqrLegacy \ - integerSqrLeg(0(SP),0(SI)) \ - reduceFromDoubleLeg(0(DI),0(SP)) -#define sqrBmi2Adx \ - integerSqrAdx(0(SP),0(SI)) \ - reduceFromDoubleAdx(0(DI),0(SP)) - -// func addAmd64(z, x, y *Elt) -TEXT ·addAmd64(SB),NOSPLIT,$0-24 - MOVQ z+0(FP), DI - MOVQ x+8(FP), SI - MOVQ y+16(FP), BX - CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx) - -// func mulAmd64(z, x, y *Elt) -TEXT ·mulAmd64(SB),NOSPLIT,$112-24 - MOVQ z+0(FP), DI - MOVQ x+8(FP), SI - MOVQ y+16(FP), BX - CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx) - -// func sqrAmd64(z, x *Elt) -TEXT ·sqrAmd64(SB),NOSPLIT,$112-16 - MOVQ z+0(FP), DI - MOVQ x+8(FP), SI - CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx) diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go deleted file mode 100644 index 47a0b63205..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go +++ /dev/null @@ -1,339 +0,0 @@ -package fp448 - -import ( - "encoding/binary" - "math/bits" -) - -func cmovGeneric(x, y *Elt, n uint) { - m := -uint64(n & 0x1) - x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) - x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) - x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) - x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) - x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) - x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) - x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) - - y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) - y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) - y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) - y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) - y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) - y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) - y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) - - x0 = (x0 &^ m) | (y0 & m) - x1 = (x1 &^ m) | (y1 & m) - x2 = (x2 &^ m) | (y2 & m) - x3 = (x3 &^ m) | (y3 & m) - x4 = (x4 &^ m) | (y4 & m) - x5 = (x5 &^ m) | (y5 & m) - x6 = (x6 &^ m) | (y6 & m) - - binary.LittleEndian.PutUint64(x[0*8:1*8], x0) - binary.LittleEndian.PutUint64(x[1*8:2*8], x1) - binary.LittleEndian.PutUint64(x[2*8:3*8], x2) - binary.LittleEndian.PutUint64(x[3*8:4*8], x3) - binary.LittleEndian.PutUint64(x[4*8:5*8], x4) - binary.LittleEndian.PutUint64(x[5*8:6*8], x5) - binary.LittleEndian.PutUint64(x[6*8:7*8], x6) -} - -func cswapGeneric(x, y *Elt, n uint) { - m := -uint64(n & 0x1) - x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) - x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) - x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) - x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) - x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) - x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) - x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) - - y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) 
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) - y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) - y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) - y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) - y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) - y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) - - t0 := m & (x0 ^ y0) - t1 := m & (x1 ^ y1) - t2 := m & (x2 ^ y2) - t3 := m & (x3 ^ y3) - t4 := m & (x4 ^ y4) - t5 := m & (x5 ^ y5) - t6 := m & (x6 ^ y6) - x0 ^= t0 - x1 ^= t1 - x2 ^= t2 - x3 ^= t3 - x4 ^= t4 - x5 ^= t5 - x6 ^= t6 - y0 ^= t0 - y1 ^= t1 - y2 ^= t2 - y3 ^= t3 - y4 ^= t4 - y5 ^= t5 - y6 ^= t6 - - binary.LittleEndian.PutUint64(x[0*8:1*8], x0) - binary.LittleEndian.PutUint64(x[1*8:2*8], x1) - binary.LittleEndian.PutUint64(x[2*8:3*8], x2) - binary.LittleEndian.PutUint64(x[3*8:4*8], x3) - binary.LittleEndian.PutUint64(x[4*8:5*8], x4) - binary.LittleEndian.PutUint64(x[5*8:6*8], x5) - binary.LittleEndian.PutUint64(x[6*8:7*8], x6) - - binary.LittleEndian.PutUint64(y[0*8:1*8], y0) - binary.LittleEndian.PutUint64(y[1*8:2*8], y1) - binary.LittleEndian.PutUint64(y[2*8:3*8], y2) - binary.LittleEndian.PutUint64(y[3*8:4*8], y3) - binary.LittleEndian.PutUint64(y[4*8:5*8], y4) - binary.LittleEndian.PutUint64(y[5*8:6*8], y5) - binary.LittleEndian.PutUint64(y[6*8:7*8], y6) -} - -func addGeneric(z, x, y *Elt) { - x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) - x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) - x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) - x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) - x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) - x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) - x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) - - y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) - y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) - y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) - y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) - y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) - y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) - y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) - - z0, c0 := bits.Add64(x0, y0, 0) - z1, c1 := bits.Add64(x1, y1, c0) - z2, c2 := bits.Add64(x2, y2, c1) - z3, c3 := bits.Add64(x3, y3, c2) - z4, c4 := bits.Add64(x4, y4, c3) - z5, c5 := bits.Add64(x5, y5, c4) - z6, z7 := bits.Add64(x6, y6, c5) - - z0, c0 = bits.Add64(z0, z7, 0) - z1, c1 = bits.Add64(z1, 0, c0) - z2, c2 = bits.Add64(z2, 0, c1) - z3, c3 = bits.Add64(z3, z7<<32, c2) - z4, c4 = bits.Add64(z4, 0, c3) - z5, c5 = bits.Add64(z5, 0, c4) - z6, z7 = bits.Add64(z6, 0, c5) - - z0, c0 = bits.Add64(z0, z7, 0) - z1, c1 = bits.Add64(z1, 0, c0) - z2, c2 = bits.Add64(z2, 0, c1) - z3, c3 = bits.Add64(z3, z7<<32, c2) - z4, c4 = bits.Add64(z4, 0, c3) - z5, c5 = bits.Add64(z5, 0, c4) - z6, _ = bits.Add64(z6, 0, c5) - - binary.LittleEndian.PutUint64(z[0*8:1*8], z0) - binary.LittleEndian.PutUint64(z[1*8:2*8], z1) - binary.LittleEndian.PutUint64(z[2*8:3*8], z2) - binary.LittleEndian.PutUint64(z[3*8:4*8], z3) - binary.LittleEndian.PutUint64(z[4*8:5*8], z4) - binary.LittleEndian.PutUint64(z[5*8:6*8], z5) - binary.LittleEndian.PutUint64(z[6*8:7*8], z6) -} - -func subGeneric(z, x, y *Elt) { - x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) - x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) - x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) - x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) - x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) - x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) - x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) - - y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) - y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) - y2 := 
binary.LittleEndian.Uint64(y[2*8 : 3*8]) - y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) - y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) - y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) - y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) - - z0, c0 := bits.Sub64(x0, y0, 0) - z1, c1 := bits.Sub64(x1, y1, c0) - z2, c2 := bits.Sub64(x2, y2, c1) - z3, c3 := bits.Sub64(x3, y3, c2) - z4, c4 := bits.Sub64(x4, y4, c3) - z5, c5 := bits.Sub64(x5, y5, c4) - z6, z7 := bits.Sub64(x6, y6, c5) - - z0, c0 = bits.Sub64(z0, z7, 0) - z1, c1 = bits.Sub64(z1, 0, c0) - z2, c2 = bits.Sub64(z2, 0, c1) - z3, c3 = bits.Sub64(z3, z7<<32, c2) - z4, c4 = bits.Sub64(z4, 0, c3) - z5, c5 = bits.Sub64(z5, 0, c4) - z6, z7 = bits.Sub64(z6, 0, c5) - - z0, c0 = bits.Sub64(z0, z7, 0) - z1, c1 = bits.Sub64(z1, 0, c0) - z2, c2 = bits.Sub64(z2, 0, c1) - z3, c3 = bits.Sub64(z3, z7<<32, c2) - z4, c4 = bits.Sub64(z4, 0, c3) - z5, c5 = bits.Sub64(z5, 0, c4) - z6, _ = bits.Sub64(z6, 0, c5) - - binary.LittleEndian.PutUint64(z[0*8:1*8], z0) - binary.LittleEndian.PutUint64(z[1*8:2*8], z1) - binary.LittleEndian.PutUint64(z[2*8:3*8], z2) - binary.LittleEndian.PutUint64(z[3*8:4*8], z3) - binary.LittleEndian.PutUint64(z[4*8:5*8], z4) - binary.LittleEndian.PutUint64(z[5*8:6*8], z5) - binary.LittleEndian.PutUint64(z[6*8:7*8], z6) -} - -func addsubGeneric(x, y *Elt) { - z := &Elt{} - addGeneric(z, x, y) - subGeneric(y, x, y) - *x = *z -} - -func mulGeneric(z, x, y *Elt) { - x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) - x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) - x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) - x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) - x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) - x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) - x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) - - y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) - y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) - y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) - y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) - y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) - y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) - y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) - - yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6} - zz := [7]uint64{} - - yi := yy[0] - h0, l0 := bits.Mul64(x0, yi) - h1, l1 := bits.Mul64(x1, yi) - h2, l2 := bits.Mul64(x2, yi) - h3, l3 := bits.Mul64(x3, yi) - h4, l4 := bits.Mul64(x4, yi) - h5, l5 := bits.Mul64(x5, yi) - h6, l6 := bits.Mul64(x6, yi) - - zz[0] = l0 - a0, c0 := bits.Add64(h0, l1, 0) - a1, c1 := bits.Add64(h1, l2, c0) - a2, c2 := bits.Add64(h2, l3, c1) - a3, c3 := bits.Add64(h3, l4, c2) - a4, c4 := bits.Add64(h4, l5, c3) - a5, c5 := bits.Add64(h5, l6, c4) - a6, _ := bits.Add64(h6, 0, c5) - - for i := 1; i < 7; i++ { - yi = yy[i] - h0, l0 = bits.Mul64(x0, yi) - h1, l1 = bits.Mul64(x1, yi) - h2, l2 = bits.Mul64(x2, yi) - h3, l3 = bits.Mul64(x3, yi) - h4, l4 = bits.Mul64(x4, yi) - h5, l5 = bits.Mul64(x5, yi) - h6, l6 = bits.Mul64(x6, yi) - - zz[i], c0 = bits.Add64(a0, l0, 0) - a0, c1 = bits.Add64(a1, l1, c0) - a1, c2 = bits.Add64(a2, l2, c1) - a2, c3 = bits.Add64(a3, l3, c2) - a3, c4 = bits.Add64(a4, l4, c3) - a4, c5 = bits.Add64(a5, l5, c4) - a5, a6 = bits.Add64(a6, l6, c5) - - a0, c0 = bits.Add64(a0, h0, 0) - a1, c1 = bits.Add64(a1, h1, c0) - a2, c2 = bits.Add64(a2, h2, c1) - a3, c3 = bits.Add64(a3, h3, c2) - a4, c4 = bits.Add64(a4, h4, c3) - a5, c5 = bits.Add64(a5, h5, c4) - a6, _ = bits.Add64(a6, h6, c5) - } - red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6}) -} - -func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) } - -func red64(z 
*Elt, l, h *[7]uint64) { - /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */ - h0 := h[0] - h1 := h[1] - h2 := h[2] - h3 := ((h[3] & (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF) - h4 := (h[3] >> 63) | (h[4] << 1) - h5 := (h[4] >> 63) | (h[5] << 1) - h6 := (h[5] >> 63) | (h[6] << 1) - h7 := (h[6] >> 63) - - l0, c0 := bits.Add64(h0, l[0], 0) - l1, c1 := bits.Add64(h1, l[1], c0) - l2, c2 := bits.Add64(h2, l[2], c1) - l3, c3 := bits.Add64(h3, l[3], c2) - l4, c4 := bits.Add64(h4, l[4], c3) - l5, c5 := bits.Add64(h5, l[5], c4) - l6, c6 := bits.Add64(h6, l[6], c5) - l7, _ := bits.Add64(h7, 0, c6) - - /* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */ - h0 = (h[3] >> 32) | (h[4] << 32) - h1 = (h[4] >> 32) | (h[5] << 32) - h2 = (h[5] >> 32) | (h[6] << 32) - h3 = (h[6] >> 32) | (h[0] << 32) - h4 = (h[0] >> 32) | (h[1] << 32) - h5 = (h[1] >> 32) | (h[2] << 32) - h6 = (h[2] >> 32) | (h[3] << 32) - - l0, c0 = bits.Add64(l0, h0, 0) - l1, c1 = bits.Add64(l1, h1, c0) - l2, c2 = bits.Add64(l2, h2, c1) - l3, c3 = bits.Add64(l3, h3, c2) - l4, c4 = bits.Add64(l4, h4, c3) - l5, c5 = bits.Add64(l5, h5, c4) - l6, c6 = bits.Add64(l6, h6, c5) - l7, _ = bits.Add64(l7, 0, c6) - - /* (C7) + (C6,...,C0) */ - l0, c0 = bits.Add64(l0, l7, 0) - l1, c1 = bits.Add64(l1, 0, c0) - l2, c2 = bits.Add64(l2, 0, c1) - l3, c3 = bits.Add64(l3, l7<<32, c2) - l4, c4 = bits.Add64(l4, 0, c3) - l5, c5 = bits.Add64(l5, 0, c4) - l6, l7 = bits.Add64(l6, 0, c5) - - /* (C7) + (C6,...,C0) */ - l0, c0 = bits.Add64(l0, l7, 0) - l1, c1 = bits.Add64(l1, 0, c0) - l2, c2 = bits.Add64(l2, 0, c1) - l3, c3 = bits.Add64(l3, l7<<32, c2) - l4, c4 = bits.Add64(l4, 0, c3) - l5, c5 = bits.Add64(l5, 0, c4) - l6, _ = bits.Add64(l6, 0, c5) - - binary.LittleEndian.PutUint64(z[0*8:1*8], l0) - binary.LittleEndian.PutUint64(z[1*8:2*8], l1) - binary.LittleEndian.PutUint64(z[2*8:3*8], l2) - binary.LittleEndian.PutUint64(z[3*8:4*8], l3) - binary.LittleEndian.PutUint64(z[4*8:5*8], l4) - binary.LittleEndian.PutUint64(z[5*8:6*8], l5) - binary.LittleEndian.PutUint64(z[6*8:7*8], l6) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go deleted file mode 100644 index a62225d296..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !amd64 || purego -// +build !amd64 purego - -package fp448 - -func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } -func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } -func add(z, x, y *Elt) { addGeneric(z, x, y) } -func sub(z, x, y *Elt) { subGeneric(z, x, y) } -func addsub(x, y *Elt) { addsubGeneric(x, y) } -func mul(z, x, y *Elt) { mulGeneric(z, x, y) } -func sqr(z, x *Elt) { sqrGeneric(z, x) } diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go deleted file mode 100644 index 2d7afc8059..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go +++ /dev/null @@ -1,75 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -// How to run the fuzzer: -// -// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz -// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build -// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a -// $ clang -fsanitize=fuzzer lib.a -o fu.exe -// $ ./fu.exe -package fp448 - -import ( - "encoding/binary" - "fmt" - "math/big" - - "github.com/cloudflare/circl/internal/conv" -) 
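[Editor's note — illustrative sketch, not part of this diff: red64 above and the FuzzReduction reference loop below both rely on the identity 2^448 ≡ 2^224 + 1 (mod p), so a double-width value t = hi*2^448 + lo can be folded to hi*(2^224 + 1) + lo without changing its residue. A math/big model of that fold, assuming only the standard library:

	// foldFp448 folds t until it fits in 448 bits; the result is congruent to
	// t mod p = 2^448 - 2^224 - 1, though possibly not yet fully reduced.
	func foldFp448(t *big.Int) *big.Int {
		one := big.NewInt(1)
		two448 := new(big.Int).Lsh(one, 448)
		mask448 := new(big.Int).Sub(two448, one)
		fold := new(big.Int).Add(new(big.Int).Lsh(one, 224), one) // 2^224 + 1
		for t.Cmp(two448) >= 0 {
			lo := new(big.Int).And(t, mask448)
			hi := new(big.Int).Rsh(t, 448)
			t = new(big.Int).Add(new(big.Int).Mul(hi, fold), lo)
		}
		return t
	}
]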
- -// FuzzReduction is a fuzzer target for red64 function, which reduces t -// (112 bits) to a number t' (56 bits) congruent modulo p448. -func FuzzReduction(data []byte) int { - if len(data) != 2*Size { - return -1 - } - var got, want Elt - var lo, hi [7]uint64 - a := data[:Size] - b := data[Size:] - lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8]) - lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8]) - lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8]) - lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8]) - lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8]) - lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8]) - lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8]) - - hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8]) - hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8]) - hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8]) - hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8]) - hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8]) - hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8]) - hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8]) - - red64(&got, &lo, &hi) - - t := conv.BytesLe2BigInt(data[:2*Size]) - - two448 := big.NewInt(1) - two448.Lsh(two448, 448) // 2^448 - mask448 := big.NewInt(1) - mask448.Sub(two448, mask448) // 2^448-1 - two224plus1 := big.NewInt(1) - two224plus1.Lsh(two224plus1, 224) - two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1 - - var loBig, hiBig big.Int - for t.Cmp(two448) >= 0 { - loBig.And(t, mask448) - hiBig.Rsh(t, 448) - t.Mul(&hiBig, two224plus1) - t.Add(t, &loBig) - } - conv.BigInt2BytesLe(want[:], t) - - if got != want { - fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size])) - fmt.Printf("got: %v\n", got) - fmt.Printf("want: %v\n", want) - panic("error found") - } - return 1 -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/integer.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/integer.go deleted file mode 100644 index 9c80c23b59..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/integer.go +++ /dev/null @@ -1,16 +0,0 @@ -package math - -import "math/bits" - -// NextPow2 finds the next power of two (N=2^k, k>=0) greater than n. -// If n is already a power of two, then this function returns n, and log2(n). -func NextPow2(n uint) (N uint, k uint) { - if bits.OnesCount(n) == 1 { - k = uint(bits.TrailingZeros(n)) - N = n - } else { - k = uint(bits.Len(n)) - N = uint(1) << k - } - return -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go deleted file mode 100644 index a43851b8bb..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package mlsbset provides a constant-time exponentiation method with precomputation. -// -// References: "Efficient and secure algorithms for GLV-based scalar -// multiplication and their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) -// - https://doi.org/10.1007/s13389-014-0085-7 -// - https://eprint.iacr.org/2013/158 -package mlsbset - -import ( - "errors" - "fmt" - "math/big" - - "github.com/cloudflare/circl/internal/conv" -) - -// EltG is a group element. -type EltG interface{} - -// EltP is a precomputed group element. -type EltP interface{} - -// Group defines the operations required by MLSBSet exponentiation method. -type Group interface { - Identity() EltG // Returns the identity of the group. - Sqr(x EltG) // Calculates x = x^2. - Mul(x EltG, y EltP) // Calculates x = x*y. 
- NewEltP() EltP // Returns an arbitrary precomputed element. - ExtendedEltP() EltP // Returns the precomputed element x^(2^(w*d)). - Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u]. -} - -// Params contains the parameters of the encoding. -type Params struct { - T uint // T is the maximum size (in bits) of exponents. - V uint // V is the number of tables. - W uint // W is the window size. - E uint // E is the number of digits per table. - D uint // D is the number of digits in total. - L uint // L is the length of the code. -} - -// Encoder converts integers into valid powers. -type Encoder struct{ p Params } - -// New produces an encoder of the MLSBSet algorithm. -func New(t, v, w uint) (Encoder, error) { - if !(t > 1 && v >= 1 && w >= 2) { - return Encoder{}, errors.New("t>1, v>=1, w>=2") - } - e := (t + w*v - 1) / (w * v) - d := e * v - l := d * w - return Encoder{Params{t, v, w, e, d, l}}, nil -} - -// Encode converts an odd integer k into a valid power for exponentiation. -func (m Encoder) Encode(k []byte) (*Power, error) { - if len(k) == 0 { - return nil, errors.New("empty slice") - } - if !(len(k) <= int(m.p.L+7)>>3) { - return nil, errors.New("k too big") - } - if k[0]%2 == 0 { - return nil, errors.New("k must be odd") - } - ap := int((m.p.L+7)/8) - len(k) - k = append(k, make([]byte, ap)...) - s := m.signs(k) - b := make([]int32, m.p.L-m.p.D) - c := conv.BytesLe2BigInt(k) - c.Rsh(c, m.p.D) - var bi big.Int - for i := m.p.D; i < m.p.L; i++ { - c0 := int32(c.Bit(0)) - b[i-m.p.D] = s[i%m.p.D] * c0 - bi.SetInt64(int64(b[i-m.p.D] >> 1)) - c.Rsh(c, 1) - c.Sub(c, &bi) - } - carry := int(c.Int64()) - return &Power{m, s, b, carry}, nil -} - -// signs calculates the set of signs. -func (m Encoder) signs(k []byte) []int32 { - s := make([]int32, m.p.D) - s[m.p.D-1] = 1 - for i := uint(1); i < m.p.D; i++ { - ki := int32((k[i>>3] >> (i & 0x7)) & 0x1) - s[i-1] = 2*ki - 1 - } - return s -} - -// GetParams returns the complementary parameters of the encoding. -func (m Encoder) GetParams() Params { return m.p } - -// tableSize returns the size of each table. -func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) } - -// Elts returns the total number of elements that must be precomputed. -func (m Encoder) Elts() uint { return m.p.V * m.tableSize() } - -// IsExtended returns true if the element x^(2^(wd)) must be calculated. -func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W } - -// Ops returns the number of squares and multiplications executed during an exponentiation. -func (m Encoder) Ops() (S uint, M uint) { - S = m.p.E - M = m.p.E * m.p.V - if m.IsExtended() { - M++ - } - return -} - -func (m Encoder) String() string { - return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v", - m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended()) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/mlsbset/power.go deleted file mode 100644 index 3f214c3046..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/mlsbset/power.go +++ /dev/null @@ -1,64 +0,0 @@ -package mlsbset - -import "fmt" - -// Power is a valid exponent produced by the MLSBSet encoding algorithm. -type Power struct { - set Encoder // parameters of code. - s []int32 // set of signs. - b []int32 // set of digits. - c int // carry is {0,1}. -} - -// Exp calculates x^k, where x is a predetermined element of a group G.
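[Editor's note — illustrative sketch, not part of this diff: to make the encoder parameters above concrete, for t-bit exponents, v tables, and window w, New derives e = ceil(t/(w*v)) digits per table, d = e*v digits in total, and code length l = d*w. A hypothetical instantiation (values chosen only for illustration):

	// t=256, v=2, w=4 gives e=32, d=64, l=256; IsExtended() is true because
	// w*v divides t exactly, so x^(2^(w*d)) must also be precomputed.
	enc, err := mlsbset.New(256, 2, 4)
	if err != nil {
		panic(err)
	}
	fmt.Println(enc.Elts()) // 16 = v * 2^(w-1) elements to precompute
]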
-func (p *Power) Exp(G Group) EltG { - a, b := G.Identity(), G.NewEltP() - for e := int(p.set.p.E - 1); e >= 0; e-- { - G.Sqr(a) - for v := uint(0); v < p.set.p.V; v++ { - sgnElt, idElt := p.Digit(v, uint(e)) - G.Lookup(b, v, sgnElt, idElt) - G.Mul(a, b) - } - } - if p.set.IsExtended() && p.c == 1 { - G.Mul(a, G.ExtendedEltP()) - } - return a -} - -// Digit returns the (v,e)-th digit and its sign. -func (p *Power) Digit(v, e uint) (sgn, dig int32) { - sgn = p.bit(0, v, e) - dig = 0 - for i := p.set.p.W - 1; i > 0; i-- { - dig = 2*dig + p.bit(i, v, e) - } - mask := dig >> 31 - dig = (dig + mask) ^ mask - return sgn, dig -} - -// bit returns the (w,v,e)-th bit of the code. -func (p *Power) bit(w, v, e uint) int32 { - if !(w < p.set.p.W && - v < p.set.p.V && - e < p.set.p.E) { - panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e)) - } - if w == 0 { - return p.s[p.set.p.E*v+e] - } - return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e] -} - -func (p *Power) String() string { - dig := "" - for j := uint(0); j < p.set.p.V; j++ { - for i := uint(0); i < p.set.p.E; i++ { - s, d := p.Digit(j, i) - dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d) - } - } - return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/primes.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/primes.go deleted file mode 100644 index 158fd83a7a..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/primes.go +++ /dev/null @@ -1,34 +0,0 @@ -package math - -import ( - "crypto/rand" - "io" - "math/big" -) - -// IsSafePrime reports whether p is (probably) a safe prime. -// The prime p=2*q+1 is a safe prime if both p and q are primes. -// Note that ProbablyPrime is not suitable for judging primes -// that an adversary may have crafted to fool the test. -func IsSafePrime(p *big.Int) bool { - pdiv2 := new(big.Int).Rsh(p, 1) - return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20) -} - -// SafePrime returns a number of the given bit length that is a safe prime with high probability. -// The number returned p=2*q+1 is a safe prime if both p and q are primes. -// SafePrime returns an error for any error returned by rand.Read or if bits < 2. -func SafePrime(random io.Reader, bits int) (*big.Int, error) { - one := big.NewInt(1) - p := new(big.Int) - for { - q, err := rand.Prime(random, bits-1) - if err != nil { - return nil, err - } - p.Lsh(q, 1).Add(p, one) - if p.ProbablyPrime(20) { - return p, nil - } - } -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/math/wnaf.go b/openshift/tools/vendor/github.com/cloudflare/circl/math/wnaf.go deleted file mode 100644 index 94a1ec5042..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/math/wnaf.go +++ /dev/null @@ -1,84 +0,0 @@ -// Package math provides some utility functions for big integers. -package math - -import "math/big" - -// SignedDigit obtains the signed-digit recoding of n and returns a list L of -// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number -// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the -// output has ceil(l/(w-1)) digits. -// -// Restrictions: -// - n is odd and n > 0. -// - 1 < w < 32. -// - l >= bit length of n. -// -// References: -// - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms" -// by Joye-Tunstall.
http://doi.org/10.1007/978-3-642-02384-2_21 - // - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and -// Security Analysis" by Bos et al. http://doi.org/10.1007/s13389-015-0097-y -func SignedDigit(n *big.Int, w, l uint) []int32 { - if n.Sign() <= 0 || n.Bit(0) == 0 { - panic("n must be non-zero, odd, and positive") - } - if w <= 1 || w >= 32 { - panic("Verify that 1 < w < 32") - } - if uint(n.BitLen()) > l { - panic("n is too big to fit in l digits") - } - lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1)) - L := make([]int32, lenN+1) - var k, v big.Int - k.Set(n) - - var i uint - for i = 0; i < lenN; i++ { - words := k.Bits() - value := int32(words[0] & ((1 << w) - 1)) - value -= int32(1) << (w - 1) - L[i] = value - v.SetInt64(int64(value)) - k.Sub(&k, &v) - k.Rsh(&k, w-1) - } - L[i] = int32(k.Int64()) - return L -} - -// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n and -// 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ). -// -// Reference: -// - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas. -// http://doi.org/10.1023/A:1008306223194 -func OmegaNAF(n *big.Int, w uint) (L []int32) { - if n.Sign() < 0 { - panic("n must be positive") - } - if w <= 1 || w >= 32 { - panic("Verify that 1 < w < 32") - } - - L = make([]int32, n.BitLen()+1) - var k, v big.Int - k.Set(n) - - i := 0 - for ; k.Sign() > 0; i++ { - value := int32(0) - if k.Bit(0) == 1 { - words := k.Bits() - value = int32(words[0] & ((1 << w) - 1)) - if value >= (int32(1) << (w - 1)) { - value -= int32(1) << w - } - v.SetInt64(int64(value)) - k.Sub(&k, &v) - } - L[i] = value - k.Rsh(&k, 1) - } - return L[:i] -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go deleted file mode 100644 index 2c73c26fb1..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go +++ /dev/null @@ -1,453 +0,0 @@ -// Package ed25519 implements the Ed25519 signature scheme as described in RFC-8032. -// -// This package provides optimized implementations of the three signature -// variants while maintaining close compatibility with crypto/ed25519. -// -// | Scheme Name | Sign Function | Verification | Context | -// |-------------|-------------------|---------------|-------------------| -// | Ed25519 | Sign | Verify | None | -// | Ed25519Ph | SignPh | VerifyPh | Yes, can be empty | -// | Ed25519Ctx | SignWithCtx | VerifyWithCtx | Yes, non-empty | -// | All above | (PrivateKey).Sign | VerifyAny | As above | -// -// Specific functions for sign and verify are defined. A generic signing -// function for all schemes is available through the crypto.Signer interface, -// which is implemented by the PrivateKey type. A corresponding all-in-one -// verification method is provided by the VerifyAny function. -// -// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain -// separation. This parameter is passed using a SignerOptions struct defined -// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx -// enforces non-empty context strings. -// -// # Compatibility with crypto.ed25519 -// -// These functions are compatible with the “Ed25519” function defined in -// RFC-8032. However, unlike RFC 8032's formulation, this package's private -// key representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the -// RFC-8032 private key as the “seed”.
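[Editor's note — illustrative sketch, not part of this diff: the seed/private-key distinction above is the main API difference from RFC 8032. A short round-trip using only functions defined in this file (assumes imports of bytes, fmt, and this ed25519 package):

	pub, priv, err := ed25519.GenerateKey(nil) // nil reader falls back to crypto/rand
	if err != nil {
		panic(err)
	}
	seed := priv.Seed()                   // the 32-byte RFC 8032 private key
	priv2 := ed25519.NewKeyFromSeed(seed) // rebuilds seed || public key
	fmt.Println(priv.Equal(priv2), bytes.Equal(pub, priv2[ed25519.SeedSize:])) // true true
]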
-// -// References -// -// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt -// - Ed25519: https://ed25519.cr.yp.to/ -// - EdDSA: High-speed high-security signatures. https://doi.org/10.1007/s13389-012-0027-1 -package ed25519 - -import ( - "bytes" - "crypto" - cryptoRand "crypto/rand" - "crypto/sha512" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "github.com/cloudflare/circl/sign" -) - -const ( - // ContextMaxSize is the maximum length (in bytes) allowed for context. - ContextMaxSize = 255 - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -const ( - paramB = 256 / 8 // Size of keys in bytes. -) - -// SignerOptions implements crypto.SignerOpts and augments with parameters -// that are specific to the Ed25519 signature schemes. -type SignerOptions struct { - // Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512 - // for Ed25519ph. - crypto.Hash - - // Context is an optional domain separation string for Ed25519ph and a - // must for Ed25519ctx. Its length must be less or equal than 255 bytes. - Context string - - // Scheme is an identifier for choosing a signature scheme. The zero value - // is ED25519. - Scheme SchemeID -} - -// SchemeID is an identifier for each signature scheme. -type SchemeID uint - -const ( - ED25519 SchemeID = iota - ED25519Ph - ED25519Ctx -) - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Equal reports whether priv and x have the same value. -func (priv PrivateKey) Equal(x crypto.PrivateKey) bool { - xx, ok := x.(PrivateKey) - return ok && subtle.ConstantTimeCompare(priv, xx) == 1 -} - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make(PublicKey, PublicKeySize) - copy(publicKey, priv[SeedSize:]) - return publicKey -} - -// Seed returns the private key seed corresponding to priv. It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:SeedSize]) - return seed -} - -func (priv PrivateKey) Scheme() sign.Scheme { return sch } - -func (pub PublicKey) Scheme() sign.Scheme { return sch } - -func (priv PrivateKey) MarshalBinary() (data []byte, err error) { - privateKey := make(PrivateKey, PrivateKeySize) - copy(privateKey, priv) - return privateKey, nil -} - -func (pub PublicKey) MarshalBinary() (data []byte, err error) { - publicKey := make(PublicKey, PublicKeySize) - copy(publicKey, pub) - return publicKey, nil -} - -// Equal reports whether pub and x have the same value. -func (pub PublicKey) Equal(x crypto.PublicKey) bool { - xx, ok := x.(PublicKey) - return ok && bytes.Equal(pub, xx) -} - -// Sign creates a signature of a message with priv key. -// This function is compatible with crypto.ed25519 and also supports the -// three signature variants defined in RFC-8032, namely Ed25519 (or pure -// EdDSA), Ed25519Ph, and Ed25519Ctx. -// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx -// variant. 
This can be achieved by passing crypto.Hash(0) as the value for -// opts. -// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. -// This can be achieved by passing crypto.SHA512 as the value for opts. -// Use a SignerOptions struct (defined in this package) to pass a context -// string for signing. -func (priv PrivateKey) Sign( - rand io.Reader, - message []byte, - opts crypto.SignerOpts, -) (signature []byte, err error) { - var ctx string - var scheme SchemeID - if o, ok := opts.(SignerOptions); ok { - ctx = o.Context - scheme = o.Scheme - } - - switch true { - case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): - return Sign(priv, message), nil - case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: - return SignPh(priv, message, ctx), nil - case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: - return SignWithCtx(priv, message, ctx), nil - default: - return nil, errors.New("ed25519: bad hash algorithm") - } -} - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptoRand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make(PublicKey, PublicKeySize) - copy(publicKey, privateKey[SeedSize:]) - - return publicKey, privateKey, nil -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - privateKey := make(PrivateKey, PrivateKeySize) - newKeyFromSeed(privateKey, seed) - return privateKey -} - -func newKeyFromSeed(privateKey, seed []byte) { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - var P pointR1 - k := sha512.Sum512(seed) - clamp(k[:]) - reduceModOrder(k[:paramB], false) - P.fixedMult(k[:paramB]) - copy(privateKey[:SeedSize], seed) - _ = P.ToBytes(privateKey[SeedSize:]) -} - -func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - H := sha512.New() - var PHM []byte - - if preHash { - _, _ = H.Write(message) - PHM = H.Sum(nil) - H.Reset() - } else { - PHM = message - } - - // 1. Hash the 32-byte private key using SHA-512. - _, _ = H.Write(privateKey[:SeedSize]) - h := H.Sum(nil) - clamp(h[:]) - prefix, s := h[paramB:], h[:paramB] - - // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M)) - H.Reset() - - writeDom(H, ctx, preHash) - - _, _ = H.Write(prefix) - _, _ = H.Write(PHM) - r := H.Sum(nil) - reduceModOrder(r[:], true) - - // 3. Compute the point [r]B. - var P pointR1 - P.fixedMult(r[:paramB]) - R := (&[paramB]byte{})[:] - if err := P.ToBytes(R); err != nil { - panic(err) - } - - // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)). - H.Reset() - - writeDom(H, ctx, preHash) - - _, _ = H.Write(R) - _, _ = H.Write(privateKey[SeedSize:]) - _, _ = H.Write(PHM) - hRAM := H.Sum(nil) - - reduceModOrder(hRAM[:], true) - - // 5. Compute S = (r + k * s) mod order. - S := (&[paramB]byte{})[:] - calculateS(S, r[:paramB], hRAM[:paramB], s) - - // 6. The signature is the concatenation of R and S. 
- copy(signature[:paramB], R[:]) - copy(signature[paramB:], S[:]) -} - -// Sign signs the message with privateKey and returns a signature. -// This function supports the signature variant defined in RFC-8032: Ed25519, -// also known as the pure version of EdDSA. -// It will panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - signature := make([]byte, SignatureSize) - signAll(signature, privateKey, message, []byte(""), false) - return signature -} - -// SignPh creates a signature of a message with private key and context. -// This function supports the signature variant defined in RFC-8032: Ed25519ph, -// meaning it internally hashes the message using SHA-512, and optionally -// accepts a context string. -// It will panic if len(privateKey) is not PrivateKeySize. -// A context string may be passed to this function; its length must be no more -// than ContextMaxSize=255 bytes. It can be empty. -func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte { - if len(ctx) > ContextMaxSize { - panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx))) - } - - signature := make([]byte, SignatureSize) - signAll(signature, privateKey, message, []byte(ctx), true) - return signature -} - -// SignWithCtx creates a signature of a message with private key and context. -// This function supports the signature variant defined in RFC-8032: Ed25519ctx, -// meaning it accepts a non-empty context string. -// It will panic if len(privateKey) is not PrivateKeySize. -// A context string must be passed to this function; its length must be no more -// than ContextMaxSize=255 bytes and it cannot be empty. -func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte { - if len(ctx) == 0 || len(ctx) > ContextMaxSize { - panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize)) - } - - signature := make([]byte, SignatureSize) - signAll(signature, privateKey, message, []byte(ctx), false) - return signature -} - -func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool { - if len(public) != PublicKeySize || - len(signature) != SignatureSize || - !isLessThanOrder(signature[paramB:]) { - return false - } - - var P pointR1 - if ok := P.FromBytes(public); !ok { - return false - } - - H := sha512.New() - var PHM []byte - - if preHash { - _, _ = H.Write(message) - PHM = H.Sum(nil) - H.Reset() - } else { - PHM = message - } - - R := signature[:paramB] - - writeDom(H, ctx, preHash) - - _, _ = H.Write(R) - _, _ = H.Write(public) - _, _ = H.Write(PHM) - hRAM := H.Sum(nil) - reduceModOrder(hRAM[:], true) - - var Q pointR1 - encR := (&[paramB]byte{})[:] - P.neg() - Q.doubleMult(&P, signature[paramB:], hRAM[:paramB]) - _ = Q.ToBytes(encR) - return bytes.Equal(R, encR) -} - -// VerifyAny returns true if the signature is valid. Failure cases are invalid -// signature, or when the public key cannot be decoded. -// This function supports all three signature variants defined in RFC-8032, -// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx. -// The opts.HashFunc() must return zero to specify either the Ed25519 or Ed25519Ctx -// variant. This can be achieved by passing crypto.Hash(0) as the value for opts. -// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. -// This can be achieved by passing crypto.SHA512 as the value for opts. -// Use a SignerOptions struct to pass a context string for signing.
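[Editor's note — illustrative sketch, not part of this diff: the dispatch described above pairs (PrivateKey).Sign with VerifyAny through SignerOptions. Here priv, pub, and msg are placeholders (see the round-trip sketch earlier), and "example.org" is a placeholder context string:

	opts := ed25519.SignerOptions{
		Hash:    crypto.SHA512, // selects the Ed25519ph variant
		Scheme:  ed25519.ED25519Ph,
		Context: "example.org",
	}
	sig, err := priv.Sign(nil, msg, opts) // rand is unused; Ed25519 signing is deterministic
	if err != nil {
		panic(err)
	}
	ok := ed25519.VerifyAny(pub, msg, sig, opts) // true for an untampered msg
]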
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool { - var ctx string - var scheme SchemeID - if o, ok := opts.(SignerOptions); ok { - ctx = o.Context - scheme = o.Scheme - } - - switch true { - case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): - return Verify(public, message, signature) - case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: - return VerifyPh(public, message, signature, ctx) - case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: - return VerifyWithCtx(public, message, signature, ctx) - default: - return false - } -} - -// Verify returns true if the signature is valid. Failure cases are invalid -// signature, or when the public key cannot be decoded. -// This function supports the signature variant defined in RFC-8032: Ed25519, -// also known as the pure version of EdDSA. -func Verify(public PublicKey, message, signature []byte) bool { - return verify(public, message, signature, []byte(""), false) -} - -// VerifyPh returns true if the signature is valid. Failure cases are invalid -// signature, or when the public key cannot be decoded. -// This function supports the signature variant defined in RFC-8032: Ed25519ph, -// meaning it internally hashes the message using SHA-512. -// Context could be passed to this function, which length should be no more than -// 255. It can be empty. -func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool { - return verify(public, message, signature, []byte(ctx), true) -} - -// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid -// signature, or when the public key cannot be decoded, or when context is -// not provided. -// This function supports the signature variant defined in RFC-8032: Ed25519ctx, -// meaning it does not handle prehashed messages. Non-empty context string must be -// provided, and must not be more than 255 of length. -func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool { - if len(ctx) == 0 || len(ctx) > ContextMaxSize { - return false - } - - return verify(public, message, signature, []byte(ctx), false) -} - -func clamp(k []byte) { - k[0] &= 248 - k[paramB-1] = (k[paramB-1] & 127) | 64 -} - -// isLessThanOrder returns true if 0 <= x < order. 
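For orientation while reviewing this removal, a minimal, hypothetical usage sketch of the three Ed25519 variants deleted above. It is not part of the vendored code, and it assumes the SignerOptions layout mirrors the Ed448 definition later in this diff (an embedded crypto.Hash plus Context and Scheme fields):

package main

import (
	"crypto"
	"fmt"

	"github.com/cloudflare/circl/sign/ed25519"
)

func main() {
	// GenerateKey falls back to crypto/rand.Reader when rand is nil.
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")

	// Pure Ed25519: no pre-hash, no context.
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig))

	// Ed25519ph through crypto.Signer: HashFunc() must report crypto.SHA512.
	opts := ed25519.SignerOptions{Hash: crypto.SHA512, Scheme: ed25519.ED25519Ph, Context: "example"}
	sig, err = priv.Sign(nil, msg, opts)
	fmt.Println(err == nil && ed25519.VerifyAny(pub, msg, sig, opts))

	// Ed25519ctx: HashFunc() stays zero and the context must be non-empty.
	sig = ed25519.SignWithCtx(priv, msg, "example")
	fmt.Println(ed25519.VerifyWithCtx(pub, msg, sig, "example"))
}

In all three cases, a signature whose S component is not canonical is rejected by the isLessThanOrder guard defined next.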
-func isLessThanOrder(x []byte) bool { - i := len(order) - 1 - for i > 0 && x[i] == order[i] { - i-- - } - return x[i] < order[i] -} - -func writeDom(h io.Writer, ctx []byte, preHash bool) { - dom2 := "SigEd25519 no Ed25519 collisions" - - if len(ctx) > 0 { - _, _ = h.Write([]byte(dom2)) - if preHash { - _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) - } else { - _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) - } - _, _ = h.Write(ctx) - } else if preHash { - _, _ = h.Write([]byte(dom2)) - _, _ = h.Write([]byte{0x01, 0x00}) - } -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go deleted file mode 100644 index 10efafdcaf..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go +++ /dev/null @@ -1,175 +0,0 @@ -package ed25519 - -import ( - "encoding/binary" - "math/bits" -) - -var order = [paramB]byte{ - 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, - 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, -} - -// isLessThan returns true if 0 <= x < y, and assumes that slices have the same length. -func isLessThan(x, y []byte) bool { - i := len(x) - 1 - for i > 0 && x[i] == y[i] { - i-- - } - return x[i] < y[i] -} - -// reduceModOrder calculates k = k mod order of the curve. -func reduceModOrder(k []byte, is512Bit bool) { - var X [((2 * paramB) * 8) / 64]uint64 - numWords := len(k) >> 3 - for i := 0; i < numWords; i++ { - X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8]) - } - red512(&X, is512Bit) - for i := 0; i < numWords; i++ { - binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i]) - } -} - -// red512 calculates x = x mod Order of the curve. -func red512(x *[8]uint64, full bool) { - // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied - // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone. 
- const ( - ell0 = uint64(0x5812631a5cf5d3ed) - ell1 = uint64(0x14def9dea2f79cd6) - ell160 = uint64(0x812631a5cf5d3ed0) - ell161 = uint64(0x4def9dea2f79cd65) - ell162 = uint64(0x0000000000000001) - ) - - var c0, c1, c2, c3 uint64 - r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0) - - if full { - q0, q1, q2, q3 := x[4], x[5], x[6], x[7] - - for i := 0; i < 3; i++ { - h0, s0 := bits.Mul64(q0, ell160) - h1, s1 := bits.Mul64(q1, ell160) - h2, s2 := bits.Mul64(q2, ell160) - h3, s3 := bits.Mul64(q3, ell160) - - s1, c0 = bits.Add64(h0, s1, 0) - s2, c1 = bits.Add64(h1, s2, c0) - s3, c2 = bits.Add64(h2, s3, c1) - s4, _ := bits.Add64(h3, 0, c2) - - h0, l0 := bits.Mul64(q0, ell161) - h1, l1 := bits.Mul64(q1, ell161) - h2, l2 := bits.Mul64(q2, ell161) - h3, l3 := bits.Mul64(q3, ell161) - - l1, c0 = bits.Add64(h0, l1, 0) - l2, c1 = bits.Add64(h1, l2, c0) - l3, c2 = bits.Add64(h2, l3, c1) - l4, _ := bits.Add64(h3, 0, c2) - - s1, c0 = bits.Add64(s1, l0, 0) - s2, c1 = bits.Add64(s2, l1, c0) - s3, c2 = bits.Add64(s3, l2, c1) - s4, c3 = bits.Add64(s4, l3, c2) - s5, s6 := bits.Add64(l4, 0, c3) - - s2, c0 = bits.Add64(s2, q0, 0) - s3, c1 = bits.Add64(s3, q1, c0) - s4, c2 = bits.Add64(s4, q2, c1) - s5, c3 = bits.Add64(s5, q3, c2) - s6, s7 := bits.Add64(s6, 0, c3) - - q := q0 | q1 | q2 | q3 - m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1 - s0 &= m - s1 &= m - s2 &= m - s3 &= m - q0, q1, q2, q3 = s4, s5, s6, s7 - - if (i+1)%2 == 0 { - r0, c0 = bits.Add64(r0, s0, 0) - r1, c1 = bits.Add64(r1, s1, c0) - r2, c2 = bits.Add64(r2, s2, c1) - r3, c3 = bits.Add64(r3, s3, c2) - r4, _ = bits.Add64(r4, 0, c3) - } else { - r0, c0 = bits.Sub64(r0, s0, 0) - r1, c1 = bits.Sub64(r1, s1, c0) - r2, c2 = bits.Sub64(r2, s2, c1) - r3, c3 = bits.Sub64(r3, s3, c2) - r4, _ = bits.Sub64(r4, 0, c3) - } - } - - m := -(r4 >> 63) - r0, c0 = bits.Add64(r0, m&ell160, 0) - r1, c1 = bits.Add64(r1, m&ell161, c0) - r2, c2 = bits.Add64(r2, m&ell162, c1) - r3, c3 = bits.Add64(r3, 0, c2) - r4, _ = bits.Add64(r4, m&1, c3) - x[4], x[5], x[6], x[7] = 0, 0, 0, 0 - } - - q0 := (r4 << 4) | (r3 >> 60) - r3 &= (uint64(1) << 60) - 1 - - h0, s0 := bits.Mul64(ell0, q0) - h1, s1 := bits.Mul64(ell1, q0) - s1, c0 = bits.Add64(h0, s1, 0) - s2, _ := bits.Add64(h1, 0, c0) - - r0, c0 = bits.Sub64(r0, s0, 0) - r1, c1 = bits.Sub64(r1, s1, c0) - r2, c2 = bits.Sub64(r2, s2, c1) - r3, _ = bits.Sub64(r3, 0, c2) - - x[0], x[1], x[2], x[3] = r0, r1, r2, r3 -} - -// calculateS performs s = r+k*a mod Order of the curve. 
-func calculateS(s, r, k, a []byte) { - K := [4]uint64{ - binary.LittleEndian.Uint64(k[0*8 : 1*8]), - binary.LittleEndian.Uint64(k[1*8 : 2*8]), - binary.LittleEndian.Uint64(k[2*8 : 3*8]), - binary.LittleEndian.Uint64(k[3*8 : 4*8]), - } - S := [8]uint64{ - binary.LittleEndian.Uint64(r[0*8 : 1*8]), - binary.LittleEndian.Uint64(r[1*8 : 2*8]), - binary.LittleEndian.Uint64(r[2*8 : 3*8]), - binary.LittleEndian.Uint64(r[3*8 : 4*8]), - } - var c3 uint64 - for i := range K { - ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8]) - - h0, l0 := bits.Mul64(K[0], ai) - h1, l1 := bits.Mul64(K[1], ai) - h2, l2 := bits.Mul64(K[2], ai) - h3, l3 := bits.Mul64(K[3], ai) - - l1, c0 := bits.Add64(h0, l1, 0) - l2, c1 := bits.Add64(h1, l2, c0) - l3, c2 := bits.Add64(h2, l3, c1) - l4, _ := bits.Add64(h3, 0, c2) - - S[i+0], c0 = bits.Add64(S[i+0], l0, 0) - S[i+1], c1 = bits.Add64(S[i+1], l1, c0) - S[i+2], c2 = bits.Add64(S[i+2], l2, c1) - S[i+3], c3 = bits.Add64(S[i+3], l3, c2) - S[i+4], _ = bits.Add64(S[i+4], l4, c3) - } - red512(&S, true) - binary.LittleEndian.PutUint64(s[0*8:1*8], S[0]) - binary.LittleEndian.PutUint64(s[1*8:2*8], S[1]) - binary.LittleEndian.PutUint64(s[2*8:3*8], S[2]) - binary.LittleEndian.PutUint64(s[3*8:4*8], S[3]) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go deleted file mode 100644 index 3216aae303..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go +++ /dev/null @@ -1,180 +0,0 @@ -package ed25519 - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" - - "github.com/cloudflare/circl/internal/conv" - "github.com/cloudflare/circl/math" - fp "github.com/cloudflare/circl/math/fp25519" -) - -var paramD = fp.Elt{ - 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, - 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, - 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, - 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52, -} - -// mLSBRecoding parameters. -const ( - fxT = 257 - fxV = 2 - fxW = 3 - fx2w1 = 1 << (uint(fxW) - 1) - numWords64 = (paramB * 8 / 64) -) - -// mLSBRecoding is the odd-only modified LSB-set. -// -// Reference: -// -// "Efficient and secure algorithms for GLV-based scalar multiplication and -// their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) -// http://doi.org/10.1007/s13389-014-0085-7. -func mLSBRecoding(L []int8, k []byte) { - const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) - const dd = ee * fxV - const ll = dd * fxW - if len(L) == (ll + 1) { - var m [numWords64 + 1]uint64 - for i := 0; i < numWords64; i++ { - m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8]) - } - condAddOrderN(&m) - L[dd-1] = 1 - for i := 0; i < dd-1; i++ { - kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1 - L[i] = int8(kip1<<1) - 1 - } - { // right-shift by d - right := uint(dd % 64) - left := uint(64) - right - lim := ((numWords64+1)*64 - dd) / 64 - j := dd / 64 - for i := 0; i < lim; i++ { - m[i] = (m[i+j] >> right) | (m[i+j+1] << left) - } - m[lim] = m[lim+j] >> right - } - for i := dd; i < ll; i++ { - L[i] = L[i%dd] * int8(m[0]&0x1) - div2subY(m[:], int64(L[i]>>1), numWords64) - } - L[ll] = int8(m[0]) - } -} - -// absolute returns always a positive value. -func absolute(x int32) int32 { - mask := x >> 31 - return (x + mask) ^ mask -} - -// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged. 
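As a cross-check on the limb arithmetic above: red512 and calculateS are ordinary modular arithmetic over the group order, carried out on 64-bit limbs so that it runs in constant time. A hypothetical math/big sketch of the same computations (illustrative only; math/big is variable-time, which is precisely why the vendored code avoids it):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// ell = 2^252 + 27742317777372353535851937790883648493; these are the
	// little-endian bytes of the `order` array above.
	delta, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	ell := new(big.Int).Lsh(big.NewInt(1), 252)
	ell.Add(ell, delta)

	// reduceModOrder(k, true) computes k mod ell on a 512-bit buffer:
	k := new(big.Int).Exp(big.NewInt(7), big.NewInt(180), nil) // arbitrary ~500-bit input
	fmt.Println(new(big.Int).Mod(k, ell))

	// calculateS(s, r, k, a) computes s = (r + k*a) mod ell on raw byte slices;
	// the same value with big integers:
	r, a := big.NewInt(12345), big.NewInt(67890)
	s := new(big.Int).Mul(k, a)
	s.Add(s, r)
	s.Mod(s, ell)
	fmt.Println(s)
}

The condAddOrderN helper that follows serves the signed-digit recoding in mult.go rather than this reduction.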
-func condAddOrderN(x *[numWords64 + 1]uint64) { - isOdd := (x[0] & 0x1) - 1 - c := uint64(0) - for i := 0; i < numWords64; i++ { - orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8]) - o := isOdd & orderWord - x0, c0 := bits.Add64(x[i], o, c) - x[i] = x0 - c = c0 - } - x[numWords64], _ = bits.Add64(x[numWords64], 0, c) -} - -// div2subY update x = (x/2) - y. -func div2subY(x []uint64, y int64, l int) { - s := uint64(y >> 63) - for i := 0; i < l-1; i++ { - x[i] = (x[i] >> 1) | (x[i+1] << 63) - } - x[l-1] = (x[l-1] >> 1) - - b := uint64(0) - x0, b0 := bits.Sub64(x[0], uint64(y), b) - x[0] = x0 - b = b0 - for i := 1; i < l-1; i++ { - x0, b0 := bits.Sub64(x[i], s, b) - x[i] = x0 - b = b0 - } - x[l-1], _ = bits.Sub64(x[l-1], s, b) -} - -func (P *pointR1) fixedMult(scalar []byte) { - if len(scalar) != paramB { - panic("wrong scalar size") - } - const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) - const dd = ee * fxV - const ll = dd * fxW - - L := make([]int8, ll+1) - mLSBRecoding(L[:], scalar) - S := &pointR3{} - P.SetIdentity() - for ii := ee - 1; ii >= 0; ii-- { - P.double() - for j := 0; j < fxV; j++ { - dig := L[fxW*dd-j*ee+ii-ee] - for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd { - dig = 2*dig + L[i] - } - idx := absolute(int32(dig)) - sig := L[dd-j*ee+ii-ee] - Tabj := &tabSign[fxV-j-1] - for k := 0; k < fx2w1; k++ { - S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx)) - } - S.cneg(subtle.ConstantTimeEq(int32(sig), -1)) - P.mixAdd(S) - } - } -} - -const ( - omegaFix = 7 - omegaVar = 5 -) - -// doubleMult returns P=mG+nQ. -func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) { - nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix) - nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar) - - if len(nafFix) > len(nafVar) { - nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) - } else if len(nafFix) < len(nafVar) { - nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) 
- } - - var TabQ [1 << (omegaVar - 2)]pointR2 - Q.oddMultiples(TabQ[:]) - P.SetIdentity() - for i := len(nafFix) - 1; i >= 0; i-- { - P.double() - // Generator point - if nafFix[i] != 0 { - idxM := absolute(nafFix[i]) >> 1 - R := tabVerif[idxM] - if nafFix[i] < 0 { - R.neg() - } - P.mixAdd(&R) - } - // Variable input point - if nafVar[i] != 0 { - idxN := absolute(nafVar[i]) >> 1 - S := TabQ[idxN] - if nafVar[i] < 0 { - S.neg() - } - P.add(&S) - } - } -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/point.go deleted file mode 100644 index d1c3b146b7..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/point.go +++ /dev/null @@ -1,195 +0,0 @@ -package ed25519 - -import fp "github.com/cloudflare/circl/math/fp25519" - -type ( - pointR1 struct{ x, y, z, ta, tb fp.Elt } - pointR2 struct { - pointR3 - z2 fp.Elt - } -) -type pointR3 struct{ addYX, subYX, dt2 fp.Elt } - -func (P *pointR1) neg() { - fp.Neg(&P.x, &P.x) - fp.Neg(&P.ta, &P.ta) -} - -func (P *pointR1) SetIdentity() { - P.x = fp.Elt{} - fp.SetOne(&P.y) - fp.SetOne(&P.z) - P.ta = fp.Elt{} - P.tb = fp.Elt{} -} - -func (P *pointR1) toAffine() { - fp.Inv(&P.z, &P.z) - fp.Mul(&P.x, &P.x, &P.z) - fp.Mul(&P.y, &P.y, &P.z) - fp.Modp(&P.x) - fp.Modp(&P.y) - fp.SetOne(&P.z) - P.ta = P.x - P.tb = P.y -} - -func (P *pointR1) ToBytes(k []byte) error { - P.toAffine() - var x [fp.Size]byte - err := fp.ToBytes(k[:fp.Size], &P.y) - if err != nil { - return err - } - err = fp.ToBytes(x[:], &P.x) - if err != nil { - return err - } - b := x[0] & 1 - k[paramB-1] = k[paramB-1] | (b << 7) - return nil -} - -func (P *pointR1) FromBytes(k []byte) bool { - if len(k) != paramB { - panic("wrong size") - } - signX := k[paramB-1] >> 7 - copy(P.y[:], k[:fp.Size]) - P.y[fp.Size-1] &= 0x7F - p := fp.P() - if !isLessThan(P.y[:], p[:]) { - return false - } - - one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{} - fp.SetOne(one) - fp.Sqr(u, &P.y) // u = y^2 - fp.Mul(v, u, ¶mD) // v = dy^2 - fp.Sub(u, u, one) // u = y^2-1 - fp.Add(v, v, one) // v = dy^2+1 - isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) - if !isQR { - return false - } - fp.Modp(&P.x) // x = x mod p - if fp.IsZero(&P.x) && signX == 1 { - return false - } - if signX != (P.x[0] & 1) { - fp.Neg(&P.x, &P.x) - } - P.ta = P.x - P.tb = P.y - fp.SetOne(&P.z) - return true -} - -// double calculates 2P for curves with A=-1. -func (P *pointR1) double() { - Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb - a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb - fp.Add(e, Px, Py) // x+y - fp.Sqr(a, Px) // A = x^2 - fp.Sqr(b, Py) // B = y^2 - fp.Sqr(c, Pz) // z^2 - fp.Add(c, c, c) // C = 2*z^2 - fp.Add(h, a, b) // H = A+B - fp.Sqr(e, e) // (x+y)^2 - fp.Sub(e, e, h) // E = (x+y)^2-A-B - fp.Sub(g, b, a) // G = B-A - fp.Sub(f, c, g) // F = C-G - fp.Mul(Pz, f, g) // Z = F * G - fp.Mul(Px, e, f) // X = E * F - fp.Mul(Py, g, h) // Y = G * H, T = E * H -} - -func (P *pointR1) mixAdd(Q *pointR3) { - fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 - P.coreAddition(Q) -} - -func (P *pointR1) add(Q *pointR2) { - fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 - P.coreAddition(&Q.pointR3) -} - -// coreAddition calculates P=P+Q for curves with A=-1. 
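The ToBytes/FromBytes pair above implements RFC 8032 point compression: 32 little-endian bytes of y, with the sign (parity) of x stored in the top bit of the final byte. A hypothetical standalone sketch of just that framing (format only; real decoding must also solve for x and reject non-canonical y, as FromBytes does):

package main

import "fmt"

// splitEncoding undoes the RFC 8032 framing: clear the sign bit to recover y
// and report whether x is odd.
func splitEncoding(enc [32]byte) (y [32]byte, xIsOdd bool) {
	y = enc
	xIsOdd = enc[31]>>7 == 1
	y[31] &= 0x7F
	return y, xIsOdd
}

func main() {
	var enc [32]byte
	enc[31] = 0x80 // framing demo only: y = 0 with the sign bit set
	y, odd := splitEncoding(enc)
	fmt.Println(y[31], odd) // 0 true
}

coreAddition, defined next, is the shared tail of both the mixed and the full addition paths.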
-func (P *pointR1) coreAddition(Q *pointR3) { - Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb - addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 - a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb - fp.Mul(c, Pta, Ptb) // t1 = ta*tb - fp.Sub(h, Py, Px) // y1-x1 - fp.Add(b, Py, Px) // y1+x1 - fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) - fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) - fp.Mul(c, c, dt2) // C = 2*D*t1*t2 - fp.Sub(e, b, a) // E = B-A - fp.Add(h, b, a) // H = B+A - fp.Sub(f, d, c) // F = D-C - fp.Add(g, d, c) // G = D+C - fp.Mul(Pz, f, g) // Z = F * G - fp.Mul(Px, e, f) // X = E * F - fp.Mul(Py, g, h) // Y = G * H, T = E * H -} - -func (P *pointR1) oddMultiples(T []pointR2) { - var R pointR2 - n := len(T) - T[0].fromR1(P) - _2P := *P - _2P.double() - R.fromR1(&_2P) - for i := 1; i < n; i++ { - P.add(&R) - T[i].fromR1(P) - } -} - -func (P *pointR1) isEqual(Q *pointR1) bool { - l, r := &fp.Elt{}, &fp.Elt{} - fp.Mul(l, &P.x, &Q.z) - fp.Mul(r, &Q.x, &P.z) - fp.Sub(l, l, r) - b := fp.IsZero(l) - fp.Mul(l, &P.y, &Q.z) - fp.Mul(r, &Q.y, &P.z) - fp.Sub(l, l, r) - b = b && fp.IsZero(l) - fp.Mul(l, &P.ta, &P.tb) - fp.Mul(l, l, &Q.z) - fp.Mul(r, &Q.ta, &Q.tb) - fp.Mul(r, r, &P.z) - fp.Sub(l, l, r) - b = b && fp.IsZero(l) - return b && !fp.IsZero(&P.z) && !fp.IsZero(&Q.z) -} - -func (P *pointR3) neg() { - P.addYX, P.subYX = P.subYX, P.addYX - fp.Neg(&P.dt2, &P.dt2) -} - -func (P *pointR2) fromR1(Q *pointR1) { - fp.Add(&P.addYX, &Q.y, &Q.x) - fp.Sub(&P.subYX, &Q.y, &Q.x) - fp.Mul(&P.dt2, &Q.ta, &Q.tb) - fp.Mul(&P.dt2, &P.dt2, ¶mD) - fp.Add(&P.dt2, &P.dt2, &P.dt2) - fp.Add(&P.z2, &Q.z, &Q.z) -} - -func (P *pointR3) cneg(b int) { - t := &fp.Elt{} - fp.Cswap(&P.addYX, &P.subYX, uint(b)) - fp.Neg(t, &P.dt2) - fp.Cmov(&P.dt2, t, uint(b)) -} - -func (P *pointR3) cmov(Q *pointR3, b int) { - fp.Cmov(&P.addYX, &Q.addYX, uint(b)) - fp.Cmov(&P.subYX, &Q.subYX, uint(b)) - fp.Cmov(&P.dt2, &Q.dt2, uint(b)) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go deleted file mode 100644 index c3505b67ac..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -package ed25519 - -import cryptoEd25519 "crypto/ed25519" - -// PublicKey is the type of Ed25519 public keys. -type PublicKey cryptoEd25519.PublicKey diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go deleted file mode 100644 index d57d86eff0..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !go1.13 -// +build !go1.13 - -package ed25519 - -// PublicKey is the type of Ed25519 public keys. -type PublicKey []byte diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go deleted file mode 100644 index e4520f5203..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go +++ /dev/null @@ -1,87 +0,0 @@ -package ed25519 - -import ( - "crypto/rand" - "encoding/asn1" - - "github.com/cloudflare/circl/sign" -) - -var sch sign.Scheme = &scheme{} - -// Scheme returns a signature interface. 
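The cmov/cneg helpers above are what make the table lookups in fixedMult constant time: every table entry is touched, and the wanted one is selected by masking rather than by indexing with the secret digit. A hypothetical standalone sketch of the same pattern using crypto/subtle:

package main

import (
	"crypto/subtle"
	"fmt"
)

// lookup scans the whole table and conditionally copies each entry, so the
// memory access pattern is independent of secretIdx.
func lookup(table [][32]byte, secretIdx int) [32]byte {
	var out [32]byte
	for k := range table {
		// mask is 1 only when k == secretIdx; the copy call runs for every k.
		mask := subtle.ConstantTimeEq(int32(k), int32(secretIdx))
		subtle.ConstantTimeCopy(mask, out[:], table[k][:])
	}
	return out
}

func main() {
	table := make([][32]byte, 8)
	for k := range table {
		table[k][0] = byte(k)
	}
	fmt.Println(lookup(table, 5)[0]) // prints 5, without branching on the index
}

The Scheme() constructor that follows re-exposes all of this through the package's generic sign.Scheme interface.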
-func Scheme() sign.Scheme { return sch } - -type scheme struct{} - -func (*scheme) Name() string { return "Ed25519" } -func (*scheme) PublicKeySize() int { return PublicKeySize } -func (*scheme) PrivateKeySize() int { return PrivateKeySize } -func (*scheme) SignatureSize() int { return SignatureSize } -func (*scheme) SeedSize() int { return SeedSize } -func (*scheme) TLSIdentifier() uint { return 0x0807 } -func (*scheme) SupportsContext() bool { return false } -func (*scheme) Oid() asn1.ObjectIdentifier { - return asn1.ObjectIdentifier{1, 3, 101, 112} -} - -func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { - return GenerateKey(rand.Reader) -} - -func (*scheme) Sign( - sk sign.PrivateKey, - message []byte, - opts *sign.SignatureOpts, -) []byte { - priv, ok := sk.(PrivateKey) - if !ok { - panic(sign.ErrTypeMismatch) - } - if opts != nil && opts.Context != "" { - panic(sign.ErrContextNotSupported) - } - return Sign(priv, message) -} - -func (*scheme) Verify( - pk sign.PublicKey, - message, signature []byte, - opts *sign.SignatureOpts, -) bool { - pub, ok := pk.(PublicKey) - if !ok { - panic(sign.ErrTypeMismatch) - } - if opts != nil { - if opts.Context != "" { - panic(sign.ErrContextNotSupported) - } - } - return Verify(pub, message, signature) -} - -func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { - privateKey := NewKeyFromSeed(seed) - publicKey := make(PublicKey, PublicKeySize) - copy(publicKey, privateKey[SeedSize:]) - return publicKey, privateKey -} - -func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { - if len(buf) < PublicKeySize { - return nil, sign.ErrPubKeySize - } - pub := make(PublicKey, PublicKeySize) - copy(pub, buf[:PublicKeySize]) - return pub, nil -} - -func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { - if len(buf) < PrivateKeySize { - return nil, sign.ErrPrivKeySize - } - priv := make(PrivateKey, PrivateKeySize) - copy(priv, buf[:PrivateKeySize]) - return priv, nil -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go deleted file mode 100644 index 8763b426fc..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go +++ /dev/null @@ -1,213 +0,0 @@ -package ed25519 - -import fp "github.com/cloudflare/circl/math/fp25519" - -var tabSign = [fxV][fx2w1]pointR3{ - { - pointR3{ - addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, - subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, - dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, - }, - { - addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72}, - subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35}, - dt2: fp.Elt{0x08, 0x46, 
0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 0x07, 0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44}, - }, - { - addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e}, - subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12}, - dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35}, - }, - { - addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d}, - subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a}, - dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45}, - }, - }, - { - { - addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41}, - subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d}, - dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19}, - }, - { - addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b}, - subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53}, - dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31}, - }, - { - addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33}, - subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c}, - dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e}, - }, - { - addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 
0x70, 0x0a, 0x7f, 0x23}, - subYX: fp.Elt{0xdd, 0xef, 0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76}, - dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19}, - }, - }, -} - -var tabVerif = [1 << (omegaFix - 2)]pointR3{ - { /* 1P */ - addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, - subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, - dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, - }, - { /* 3P */ - addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a}, - subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a}, - dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a}, - }, - { /* 5P */ - addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29}, - subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15}, - dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43}, - }, - { /* 7P */ - addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46}, - subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d}, - dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a}, - }, - { /* 9P */ - addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34}, - subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49}, - dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 
0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 0x83, 0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73}, - }, - { /* 11P */ - addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42}, - subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18}, - dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d}, - }, - { /* 13P */ - addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23}, - subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02}, - dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49}, - }, - { /* 15P */ - addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61}, - subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43}, - dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51}, - }, - { /* 17P */ - addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d}, - subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71}, - dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47}, - }, - { /* 19P */ - addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c}, - subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21}, - dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48}, - }, - { /* 21P */ - addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 
0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 0x33, 0x55}, - subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77}, - dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a}, - }, - { /* 23P */ - addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44}, - subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01}, - dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15}, - }, - { /* 25P */ - addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b}, - subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78}, - dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72}, - }, - { /* 27P */ - addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79}, - subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c}, - dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70}, - }, - { /* 29P */ - addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06}, - subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13}, - dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c}, - }, - { /* 31P */ - addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a}, - subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a}, - dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 
0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 0xe0, 0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28}, - }, - { /* 33P */ - addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32}, - subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76}, - dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56}, - }, - { /* 35P */ - addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa}, - subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03}, - dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20}, - }, - { /* 37P */ - addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75}, - subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69}, - dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54}, - }, - { /* 39P */ - addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d}, - subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78}, - dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70}, - }, - { /* 41P */ - addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c}, - subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40}, - dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20}, - }, - { /* 43P */ - addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 
0xf3, 0x46, 0x1e, 0x8f, 0x2b}, - subYX: fp.Elt{0x43, 0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38}, - dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04}, - }, - { /* 45P */ - addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79}, - subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51}, - dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c}, - }, - { /* 47P */ - addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e}, - subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75}, - dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71}, - }, - { /* 49P */ - addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41}, - subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e}, - dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f}, - }, - { /* 51P */ - addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32}, - subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62}, - dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b}, - }, - { /* 53P */ - addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a}, - subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a}, - dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 
0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 0x2e, 0x5b, 0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a}, - }, - { /* 55P */ - addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c}, - subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11}, - dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05}, - }, - { /* 57P */ - addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28}, - subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e}, - dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a}, - }, - { /* 59P */ - addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12}, - subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d}, - dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33}, - }, - { /* 61P */ - addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21}, - subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23}, - dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d}, - }, - { /* 63P */ - addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a}, - subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31}, - dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e}, - }, -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go deleted file mode 100644 index c368b181b4..0000000000 
--- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go +++ /dev/null @@ -1,411 +0,0 @@ -// Package ed448 implements Ed448 signature scheme as described in RFC-8032. -// -// This package implements two signature variants. -// -// | Scheme Name | Sign Function | Verification | Context | -// |-------------|-------------------|---------------|-------------------| -// | Ed448 | Sign | Verify | Yes, can be empty | -// | Ed448Ph | SignPh | VerifyPh | Yes, can be empty | -// | All above | (PrivateKey).Sign | VerifyAny | As above | -// -// Specific functions for sign and verify are defined. A generic signing -// function for all schemes is available through the crypto.Signer interface, -// which is implemented by the PrivateKey type. A correspond all-in-one -// verification method is provided by the VerifyAny function. -// -// Both schemes require a context string for domain separation. This parameter -// is passed using a SignerOptions struct defined in this package. -// -// References: -// -// - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt -// - EdDSA for more curves: https://eprint.iacr.org/2015/677 -// - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1 -package ed448 - -import ( - "bytes" - "crypto" - cryptoRand "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "github.com/cloudflare/circl/ecc/goldilocks" - "github.com/cloudflare/circl/internal/sha3" - "github.com/cloudflare/circl/sign" -) - -const ( - // ContextMaxSize is the maximum length (in bytes) allowed for context. - ContextMaxSize = 255 - // PublicKeySize is the length in bytes of Ed448 public keys. - PublicKeySize = 57 - // PrivateKeySize is the length in bytes of Ed448 private keys. - PrivateKeySize = 114 - // SignatureSize is the length in bytes of signatures. - SignatureSize = 114 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 57 -) - -const ( - paramB = 456 / 8 // Size of keys in bytes. - hashSize = 2 * paramB // Size of the hash function's output. -) - -// SignerOptions implements crypto.SignerOpts and augments with parameters -// that are specific to the Ed448 signature schemes. -type SignerOptions struct { - // Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph. - crypto.Hash - - // Context is an optional domain separation string for signing. - // Its length must be less or equal than 255 bytes. - Context string - - // Scheme is an identifier for choosing a signature scheme. - Scheme SchemeID -} - -// SchemeID is an identifier for each signature scheme. -type SchemeID uint - -const ( - ED448 SchemeID = iota - ED448Ph -) - -// PublicKey is the type of Ed448 public keys. -type PublicKey []byte - -// Equal reports whether pub and x have the same value. -func (pub PublicKey) Equal(x crypto.PublicKey) bool { - xx, ok := x.(PublicKey) - return ok && bytes.Equal(pub, xx) -} - -// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Equal reports whether priv and x have the same value. -func (priv PrivateKey) Equal(x crypto.PrivateKey) bool { - xx, ok := x.(PrivateKey) - return ok && subtle.ConstantTimeCompare(priv, xx) == 1 -} - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[SeedSize:]) - return PublicKey(publicKey) -} - -// Seed returns the private key seed corresponding to priv. 
It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:SeedSize]) - return seed -} - -func (priv PrivateKey) Scheme() sign.Scheme { return sch } - -func (pub PublicKey) Scheme() sign.Scheme { return sch } - -func (priv PrivateKey) MarshalBinary() (data []byte, err error) { - privateKey := make(PrivateKey, PrivateKeySize) - copy(privateKey, priv) - return privateKey, nil -} - -func (pub PublicKey) MarshalBinary() (data []byte, err error) { - publicKey := make(PublicKey, PublicKeySize) - copy(publicKey, pub) - return publicKey, nil -} - -// Sign creates a signature of a message given a key pair. -// This function supports all the two signature variants defined in RFC-8032, -// namely Ed448 (or pure EdDSA) and Ed448Ph. -// The opts.HashFunc() must return zero to the specify Ed448 variant. This can -// be achieved by passing crypto.Hash(0) as the value for opts. -// Use an Options struct to pass a bool indicating that the ed448Ph variant -// should be used. -// The struct can also be optionally used to pass a context string for signing. -func (priv PrivateKey) Sign( - rand io.Reader, - message []byte, - opts crypto.SignerOpts, -) (signature []byte, err error) { - var ctx string - var scheme SchemeID - - if o, ok := opts.(SignerOptions); ok { - ctx = o.Context - scheme = o.Scheme - } - - switch true { - case scheme == ED448 && opts.HashFunc() == crypto.Hash(0): - return Sign(priv, message, ctx), nil - case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0): - return SignPh(priv, message, ctx), nil - default: - return nil, errors.New("ed448: bad hash algorithm") - } -} - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptoRand.Reader - } - - seed := make(PrivateKey, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[SeedSize:]) - - return publicKey, privateKey, nil -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - privateKey := make([]byte, PrivateKeySize) - newKeyFromSeed(privateKey, seed) - return privateKey -} - -func newKeyFromSeed(privateKey, seed []byte) { - if l := len(seed); l != SeedSize { - panic("ed448: bad seed length: " + strconv.Itoa(l)) - } - - var h [hashSize]byte - H := sha3.NewShake256() - _, _ = H.Write(seed) - _, _ = H.Read(h[:]) - s := &goldilocks.Scalar{} - deriveSecretScalar(s, h[:paramB]) - - copy(privateKey[:SeedSize], seed) - _ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:]) -} - -func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { - if len(ctx) > ContextMaxSize { - panic(fmt.Errorf("ed448: bad context length: %v", len(ctx))) - } - - H := sha3.NewShake256() - var PHM []byte - - if preHash { - var h [64]byte - _, _ = H.Write(message) - _, _ = H.Read(h[:]) - PHM = h[:] - H.Reset() - } else { - PHM = message - } - - // 1. Hash the 57-byte private key using SHAKE256(x, 114). 
- var h [hashSize]byte - _, _ = H.Write(privateKey[:SeedSize]) - _, _ = H.Read(h[:]) - s := &goldilocks.Scalar{} - deriveSecretScalar(s, h[:paramB]) - prefix := h[paramB:] - - // 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114). - var rPM [hashSize]byte - H.Reset() - - writeDom(&H, ctx, preHash) - - _, _ = H.Write(prefix) - _, _ = H.Write(PHM) - _, _ = H.Read(rPM[:]) - - // 3. Compute the point [r]B. - r := &goldilocks.Scalar{} - r.FromBytes(rPM[:]) - R := (&[paramB]byte{})[:] - if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil { - panic(err) - } - // 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114) - var hRAM [hashSize]byte - H.Reset() - - writeDom(&H, ctx, preHash) - - _, _ = H.Write(R) - _, _ = H.Write(privateKey[SeedSize:]) - _, _ = H.Write(PHM) - _, _ = H.Read(hRAM[:]) - - // 5. Compute S = (r + k * s) mod order. - k := &goldilocks.Scalar{} - k.FromBytes(hRAM[:]) - S := &goldilocks.Scalar{} - S.Mul(k, s) - S.Add(S, r) - - // 6. The signature is the concatenation of R and S. - copy(signature[:paramB], R[:]) - copy(signature[paramB:], S[:]) -} - -// Sign signs the message with privateKey and returns a signature. -// This function supports the signature variant defined in RFC-8032: Ed448, -// also known as the pure version of EdDSA. -// It will panic if len(privateKey) is not PrivateKeySize. -func Sign(priv PrivateKey, message []byte, ctx string) []byte { - signature := make([]byte, SignatureSize) - signAll(signature, priv, message, []byte(ctx), false) - return signature -} - -// SignPh creates a signature of a message given a keypair. -// This function supports the signature variant defined in RFC-8032: Ed448ph, -// meaning it internally hashes the message using SHAKE-256. -// Context could be passed to this function, which length should be no more than -// 255. It can be empty. -func SignPh(priv PrivateKey, message []byte, ctx string) []byte { - signature := make([]byte, SignatureSize) - signAll(signature, priv, message, []byte(ctx), true) - return signature -} - -func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool { - if len(public) != PublicKeySize || - len(signature) != SignatureSize || - len(ctx) > ContextMaxSize || - !isLessThanOrder(signature[paramB:]) { - return false - } - - P, err := goldilocks.FromBytes(public) - if err != nil { - return false - } - - H := sha3.NewShake256() - var PHM []byte - - if preHash { - var h [64]byte - _, _ = H.Write(message) - _, _ = H.Read(h[:]) - PHM = h[:] - H.Reset() - } else { - PHM = message - } - - var hRAM [hashSize]byte - R := signature[:paramB] - - writeDom(&H, ctx, preHash) - - _, _ = H.Write(R) - _, _ = H.Write(public) - _, _ = H.Write(PHM) - _, _ = H.Read(hRAM[:]) - - k := &goldilocks.Scalar{} - k.FromBytes(hRAM[:]) - S := &goldilocks.Scalar{} - S.FromBytes(signature[paramB:]) - - encR := (&[paramB]byte{})[:] - P.Neg() - _ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR) - return bytes.Equal(R, encR) -} - -// VerifyAny returns true if the signature is valid. Failure cases are invalid -// signature, or when the public key cannot be decoded. -// This function supports all the two signature variants defined in RFC-8032, -// namely Ed448 (or pure EdDSA) and Ed448Ph. -// The opts.HashFunc() must return zero, this can be achieved by passing -// crypto.Hash(0) as the value for opts. -// Use a SignerOptions struct to pass a context string for signing. 
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool { - var ctx string - var scheme SchemeID - if o, ok := opts.(SignerOptions); ok { - ctx = o.Context - scheme = o.Scheme - } - - switch true { - case scheme == ED448 && opts.HashFunc() == crypto.Hash(0): - return Verify(public, message, signature, ctx) - case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0): - return VerifyPh(public, message, signature, ctx) - default: - return false - } -} - -// Verify returns true if the signature is valid. Failure cases are invalid -// signature, or when the public key cannot be decoded. -// This function supports the signature variant defined in RFC-8032: Ed448, -// also known as the pure version of EdDSA. -func Verify(public PublicKey, message, signature []byte, ctx string) bool { - return verify(public, message, signature, []byte(ctx), false) -} - -// VerifyPh returns true if the signature is valid. Failure cases are invalid -// signature, or when the public key cannot be decoded. -// This function supports the signature variant defined in RFC-8032: Ed448ph, -// meaning it internally hashes the message using SHAKE-256. -// Context could be passed to this function, which length should be no more than -// 255. It can be empty. -func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool { - return verify(public, message, signature, []byte(ctx), true) -} - -func deriveSecretScalar(s *goldilocks.Scalar, h []byte) { - h[0] &= 0xFC // The two least significant bits of the first octet are cleared, - h[paramB-1] = 0x00 // all eight bits the last octet are cleared, and - h[paramB-2] |= 0x80 // the highest bit of the second to last octet is set. - s.FromBytes(h[:paramB]) -} - -// isLessThanOrder returns true if 0 <= x < order and if the last byte of x is zero. -func isLessThanOrder(x []byte) bool { - order := goldilocks.Curve{}.Order() - i := len(order) - 1 - for i > 0 && x[i] == order[i] { - i-- - } - return x[paramB-1] == 0 && x[i] < order[i] -} - -func writeDom(h io.Writer, ctx []byte, preHash bool) { - dom4 := "SigEd448" - _, _ = h.Write([]byte(dom4)) - - if preHash { - _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) - } else { - _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) - } - _, _ = h.Write(ctx) -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go deleted file mode 100644 index 22da8bc0a5..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go +++ /dev/null @@ -1,87 +0,0 @@ -package ed448 - -import ( - "crypto/rand" - "encoding/asn1" - - "github.com/cloudflare/circl/sign" -) - -var sch sign.Scheme = &scheme{} - -// Scheme returns a signature interface. 
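
To make the pure versus pre-hashed split concrete, a round-trip sketch using the helpers above; the fixed all-zero seed and the "ctx" context string are illustrative only.

```go
seed := make([]byte, ed448.SeedSize) // all-zero seed, for illustration only
priv := ed448.NewKeyFromSeed(seed)
pub := priv.Public().(ed448.PublicKey) // Public() from the crypto.Signer implementation

msg := []byte("attested payload")
sigPure := ed448.Sign(priv, msg, "ctx")   // pure Ed448
sigPh := ed448.SignPh(priv, msg, "ctx")   // Ed448ph: SHAKE256-prehashes msg internally

fmt.Println(ed448.Verify(pub, msg, sigPure, "ctx")) // true
fmt.Println(ed448.VerifyPh(pub, msg, sigPh, "ctx")) // true
fmt.Println(ed448.Verify(pub, msg, sigPh, "ctx"))   // false: dom4 domain-separates the variants
```
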
-func Scheme() sign.Scheme { return sch } - -type scheme struct{} - -func (*scheme) Name() string { return "Ed448" } -func (*scheme) PublicKeySize() int { return PublicKeySize } -func (*scheme) PrivateKeySize() int { return PrivateKeySize } -func (*scheme) SignatureSize() int { return SignatureSize } -func (*scheme) SeedSize() int { return SeedSize } -func (*scheme) TLSIdentifier() uint { return 0x0808 } -func (*scheme) SupportsContext() bool { return true } -func (*scheme) Oid() asn1.ObjectIdentifier { - return asn1.ObjectIdentifier{1, 3, 101, 113} -} - -func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { - return GenerateKey(rand.Reader) -} - -func (*scheme) Sign( - sk sign.PrivateKey, - message []byte, - opts *sign.SignatureOpts, -) []byte { - priv, ok := sk.(PrivateKey) - if !ok { - panic(sign.ErrTypeMismatch) - } - ctx := "" - if opts != nil { - ctx = opts.Context - } - return Sign(priv, message, ctx) -} - -func (*scheme) Verify( - pk sign.PublicKey, - message, signature []byte, - opts *sign.SignatureOpts, -) bool { - pub, ok := pk.(PublicKey) - if !ok { - panic(sign.ErrTypeMismatch) - } - ctx := "" - if opts != nil { - ctx = opts.Context - } - return Verify(pub, message, signature, ctx) -} - -func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { - privateKey := NewKeyFromSeed(seed) - publicKey := make(PublicKey, PublicKeySize) - copy(publicKey, privateKey[SeedSize:]) - return publicKey, privateKey -} - -func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { - if len(buf) < PublicKeySize { - return nil, sign.ErrPubKeySize - } - pub := make(PublicKey, PublicKeySize) - copy(pub, buf[:PublicKeySize]) - return pub, nil -} - -func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { - if len(buf) < PrivateKeySize { - return nil, sign.ErrPrivKeySize - } - priv := make(PrivateKey, PrivateKeySize) - copy(priv, buf[:PrivateKeySize]) - return priv, nil -} diff --git a/openshift/tools/vendor/github.com/cloudflare/circl/sign/sign.go b/openshift/tools/vendor/github.com/cloudflare/circl/sign/sign.go deleted file mode 100644 index 557d6f0960..0000000000 --- a/openshift/tools/vendor/github.com/cloudflare/circl/sign/sign.go +++ /dev/null @@ -1,113 +0,0 @@ -// Package sign provides unified interfaces for signature schemes. -// -// A register of schemes is available in the package -// -// github.com/cloudflare/circl/sign/schemes -package sign - -import ( - "crypto" - "encoding" - "errors" -) - -type SignatureOpts struct { - // If non-empty, includes the given context in the signature if supported - // and will cause an error during signing otherwise. - Context string -} - -// A public key is used to verify a signature set by the corresponding private -// key. -type PublicKey interface { - // Returns the signature scheme for this public key. - Scheme() Scheme - Equal(crypto.PublicKey) bool - encoding.BinaryMarshaler - crypto.PublicKey -} - -// A private key allows one to create signatures. -type PrivateKey interface { - // Returns the signature scheme for this private key. - Scheme() Scheme - Equal(crypto.PrivateKey) bool - // For compatibility with Go standard library - crypto.Signer - crypto.PrivateKey - encoding.BinaryMarshaler -} - -// A Scheme represents a specific instance of a signature scheme. -type Scheme interface { - // Name of the scheme. - Name() string - - // GenerateKey creates a new key-pair. 
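
The scheme type above is what exposes ed448 through the generic sign.Scheme interface defined next; a sketch of that indirection, with the context string as a placeholder and errors condensed:

```go
import (
	"github.com/cloudflare/circl/sign"
	"github.com/cloudflare/circl/sign/ed448"
)

func roundTrip(msg []byte) bool {
	var s sign.Scheme = ed448.Scheme()
	pub, priv, err := s.GenerateKey()
	if err != nil {
		panic(err)
	}
	opts := &sign.SignatureOpts{Context: "ctx"}
	sig := s.Sign(priv, msg, opts)

	// Keys survive a marshal/unmarshal round trip through the generic interface.
	raw, _ := pub.MarshalBinary()
	pub2, _ := s.UnmarshalBinaryPublicKey(raw)
	return s.Verify(pub2, msg, sig, opts)
}
```
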
- GenerateKey() (PublicKey, PrivateKey, error) - - // Creates a signature using the PrivateKey on the given message and - // returns the signature. opts are additional options which can be nil. - // - // Panics if key is nil or wrong type or opts context is not supported. - Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte - - // Checks whether the given signature is a valid signature set by - // the private key corresponding to the given public key on the - // given message. opts are additional options which can be nil. - // - // Panics if key is nil or wrong type or opts context is not supported. - Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool - - // Deterministically derives a keypair from a seed. If you're unsure, - // you're better off using GenerateKey(). - // - // Panics if seed is not of length SeedSize(). - DeriveKey(seed []byte) (PublicKey, PrivateKey) - - // Unmarshals a PublicKey from the provided buffer. - UnmarshalBinaryPublicKey([]byte) (PublicKey, error) - - // Unmarshals a PublicKey from the provided buffer. - UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error) - - // Size of binary marshalled public keys. - PublicKeySize() int - - // Size of binary marshalled public keys. - PrivateKeySize() int - - // Size of signatures. - SignatureSize() int - - // Size of seeds. - SeedSize() int - - // Returns whether contexts are supported. - SupportsContext() bool -} - -var ( - // ErrTypeMismatch is the error used if types of, for instance, private - // and public keys don't match. - ErrTypeMismatch = errors.New("types mismatch") - - // ErrSeedSize is the error used if the provided seed is of the wrong - // size. - ErrSeedSize = errors.New("wrong seed size") - - // ErrPubKeySize is the error used if the provided public key is of - // the wrong size. - ErrPubKeySize = errors.New("wrong size for public key") - - // ErrPrivKeySize is the error used if the provided private key is of - // the wrong size. - ErrPrivKeySize = errors.New("wrong size for private key") - - // ErrContextNotSupported is the error used if a context is not - // supported. - ErrContextNotSupported = errors.New("context not supported") - - // ErrContextTooLong is the error used if the context string is too long. - ErrContextTooLong = errors.New("context string too long") -) diff --git a/openshift/tools/vendor/github.com/google/btree/LICENSE b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE similarity index 100% rename from openshift/tools/vendor/github.com/google/btree/LICENSE rename to openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE diff --git a/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 0000000000..a9e1b72ba7 --- /dev/null +++ b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,756 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path" + "runtime" + "strings" + "sync" + "sync/atomic" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" + "golang.org/x/sync/errgroup" +) + +type GzipHelperFunc func(io.Reader) (io.ReadCloser, error) + +type options struct { + chunkSize int + compressionLevel int + prioritizedFiles []string + missedPrioritizedFiles *[]string + compression Compression + ctx context.Context + minChunkSize int + gzipHelperFunc GzipHelperFunc +} + +type Option func(o *options) error + +// WithChunkSize option specifies the chunk size of eStargz blob to build. +func WithChunkSize(chunkSize int) Option { + return func(o *options) error { + o.chunkSize = chunkSize + return nil + } +} + +// WithCompressionLevel option specifies the gzip compression level. +// The default is gzip.BestCompression. +// This option will be ignored if WithCompression option is used. +// See also: https://godoc.org/compress/gzip#pkg-constants +func WithCompressionLevel(level int) Option { + return func(o *options) error { + o.compressionLevel = level + return nil + } +} + +// WithPrioritizedFiles option specifies the list of prioritized files. +// These files must be complete paths that are absolute or relative to "/" +// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar" +// are treated as "/foo/bar". +func WithPrioritizedFiles(files []string) Option { + return func(o *options) error { + o.prioritizedFiles = files + return nil + } +} + +// WithAllowPrioritizeNotFound makes Build continue the execution even if some +// of prioritized files specified by WithPrioritizedFiles option aren't found +// in the input tar. Instead, this records all missed file names to the passed +// slice. +func WithAllowPrioritizeNotFound(missedFiles *[]string) Option { + return func(o *options) error { + if missedFiles == nil { + return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed") + } + o.missedPrioritizedFiles = missedFiles + return nil + } +} + +// WithCompression specifies compression algorithm to be used. +// Default is gzip. +func WithCompression(compression Compression) Option { + return func(o *options) error { + o.compression = compression + return nil + } +} + +// WithContext specifies a context that can be used for clean canceleration. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + +// WithMinChunkSize option specifies the minimal number of bytes of data +// must be written in one gzip stream. +// By increasing this number, one gzip stream can contain multiple files +// and it hopefully leads to smaller result blob. +// NOTE: This adds a TOC property that old reader doesn't understand. +func WithMinChunkSize(minChunkSize int) Option { + return func(o *options) error { + o.minChunkSize = minChunkSize + return nil + } +} + +// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers. 
+// When a gzip-compressed layer is detected, this function will be used instead of the +// Go standard library gzip decompression for better performance. +// The function should take an io.Reader as input and return an io.ReadCloser. +// If nil, the Go standard library gzip.NewReader will be used. +func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option { + return func(o *options) error { + o.gzipHelperFunc = gzipHelperFunc + return nil + } +} + +// Blob is an eStargz blob. +type Blob struct { + io.ReadCloser + diffID digest.Digester + tocDigest digest.Digest + readCompleted *atomic.Bool + uncompressedSize *atomic.Int64 +} + +// DiffID returns the digest of uncompressed blob. +// It is only valid to call DiffID after Close. +func (b *Blob) DiffID() digest.Digest { + return b.diffID.Digest() +} + +// TOCDigest returns the digest of uncompressed TOC JSON. +func (b *Blob) TOCDigest() digest.Digest { + return b.tocDigest +} + +// UncompressedSize returns the size of uncompressed blob. +// UncompressedSize should only be called after the blob has been fully read. +func (b *Blob) UncompressedSize() (int64, error) { + switch { + case b.uncompressedSize == nil || b.readCompleted == nil: + return -1, fmt.Errorf("readCompleted or uncompressedSize is not initialized") + case !b.readCompleted.Load(): + return -1, fmt.Errorf("called UncompressedSize before the blob has been fully read") + default: + return b.uncompressedSize.Load(), nil + } +} + +// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd +// or plain tar) passed through the argument. If there are some prioritized files are listed in +// the option, these files are grouped as "prioritized" and can be used for runtime optimization +// (e.g. prefetch). This function builds a blob in parallel, with dividing that blob into several +// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs. +func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { + var opts options + opts.compressionLevel = gzip.BestCompression // BestCompression by default + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + if opts.compression == nil { + opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) + } + layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() + defer func() { + if rErr != nil { + if err := layerFiles.CleanupAll(); err != nil { + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) + } + } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } + }() + tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc) + if err != nil { + return nil, err + } + entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles) + if err != nil { + return nil, err + } + var tarParts [][]*entry + if opts.minChunkSize > 0 { + // Each entry needs to know the size of the current gzip stream so they + // cannot be processed in parallel. 
+ tarParts = [][]*entry{entries} + } else { + tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) + } + writers := make([]*Writer, len(tarParts)) + payloads := make([]*os.File, len(tarParts)) + var mu sync.Mutex + var eg errgroup.Group + for i, parts := range tarParts { + i, parts := i, parts + // builds verifiable stargz sub-blobs + eg.Go(func() error { + esgzFile, err := layerFiles.TempFile("", "esgzdata") + if err != nil { + return err + } + sw := NewWriterWithCompressor(esgzFile, opts.compression) + sw.ChunkSize = opts.chunkSize + sw.MinChunkSize = opts.minChunkSize + if sw.needsOpenGzEntries == nil { + sw.needsOpenGzEntries = make(map[string]struct{}) + } + for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { + sw.needsOpenGzEntries[f] = struct{}{} + } + if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { + return err + } + mu.Lock() + writers[i] = sw + payloads[i] = esgzFile + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + rErr = err + return nil, err + } + tocAndFooter, tocDgst, err := closeWithCombine(writers...) + if err != nil { + rErr = err + return nil, err + } + var rs []io.Reader + for _, p := range payloads { + fs, err := fileSectionReader(p) + if err != nil { + return nil, err + } + rs = append(rs, fs) + } + diffID := digest.Canonical.Digester() + pr, pw := io.Pipe() + readCompleted := new(atomic.Bool) + uncompressedSize := new(atomic.Int64) + go func() { + var size int64 + var decompressFunc func(io.Reader) (io.ReadCloser, error) + if _, ok := opts.compression.(*gzipCompression); ok && opts.gzipHelperFunc != nil { + decompressFunc = opts.gzipHelperFunc + } else { + decompressFunc = opts.compression.Reader + } + decompressR, err := decompressFunc(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + if err != nil { + pw.CloseWithError(err) + return + } + defer decompressR.Close() + if size, err = io.Copy(diffID.Hash(), decompressR); err != nil { + pw.CloseWithError(err) + return + } + uncompressedSize.Store(size) + readCompleted.Store(true) + pw.Close() + }() + return &Blob{ + ReadCloser: readCloser{ + Reader: pr, + closeFunc: layerFiles.CleanupAll, + }, + tocDigest: tocDgst, + diffID: diffID, + readCompleted: readCompleted, + uncompressedSize: uncompressedSize, + }, nil +} + +// closeWithCombine takes unclosed Writers and close them. This also returns the +// toc that combined all Writers into. +// Writers doesn't write TOC and footer to the underlying writers so they can be +// combined into a single eStargz and tocAndFooter returned by this function can +// be appended at the tail of that combined blob. 
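
Before the combining helpers below, a usage sketch for Build as defined above. The file names and the prioritized path are hypothetical:

```go
import (
	"fmt"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func convert(in, out string) error {
	f, err := os.Open(in) // gzip, zstd, or plain tar layer
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}

	var missed []string
	blob, err := estargz.Build(io.NewSectionReader(f, 0, fi.Size()),
		estargz.WithChunkSize(4<<20),
		estargz.WithPrioritizedFiles([]string{"etc/app/config.yaml"}), // hypothetical prefetch list
		estargz.WithAllowPrioritizeNotFound(&missed),
	)
	if err != nil {
		return err
	}

	dst, err := os.Create(out)
	if err != nil {
		return err
	}
	defer dst.Close()
	if _, err := io.Copy(dst, blob); err != nil {
		return err
	}
	if err := blob.Close(); err != nil { // DiffID is only valid after Close
		return err
	}
	fmt.Println("diffID:", blob.DiffID(), "tocDigest:", blob.TOCDigest(), "missed:", missed)
	return nil
}
```
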
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { + if len(ws) == 0 { + return nil, "", fmt.Errorf("at least one writer must be passed") + } + for _, w := range ws { + if w.closed { + return nil, "", fmt.Errorf("writer must be unclosed") + } + defer func(w *Writer) { w.closed = true }(w) + if err := w.closeGz(); err != nil { + return nil, "", err + } + if err := w.bw.Flush(); err != nil { + return nil, "", err + } + } + var ( + mtoc = new(JTOC) + currentOffset int64 + ) + mtoc.Version = ws[0].toc.Version + for _, w := range ws { + for _, e := range w.toc.Entries { + // Recalculate Offset of non-empty files/chunks + if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" { + e.Offset += currentOffset + } + mtoc.Entries = append(mtoc.Entries, e) + } + if w.toc.Version > mtoc.Version { + mtoc.Version = w.toc.Version + } + currentOffset += w.cw.n + } + + return tocAndFooter(ws[0].compressor, mtoc, currentOffset) +} + +func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) { + buf := new(bytes.Buffer) + tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil) + if err != nil { + return nil, "", err + } + return buf, tocDigest, nil +} + +// divideEntries divides passed entries to the parts at least the number specified by the +// argument. +func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) { + var estimatedSize int64 + for _, e := range entries { + estimatedSize += e.header.Size + } + unitSize := estimatedSize / int64(minPartsNum) + var ( + nextEnd = unitSize + offset int64 + ) + set = append(set, []*entry{}) + for _, e := range entries { + set[len(set)-1] = append(set[len(set)-1], e) + offset += e.header.Size + if offset > nextEnd { + set = append(set, []*entry{}) + nextEnd += unitSize + } + } + return +} + +var errNotFound = errors.New("not found") + +// sortEntries reads the specified tar blob and returns a list of tar entries. +// If some of prioritized files are specified, the list starts from these +// files with keeping the order specified by the argument. +func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) { + + // Import tar file. + intar, err := importTar(in) + if err != nil { + return nil, fmt.Errorf("failed to sort: %w", err) + } + + // Sort the tar file respecting to the prioritized files list. + sorted := &tarFile{} + picked := make(map[string]struct{}) + for _, l := range prioritized { + if err := moveRec(l, intar, sorted, picked); err != nil { + if errors.Is(err, errNotFound) && missedPrioritized != nil { + *missedPrioritized = append(*missedPrioritized, l) + continue // allow not found + } + return nil, fmt.Errorf("failed to sort tar entries: %w", err) + } + } + if len(prioritized) == 0 { + sorted.add(&entry{ + header: &tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + }, + payload: bytes.NewReader([]byte{landmarkContents}), + }) + } else { + sorted.add(&entry{ + header: &tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + }, + payload: bytes.NewReader([]byte{landmarkContents}), + }) + } + + // Dump prioritized entries followed by the rest entries while skipping picked ones. + return append(sorted.dump(nil), intar.dump(picked)...), nil +} + +// readerFromEntries returns a reader of tar archive that contains entries passed +// through the arguments. 
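
As a concrete illustration of divideEntries before readerFromEntries below, a sketch that would have to live in an in-package test, since entry is unexported. With header sizes 30+30+30+10 and minPartsNum 2, estimatedSize is 100 and unitSize 50, so the cut lands after the entry that pushes the running offset past 50:

```go
es := []*entry{
	{header: &tar.Header{Name: "a", Size: 30}},
	{header: &tar.Header{Name: "b", Size: 30}}, // running offset reaches 60 > 50 here
	{header: &tar.Header{Name: "c", Size: 30}},
	{header: &tar.Header{Name: "d", Size: 10}},
}
parts := divideEntries(es, 2)
fmt.Println(len(parts), len(parts[0]), len(parts[1])) // 2 2 2, i.e. [[a b] [c d]]
```
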
+func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReadSeeker(in) + if err != nil { + return nil, fmt.Errorf("failed to make position watcher: %w", err) + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to parse tar file, %w", err) + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile, picked map[string]struct{}) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + if _, done := picked[name]; !done { + out.add(e) + picked[name] = struct{}{} + } + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + _, okPicked := picked[name] + if !okIn && !okOut && !okPicked { + return fmt.Errorf("file: %q: %w", name, errNotFound) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out, picked); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out, picked); err != nil { + return err + } + } + if _, done := picked[name]; done { + return nil + } + if e, ok := in.get(name); ok { + out.add(e) + picked[name] = struct{}{} + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump(skip map[string]struct{}) []*entry { + if len(skip) == 0 { + return f.stream + } + var out []*entry + for _, e := range f.stream { + if _, ok := skip[cleanEntryName(e.header.Name)]; ok { + continue + } + out = append(out, e) + } + return out +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + 
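
The tarFile plumbing above can be exercised end to end. A sketch for an in-package test (both helpers are unexported), assuming the standard archive/tar, bytes, fmt, io, and strings imports:

```go
r := readerFromEntries(&entry{
	header:  &tar.Header{Name: "hello.txt", Typeflag: tar.TypeReg, Size: 5},
	payload: strings.NewReader("hello"),
})
buf := new(bytes.Buffer)
if _, err := io.Copy(buf, r); err != nil {
	panic(err)
}
tf, err := importTar(bytes.NewReader(buf.Bytes())) // bytes.Reader satisfies io.ReaderAt
if err != nil {
	panic(err)
}
e, ok := tf.get("hello.txt")
fmt.Println(ok, e.header.Size) // true 5
```
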
+func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { + f, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { + pos := int64(0) + return &countReadSeeker{r: r, cPos: &pos}, nil +} + +type countReadSeeker struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReadSeeker) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReadSeeker) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + var dgR io.ReadCloser + var err error + if gzipHelperFunc != nil { + dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size())) + } else { + dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + } + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 0000000000..6de78b02dc --- /dev/null +++ 
b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. +func Aggregate(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + points := make([]string, len(errs)+1) + points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) + for i, err := range errs { + points[i+1] = fmt.Sprintf("* %s", err) + } + return errors.New(strings.Join(points, "\n\t")) + } +} diff --git a/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go new file mode 100644 index 0000000000..ff91a37add --- /dev/null +++ b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -0,0 +1,1232 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "errors" + "fmt" + "hash" + "io" + "os" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + digest "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +// A Reader permits random access reads from a stargz file. +type Reader struct { + sr *io.SectionReader + toc *JTOC + tocDigest digest.Digest + + // m stores all non-chunk entries, keyed by name. + m map[string]*TOCEntry + + // chunks stores all TOCEntry values for regular files that + // are split up. For a file with a single chunk, it's only + // stored in m. + chunks map[string][]*TOCEntry + + decompressor Decompressor +} + +type openOpts struct { + tocOffset int64 + decompressors []Decompressor + telemetry *Telemetry +} + +// OpenOption is an option used during opening the layer +type OpenOption func(o *openOpts) error + +// WithTOCOffset option specifies the offset of TOC +func WithTOCOffset(tocOffset int64) OpenOption { + return func(o *openOpts) error { + o.tocOffset = tocOffset + return nil + } +} + +// WithDecompressors option specifies decompressors to use. +// Default is gzip-based decompressor. 
+func WithDecompressors(decompressors ...Decompressor) OpenOption { + return func(o *openOpts) error { + o.decompressors = decompressors + return nil + } +} + +// WithTelemetry option specifies the telemetry hooks +func WithTelemetry(telemetry *Telemetry) OpenOption { + return func(o *openOpts) error { + o.telemetry = telemetry + return nil + } +} + +// MeasureLatencyHook is a func which takes start time and records the diff +type MeasureLatencyHook func(time.Time) + +// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record +// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...) +type Telemetry struct { + GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds) + GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds) + DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds) +} + +// Open opens a stargz file for reading. +// The behavior is configurable using options. +// +// Note that each entry name is normalized as the path that is relative to root. +func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { + var opts openOpts + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + + gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} + decompressors := append(gzipCompressors, opts.decompressors...) + + // Determine the size to fetch. Try to fetch as many bytes as possible. + fetchSize := maxFooterSize(sr.Size(), decompressors...) + if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { + if maybeTocOffset > sr.Size() { + return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) + } + fetchSize = sr.Size() - maybeTocOffset + } + + start := time.Now() // before getting layer footer + footer := make([]byte, fetchSize) + if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { + return nil, fmt.Errorf("error reading footer: %v", err) + } + if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { + opts.telemetry.GetFooterLatency(start) + } + + var allErr []error + var found bool + var r *Reader + for _, d := range decompressors { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + maybeTocBytes := footer[:fOffset] + _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) + if err != nil { + allErr = append(allErr, err) + continue + } + if tocOffset >= 0 && tocSize <= 0 { + tocSize = sr.Size() - tocOffset - fSize + } + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { + maybeTocBytes = maybeTocBytes[:tocSize] + } + r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) + if err == nil { + found = true + break + } + allErr = append(allErr, err) + } + if !found { + return nil, errorutil.Aggregate(allErr) + } + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +// only supports gzip-based eStargz. 
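
A usage sketch for Open with the telemetry hooks above, assuming the standard io, log, and time imports; the entry path is hypothetical, and Lookup/OpenFile are the accessors defined later in this file:

```go
func inspect(sr *io.SectionReader) error {
	tel := &estargz.Telemetry{
		GetFooterLatency:      func(start time.Time) { log.Printf("footer: %v", time.Since(start)) },
		DeserializeTocLatency: func(start time.Time) { log.Printf("toc decode: %v", time.Since(start)) },
	}
	r, err := estargz.Open(sr, estargz.WithTelemetry(tel))
	if err != nil {
		return err
	}
	if ent, ok := r.Lookup("etc/hostname"); ok {
		fr, err := r.OpenFile(ent.Name)
		if err != nil {
			return err
		}
		b := make([]byte, ent.Size)
		if _, err := fr.ReadAt(b, 0); err != nil {
			return err
		}
		log.Printf("%s: %q", ent.Name, b)
	}
	return nil
}
```
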
+func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + var allErr []error + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) + if err == nil { + return tocOffset, fSize, err + } + allErr = append(allErr, err) + } + return 0, 0, errorutil.Aggregate(allErr) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. +// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. +func (r *Reader) initFields() error { + r.m = make(map[string]*TOCEntry, len(r.toc.Entries)) + r.chunks = make(map[string][]*TOCEntry) + var lastPath string + uname := map[int]string{} + gname := map[int]string{} + var lastRegEnt *TOCEntry + var chunkTopIndex int + for i, ent := range r.toc.Entries { + ent.Name = cleanEntryName(ent.Name) + switch ent.Type { + case "reg", "chunk": + if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { + chunkTopIndex = i + } + ent.chunkTopIndex = chunkTopIndex + } + if ent.Type == "reg" { + lastRegEnt = ent + } + if ent.Type == "chunk" { + ent.Name = lastPath + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + if ent.ChunkSize == 0 && lastRegEnt != nil { + ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset + } + } else { + lastPath = ent.Name + + if ent.Uname != "" { + uname[ent.UID] = ent.Uname + } else { + ent.Uname = uname[ent.UID] + } + if ent.Gname != "" { + gname[ent.GID] = ent.Gname + } else { + ent.Gname = uname[ent.GID] + } + + ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339) + + if ent.Type == "dir" { + ent.NumLink++ // Parent dir links to this directory + } + r.m[ent.Name] = ent + } + if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size { + r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1) + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + } + if ent.ChunkSize == 0 && ent.Size != 0 { + ent.ChunkSize = ent.Size + } + } + + // Populate children, add implicit directories: + for _, ent := range r.toc.Entries { + if ent.Type == "chunk" { + continue + } + // add "foo/": + // add "foo" child to "" (creating "" if necessary) + // + // add "foo/bar/": + // add "bar" child to "foo" (creating "foo" if necessary) + // + // add "foo/bar.txt": + // add "bar.txt" child to "foo" (creating "foo" if necessary) + // + // add "a/b/c/d/e/f.txt": + // create "a/b/c/d/e" node + // add "f.txt" child to "e" + + name := ent.Name + pdirName := parentDir(name) + if name == pdirName { + // This entry and its parent are the same. + // Ignore this for avoiding infinite loop of the reference. + // The example case where this can occur is when tar contains the root + // directory itself (e.g. "./", "/"). + continue + } + pdir := r.getOrCreateDir(pdirName) + ent.NumLink++ // at least one name(ent.Name) references this entry. + if ent.Type == "hardlink" { + org, err := r.getSource(ent) + if err != nil { + return err + } + org.NumLink++ // original entry is referenced by this ent.Name. 
+ ent = org + } + pdir.addChild(path.Base(name), ent) + } + + lastOffset := r.sr.Size() + for i := len(r.toc.Entries) - 1; i >= 0; i-- { + e := r.toc.Entries[i] + if e.isDataType() { + e.nextOffset = lastOffset + } + if e.Offset != 0 && e.InnerOffset == 0 { + lastOffset = e.Offset + } + } + + if len(r.m) == 0 { + r.m[""] = &TOCEntry{ + Name: "", + Type: "dir", + Mode: 0755, + NumLink: 1, + } + } + + return nil +} + +func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) { + if ent.Type == "hardlink" { + org, ok := r.m[cleanEntryName(ent.LinkName)] + if !ok { + return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName) + } + ent, err = r.getSource(org) + if err != nil { + return nil, err + } + } + return ent, nil +} + +func parentDir(p string) string { + dir, _ := path.Split(p) + return strings.TrimSuffix(dir, "/") +} + +func (r *Reader) getOrCreateDir(d string) *TOCEntry { + e, ok := r.m[d] + if !ok { + e = &TOCEntry{ + Name: d, + Type: "dir", + Mode: 0755, + NumLink: 2, // The directory itself(.) and the parent link to this directory. + } + r.m[d] = e + if d != "" { + pdir := r.getOrCreateDir(parentDir(d)) + pdir.addChild(path.Base(d), e) + } + } + return e +} + +func (r *Reader) TOCDigest() digest.Digest { + return r.tocDigest +} + +// VerifyTOC checks that the TOC JSON in the passed blob matches the +// passed digests and that the TOC JSON contains digests for all chunks +// contained in the blob. If the verification succceeds, this function +// returns TOCEntryVerifier which holds all chunk digests in the stargz blob. +func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) { + // Verify the digest of TOC JSON + if r.tocDigest != tocDigest { + return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest) + } + return r.Verifiers() +} + +// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases +// because this doesn't verify TOC. +func (r *Reader) Verifiers() (TOCEntryVerifier, error) { + chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest + regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest + var chunkDigestMapIncomplete bool + var regDigestMapIncomplete bool + var containsChunk bool + for _, e := range r.toc.Entries { + if e.Type != "reg" && e.Type != "chunk" { + continue + } + + // offset must be unique in stargz blob + _, dOK := chunkDigestMap[e.Offset] + _, rOK := regDigestMap[e.Offset] + if dOK || rOK { + return nil, fmt.Errorf("offset %d found twice", e.Offset) + } + + if e.Type == "reg" { + if e.Size == 0 { + continue // ignores empty file + } + + // record the digest of regular file payload + if e.Digest != "" { + d, err := digest.Parse(e.Digest) + if err != nil { + return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) + } + regDigestMap[e.Offset] = d + } else { + regDigestMapIncomplete = true + } + } else { + containsChunk = true // this layer contains "chunk" entries. + } + + // "reg" also can contain ChunkDigest (e.g. 
when "reg" is the first entry of + // chunked file) + if e.ChunkDigest != "" { + d, err := digest.Parse(e.ChunkDigest) + if err != nil { + return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) + } + chunkDigestMap[e.Offset] = d + } else { + chunkDigestMapIncomplete = true + } + } + + if chunkDigestMapIncomplete { + // Though some chunk digests are not found, if this layer doesn't contain + // "chunk"s and all digest of "reg" files are recorded, we can use them instead. + if !containsChunk && !regDigestMapIncomplete { + return &verifier{digestMap: regDigestMap}, nil + } + return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON") + } + + return &verifier{digestMap: chunkDigestMap}, nil +} + +// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by +// offset of the chunk. +type verifier struct { + digestMap map[int64]digest.Digest + digestMapMu sync.Mutex +} + +// Verifier returns a content verifier specified by TOCEntry. +func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) { + v.digestMapMu.Lock() + defer v.digestMapMu.Unlock() + d, ok := v.digestMap[ce.Offset] + if !ok { + return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered", + ce.Offset, ce.ChunkSize) + } + return d.Verifier(), nil +} + +// ChunkEntryForOffset returns the TOCEntry containing the byte of the +// named file at the given offset within the file. +// Name must be absolute path or one that is relative to root. +func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. 
This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + return &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + fr.preRead = preRead + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. + compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) + } + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { + return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) + } + nr = e.InnerOffset + if e == ent { + found = true + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) + } + retN, retErr = io.ReadFull(dr, p) + nr += off + int64(retN) + continue + } + cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} + if err := fr.preRead(e, cr); err != nil { + return 0, fmt.Errorf("failed to pre read: %w", err) + } + nr += cr.n + } + if !found { + return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") + } + return 
retN, retErr +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + uncompressedCounter *countWriteFlusher + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int + + // MinChunkSize optionally controls the minimum number of bytes + // of data must be written in one gzip stream before a new gzip + // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand. + MinChunkSize int + + needsOpenGzEntries map[string]struct{} +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. +func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, fmt.Errorf("failed to parse footer: %w", err) + } + if blobPayloadSize < 0 { + blobPayloadSize = sr.Size() + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { + bw := bufio.NewWriter(w) + cw := &countWriter{w: bw} + return &Writer{ + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + uncompressedCounter: &countWriteFlusher{}, + } +} + +// Close writes the stargz's table of contents and flushes all the +// buffers, returning any error. 
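
Tying the writer pieces together (Close and AppendTar follow below), a sketch of the typical write path, with tarStream standing in for any tar or tar.gz source:

```go
func writeESGZ(tarStream io.Reader) error {
	buf := new(bytes.Buffer)
	w := estargz.NewWriterLevel(buf, gzip.BestSpeed) // default is gzip.BestCompression
	w.ChunkSize = 1 << 20                            // chunk large regular files at 1 MiB
	if err := w.AppendTar(tarStream); err != nil {
		return err
	}
	tocDigest, err := w.Close() // emits the TOC JSON and footer
	if err != nil {
		return err
	}
	fmt.Println("toc:", tocDigest, "diffID:", w.DiffID()) // DiffID only valid after Close
	return nil
}
```
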
+func (w *Writer) Close() (digest.Digest, error) { + if w.closed { + return "", nil + } + defer func() { w.closed = true }() + + if err := w.closeGz(); err != nil { + return "", err + } + + // Write the TOC index and footer. + tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash) + if err != nil { + return "", err + } + if err := w.bw.Flush(); err != nil { + return "", err + } + + return tocDigest, nil +} + +func (w *Writer) closeGz() error { + if w.closed { + return errors.New("write on closed Writer") + } + if w.gz != nil { + if err := w.gz.Close(); err != nil { + return err + } + w.gz = nil + } + return nil +} + +func (w *Writer) flushGz() error { + if w.closed { + return errors.New("flush on closed Writer") + } + if w.gz != nil { + if f, ok := w.gz.(interface { + Flush() error + }); ok { + return f.Flush() + } + } + return nil +} + +// nameIfChanged returns name, unless it was the already the value of (*mp)[id], +// in which case it returns the empty string. +func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { + if name == "" { + return "" + } + if *mp == nil { + *mp = make(map[int]string) + } + if (*mp)[id] == name { + return "" + } + (*mp)[id] = name + return name +} + +func (w *Writer) condOpenGz() (err error) { + if w.gz == nil { + w.gz, err = w.compressor.Writer(w.cw) + if w.gz != nil { + w.gz = w.uncompressedCounter.register(w.gz) + } + } + return +} + +// AppendTar reads the tar or tar.gz file from r and appends +// each of its contents to w. +// +// The input r can optionally be gzip compressed but the output will +// always be compressed by the specified compressor. +func (w *Writer) AppendTar(r io.Reader) error { + return w.appendTar(r, false) +} + +// AppendTarLossLess reads the tar or tar.gz file from r and appends +// each of its contents to w. +// +// The input r can optionally be gzip compressed but the output will +// always be compressed by the specified compressor. +// +// The difference of this func with AppendTar is that this writes +// the input tar stream into w without any modification (e.g. to header bytes). +// +// Note that if the input tar stream already contains TOC JSON, this returns +// error because w cannot overwrite the TOC JSON to the one generated by w without +// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz, +// you shoud decompress it and remove TOC JSON in advance. +func (w *Writer) AppendTarLossLess(r io.Reader) error { + return w.appendTar(r, true) +} + +func (w *Writer) appendTar(r io.Reader, lossless bool) error { + var src io.Reader + br := bufio.NewReader(r) + if isGzip(br) { + zr, _ := gzip.NewReader(br) + src = zr + } else { + src = io.Reader(br) + } + dst := currentCompressionWriter{w} + var tw *tar.Writer + if !lossless { + tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode. + } + tr := tar.NewReader(src) + if lossless { + tr.RawAccounting = true + } + prevOffset := w.cw.n + var prevOffsetUncompressed int64 + for { + h, err := tr.Next() + if err == io.EOF { + if lossless { + if remain := tr.RawBytes(); len(remain) > 0 { + // Collect the remaining null bytes. 
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53 + if _, err := dst.Write(remain); err != nil { + return err + } + } + } + break + } + if err != nil { + return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err) + } + if cleanEntryName(h.Name) == TOCTarName { + // It is possible for a layer to be "stargzified" twice during the + // distribution lifecycle. So we reserve "TOCTarName" here to avoid + // duplicated entries in the resulting layer. + if lossless { + // We cannot handle this in lossless way. + return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append") + } + continue + } + + xattrs := make(map[string][]byte) + const xattrPAXRecordsPrefix = "SCHILY.xattr." + if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. + var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + + // We flush the underlying compression writer here to correctly calculate "w.cw.n". 
+				if err := w.flushGz(); err != nil {
+					return err
+				}
+				if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+					if err := w.closeGz(); err != nil {
+						return err
+					}
+					ent.Offset = w.cw.n
+					prevOffset = ent.Offset
+					prevOffsetUncompressed = w.uncompressedCounter.n
+				} else {
+					ent.Offset = prevOffset
+					ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+				}
+
+				ent.ChunkOffset = written
+				chunkDigest := digest.Canonical.Digester()
+
+				if err := w.condOpenGz(); err != nil {
+					return err
+				}
+
+				teeChunk := io.TeeReader(tee, chunkDigest.Hash())
+				var out io.Writer
+				if tw != nil {
+					out = tw
+				} else {
+					out = dst
+				}
+				if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
+					return fmt.Errorf("error copying %q: %v", h.Name, err)
+				}
+				ent.ChunkDigest = chunkDigest.Digest().String()
+				w.toc.Entries = append(w.toc.Entries, ent)
+				written += chunkSize
+				ent = &TOCEntry{
+					Name: h.Name,
+					Type: "chunk",
+				}
+			}
+		} else {
+			w.toc.Entries = append(w.toc.Entries, ent)
+		}
+		if payloadDigest != nil {
+			regFileEntry.Digest = payloadDigest.Digest().String()
+		}
+		if tw != nil {
+			if err := tw.Flush(); err != nil {
+				return err
+			}
+		}
+	}
+	remainDest := io.Discard
+	if lossless {
+		remainDest = dst // Preserve the remaining bytes in lossless mode
+	}
+	_, err := io.Copy(remainDest, src)
+	return err
+}
+
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+	if ent.Type != "reg" {
+		return false
+	}
+	if w.needsOpenGzEntries == nil {
+		return false
+	}
+	_, ok := w.needsOpenGzEntries[ent.Name]
+	return ok
+}
+
+// DiffID returns the SHA-256 of the uncompressed tar bytes.
+// It is only valid to call DiffID after Close.
+func (w *Writer) DiffID() string {
+	return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
+}
+
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+	for _, d := range decompressors {
+		if s := d.FooterSize(); res < s && s <= blobSize {
+			res = s
+		}
+	}
+	return
+}
+
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+	if tocOff < 0 {
+		// This means that the TOC isn't contained in the blob.
+		// We pass a nil reader to ParseTOC and expect it to acquire the TOC from
+		// the external location.
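+		// (A negative offset is how a Decompressor signals that its TOC lives
+		// outside the blob itself.)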
+ start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. +type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. 
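+// The check peeks at the three-byte gzip magic: ID1=0x1f, ID2=0x8b and
+// CM=8 (deflate), as defined in RFC 1952.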
+func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 0000000000..88fa13b191 --- /dev/null +++ b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,237 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. 
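+// The footer is an empty gzip stream whose Extra field carries the TOC offset:
+//
+//	'S' 'G' <2-byte little-endian length> <16 hex digits of tocOff> "STARGZ"
+//
+// gzip.NoCompression keeps the stream size fixed, so the footer is always
+// exactly FooterSize (51) bytes and can be located from the end of the blob.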
+func gzipFooterBytes(tocOff int64) []byte {
+	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+	// Extra header indicating the offset of TOCJSON
+	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+	header := make([]byte, 4)
+	header[0], header[1] = 'S', 'G'
+	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+	gz.Extra = append(header, []byte(subfield)...)
+	gz.Close()
+	if buf.Len() != FooterSize {
+		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+	}
+	return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != FooterSize {
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	defer zr.Close()
+	extra := zr.Extra
+	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+	if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+	}
+	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+	}
+	if string(subfield[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+	}
+	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+	return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != legacyFooterSize {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+	}
+	defer zr.Close()
+	extra := zr.Extra
+	if len(extra) != 16+len("STARGZ") {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid size of stargz extra field")
+	}
+	if string(extra[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+	}
+	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+	return legacyFooterSize
+}
+
+func (gz *LegacyGzipDecompressor) DecompressTOC(r
io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + tr, err := decompressTOCEStargz(r) + if err != nil { + return nil, "", err + } + dgstr := digest.Canonical.Digester() + toc = new(JTOC) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) + } + if err := tr.Close(); err != nil { + return nil, "", err + } + return toc, dgstr.Digest(), nil +} + +func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { + zr, err := gzip.NewReader(r) + if err != nil { + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) + } + zr.Multistream(false) + tr := tar.NewReader(zr) + h, err := tr.Next() + if err != nil { + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + } + if h.Name != TOCTarName { + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) + } + return readCloser{tr, zr.Close}, nil +} diff --git a/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go new file mode 100644 index 0000000000..ff165e090e --- /dev/null +++ b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -0,0 +1,2403 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "math/big" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" +) + +// TestingController is Compression with some helper methods necessary for testing. +type TestingController interface { + Compression + TestStreams(t TestingT, b []byte, streams []int64) + DiffIDOf(TestingT, []byte) string + String() string +} + +// TestingT is the minimal set of testing.T required to run the +// tests defined in CompressionTestSuite. This interface exists to prevent +// leaking the testing package from being exposed outside tests. +type TestingT interface { + Errorf(format string, args ...any) + FailNow() + Failed() bool + Fatal(args ...any) + Fatalf(format string, args ...any) + Logf(format string, args ...any) + Parallel() +} + +// Runner allows running subtests of TestingT. This exists instead of adding +// a Run method to TestingT interface because the Run implementation of +// testing.T would not satisfy the interface. 
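+// In a _test file, a Runner is typically just a thin adapter over testing.T,
+// e.g. (illustrative sketch, not part of this package):
+//
+//	func run(t estargz.TestingT, name string, fn func(t estargz.TestingT)) {
+//		t.(*testing.T).Run(name, func(t *testing.T) { fn(t) })
+//	}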
+type Runner func(t TestingT, name string, fn func(t TestingT)) + +type TestRunner struct { + TestingT + Runner Runner +} + +func (r *TestRunner) Run(name string, run func(*TestRunner)) { + r.Runner(r.TestingT, name, func(t TestingT) { + run(&TestRunner{TestingT: t, Runner: r.Runner}) + }) +} + +// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. +func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) { + t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *TestRunner) { + t.Parallel() + testDigestAndVerify(t, controllers...) + }) + t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) }) +} + +type TestingControllerFactory func() TestingController + +const ( + uncompressedType int = iota + gzipType + zstdType +) + +var srcCompressions = []int{ + uncompressedType, + gzipType, + zstdType, +} + +var allowedPrefix = [4]string{"", "./", "/", "../"} + +// testBuild tests the resulting stargz blob built by this pkg has the same +// contents as the normal stargz blob. +func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { + tests := []struct { + name string + chunkSize int + minChunkSize []int + in []tarEntry + }{ + { + name: "regfiles and directories", + chunkSize: 4, + in: tarOf( + file("foo", "test1"), + dir("foo2/"), + file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), + ), + }, + { + name: "empty files", + chunkSize: 4, + in: tarOf( + file("foo", "tttttt"), + file("foo_empty", ""), + file("foo2", "tttttt"), + file("foo_empty2", ""), + file("foo3", "tttttt"), + file("foo_empty3", ""), + file("foo4", "tttttt"), + file("foo_empty4", ""), + file("foo5", "tttttt"), + file("foo_empty5", ""), + file("foo6", "tttttt"), + ), + }, + { + name: "various files", + chunkSize: 4, + minChunkSize: []int{0, 64000}, + in: tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo1.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + { + name: "no contents", + chunkSize: 4, + in: tarOf( + file("baz.txt", ""), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + } + for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, newCL := range controllers { + newCL := newCL + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, prefix := range allowedPrefix { + prefix := prefix + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) { + tarBlob := 
buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) + } + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) + } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return + } + + // Prepare sample data + cl1 := newCL() + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl1) + sw.MinChunkSize = minChunkSize + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl1), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } + + // Prepare testing data + var opts []Option + if minChunkSize > 0 { + opts = append(opts, WithMinChunkSize(minChunkSize)) + } + cl2 := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...) + if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl2), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } + + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl2.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + // Compare as stargz + if !isSameVersion(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } + + // Compare as tar.gz + if !isSameTarGz(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } + } + } + } + } + } +} + +func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aGz, err := cla.Reader(bytes.NewReader(a)) + if err != nil { + t.Fatalf("failed to read A") + } + defer aGz.Close() + bGz, err := clb.Reader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("failed to read B") + } + defer bGz.Close() + + // Same as tar's Next() method but ignores landmarks and TOCJSON file + next := func(r *tar.Reader) (h *tar.Header, err error) { + for { + if h, err = r.Next(); err != nil { + return + } + if h.Name != PrefetchLandmark && + h.Name != NoPrefetchLandmark && + h.Name != TOCTarName { + return + } + } + } + + aTar := tar.NewReader(aGz) + bTar := tar.NewReader(bGz) + for { + // Fetch and parse next header. 
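+		// next() already skips landmarks and the TOC entry, so a simultaneous
+		// EOF on both sides means the two archives hold the same number of members.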
+		aH, aErr := next(aTar)
+		bH, bErr := next(bTar)
+		if aErr != nil || bErr != nil {
+			if aErr == io.EOF && bErr == io.EOF {
+				break
+			}
+			t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+		}
+		if !reflect.DeepEqual(aH, bH) {
+			t.Logf("different header (A = %v; B = %v)", aH, bH)
+			return false
+		}
+		aFile, err := io.ReadAll(aTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of A")
+		}
+		bFile, err := io.ReadAll(bTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of B")
+		}
+		if !bytes.Equal(aFile, bFile) {
+			t.Logf("different tar payload (A = %q; B = %q)", string(aFile), string(bFile))
+			return false
+		}
+	}
+
+	return true
+}
+
+func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+	aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
+	if err != nil {
+		t.Fatalf("failed to parse A: %v", err)
+	}
+	bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
+	if err != nil {
+		t.Fatalf("failed to parse B: %v", err)
+	}
+	t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+	t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+	return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t TestingT, a, b *Reader) bool {
+	aroot, ok := a.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of A")
+	}
+	broot, ok := b.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of B")
+	}
+	aEntry := stargzEntry{aroot, a}
+	bEntry := stargzEntry{broot, b}
+	return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader {
+	buf := new(bytes.Buffer)
+	var w io.WriteCloser
+	var err error
+	switch srcCompression {
+	case gzipType:
+		w = gzip.NewWriter(buf)
+	case zstdType:
+		w, err = zstd.NewWriter(buf)
+		if err != nil {
+			t.Fatalf("failed to init zstd writer: %v", err)
+		}
+	default:
+		return src
+	}
+	src.Seek(0, io.SeekStart)
+	if _, err := io.Copy(w, src); err != nil {
+		t.Fatalf("failed to compress source")
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("failed to finalize compress source")
+	}
+	data := buf.Bytes()
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+type stargzEntry struct {
+	e *TOCEntry
+	r *Reader
+}
+
+// contains checks if all child entries in "b" are also contained in "a".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t TestingT, a, b stargzEntry) bool {
+	ae, ar := a.e, a.r
+	be, br := b.e, b.r
+	t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+	if !equalEntry(ae, be) {
+		t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+		return false
+	}
+	if ae.Type == "dir" {
+		t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+			allChildrenName(ae), allChildrenName(be))
+		iscontain := true
+		ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+			// Walk through all files in this stargz file.
+
+			if aChild.Name == PrefetchLandmark ||
+				aChild.Name == NoPrefetchLandmark {
+				return true // Ignore landmarks
+			}
+
+			// Ignore a TOCEntry of "./" (formatted as "" by the stargz lib) on the root
+			// directory because this points to the root directory itself.
+			if aChild.Name == "" && ae.Name == "" {
+				return true
+			}
+
+			bChild, ok := be.LookupChild(aBaseName)
+			if !ok {
+				t.Logf("%q (base: %q): not found in b: %v",
+					ae.Name, aBaseName, allChildrenName(be))
+				iscontain = false
+				return false
+			}
+
+			childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+			if !childcontain {
+				t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+				iscontain = false
+				return false
+			}
+			return true
+		})
+		return iscontain
+	} else if ae.Type == "reg" {
+		af, err := ar.OpenFile(ae.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+		}
+		bf, err := br.OpenFile(be.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+		}
+
+		var nr int64
+		for nr < ae.Size {
+			abytes, anext, aok := readOffset(t, af, nr, a)
+			bbytes, bnext, bok := readOffset(t, bf, nr, b)
+			if !aok && !bok {
+				break
+			} else if !aok || !bok || anext != bnext {
+				t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+					ae.Name, be.Name, nr, aok, bok, anext, bnext)
+				return false
+			}
+			nr = anext
+			if !bytes.Equal(abytes, bbytes) {
+				t.Logf("%q != %q: different contents %v vs %v",
+					ae.Name, be.Name, string(abytes), string(bbytes))
+				return false
+			}
+		}
+		return true
+	}
+
+	return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+	e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+		children = append(children, baseName)
+		return true
+	})
+	return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+	// Here, we selectively compare fields that we are interested in.
+	return a.Name == b.Name &&
+		a.Type == b.Type &&
+		a.Size == b.Size &&
+		a.ModTime3339 == b.ModTime3339 &&
+		a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+		a.LinkName == b.LinkName &&
+		a.Mode == b.Mode &&
+		a.UID == b.UID &&
+		a.GID == b.GID &&
+		a.Uname == b.Uname &&
+		a.Gname == b.Gname &&
+		(a.Offset >= 0) == (b.Offset >= 0) &&
+		(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+		a.DevMajor == b.DevMajor &&
+		a.DevMinor == b.DevMinor &&
+		a.NumLink == b.NumLink &&
+		reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+		// chunk-related information isn't compared in this function.
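+		// (Offsets depend on how each blob happened to be compressed, so
+		// equivalent blobs can legitimately differ here.)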
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t TestingT, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) { + tests := []struct { + name string + tarInit func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int + }{ + { + name: "no-regfile", + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + minChunkSize: []int{0, 64000}, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", 
dgstMap),
+					regDigest(t, "foo.txt", "a", dgstMap),
+					regDigest(t, "bar/foo2.txt", "b", dgstMap),
+					regDigest(t, "foo3.txt", "c", dgstMap),
+					symlink("barlink", "test/bar.txt"),
+					dir("test/"),
+					regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+					dir("test2/"),
+					link("test2/bazlink", "baz.txt"),
+				)
+			},
+			checks: []check{
+				checkStargzTOC,
+				checkVerifyTOC,
+				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+					file("baz.txt", "bazbazbazbazbazbazbaz"),
+					file("foo.txt", "a"),
+					file("bar/foo2.txt", "b"),
+					file("foo3.txt", "c"),
+					symlink("barlink", "test/bar.txt"),
+					dir("test/"),
+					file("test/bar.txt", "testbartestbar"),
+					dir("test2/"),
+					link("test2/bazlink", "foo.txt"), // modified
+				), allowedPrefix[0])),
+				checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+				checkVerifyBrokenContentFail("test/bar.txt"),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		if len(tt.minChunkSize) == 0 {
+			tt.minChunkSize = []int{0}
+		}
+		for _, srcCompression := range srcCompressions {
+			srcCompression := srcCompression
+			for _, newCL := range controllers {
+				newCL := newCL
+				for _, prefix := range allowedPrefix {
+					prefix := prefix
+					for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+						srcTarFormat := srcTarFormat
+						for _, minChunkSize := range tt.minChunkSize {
+							minChunkSize := minChunkSize
+							t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) {
+								// Get original tar file and chunk digests
+								dgstMap := make(map[string]digest.Digest)
+								tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+								cl := newCL()
+								rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+									WithChunkSize(chunkSize), WithCompression(cl))
+								if err != nil {
+									t.Fatalf("failed to convert stargz: %v", err)
+								}
+								tocDigest := rc.TOCDigest()
+								defer rc.Close()
+								buf := new(bytes.Buffer)
+								if _, err := io.Copy(buf, rc); err != nil {
+									t.Fatalf("failed to copy built stargz blob: %v", err)
+								}
+								newStargz := buf.Bytes()
+								// NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+								dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+								for _, check := range tt.checks {
+									check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+								}
+							})
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and
+// checks that all chunk digests stored in the TOC JSON match the actual contents.
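+// It also confirms that the digest of the TOC JSON itself matches tocDigest.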
+func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + digestMapTOC, err := listDigests(io.NewSectionReader( + bytes.NewReader(sgzData), 0, int64(len(sgzData))), + controller, + ) + if err != nil { + t.Fatalf("failed to list digest: %v", err) + } + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + // Check the digest of TOC JSON based on the actual contents + // It's sure that TOC JSON exists in this archive because + // Open succeeded. + dgstr := digest.Canonical.Digester() + if _, err := io.Copy(dgstr.Hash(), tr); err != nil { + t.Fatalf("failed to calculate digest of TOC JSON: %v", + err) + } + if dgstr.Digest() != tocDigest { + t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest()) + } + continue + } + if _, ok := sgz.Lookup(h.Name); !ok { + t.Errorf("lost stargz entry %q in the converted TOC", h.Name) + return + } + var n int64 + for n < h.Size { + ce, ok := sgz.ChunkEntryForOffset(h.Name, n) + if !ok { + t.Errorf("lost chunk %q(offset=%d) in the converted TOC", + h.Name, n) + return + } + + // Get the original digest to make sure the file contents are kept unchanged + // from the original tar, during the whole conversion steps. + id := chunkID(h.Name, n, ce.ChunkSize) + want, ok := dgstMap[id] + if !ok { + t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v", + h.Name, n, ce.ChunkSize, dgstMap) + return + } + found[id] = true + + // Check the file contents + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil { + t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)", + h.Name, n, ce.ChunkSize) + } + if want != dgstr.Digest() { + t.Errorf("Invalid contents in converted stargz %q: %q; want %q", + h.Name, dgstr.Digest(), want) + return + } + + // Check the digest stored in TOC JSON + dgstTOC, ok := digestMapTOC[ce.Offset] + if !ok { + t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered", + h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset) + } + if want != dgstTOC { + t.Errorf("Invalid digest in TOCEntry %q: %q; want %q", + h.Name, dgstTOC, want) + return + } + + n += ce.ChunkSize + } + } + + for id, ok := range found { + if !ok { + t.Errorf("required chunk %q not found in the converted stargz: %v", id, found) + } + } +} + +// checkVerifyTOC checks the verification works for the TOC JSON of the passed +// stargz. It walks all entries in the stargz and checks the verifications for +// all chunks work. 
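+// Each chunk's contents are streamed into the Verifier obtained for its
+// TOCEntry, which must then report Verified() == true.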
+func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	sgz, err := Open(
+		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		WithDecompressors(controller),
+	)
+	if err != nil {
+		t.Errorf("failed to parse converted stargz: %v", err)
+		return
+	}
+	ev, err := sgz.VerifyTOC(tocDigest)
+	if err != nil {
+		t.Errorf("failed to verify stargz: %v", err)
+		return
+	}
+
+	found := make(map[string]bool)
+	for id := range dgstMap {
+		found[id] = false
+	}
+	zr, err := controller.Reader(bytes.NewReader(sgzData))
+	if err != nil {
+		t.Fatalf("failed to decompress converted stargz: %v", err)
+	}
+	defer zr.Close()
+	tr := tar.NewReader(zr)
+	for {
+		h, err := tr.Next()
+		if err != nil {
+			if err != io.EOF {
+				t.Errorf("failed to read tar entry: %v", err)
+				return
+			}
+			break
+		}
+		if h.Name == TOCTarName {
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			v, err := ev.Verifier(ce)
+			if err != nil {
+				t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+			}
+
+			found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+			// Check the file contents
+			if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if !v.Verified() {
+				t.Errorf("Invalid contents in converted stargz %q (verification should have succeeded)",
+					h.Name)
+				return
+			}
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during verification and that the verification returns an error.
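+// Two corruptions are exercised below: clearing an entry's chunk digest and
+// duplicating another entry's offset.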
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+	return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		funcs := map[string]rewriteFunc{
+			"lost digest in an entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
+				var found bool
+				for _, e := range toc.Entries {
+					if cleanEntryName(e.Name) == filename {
+						if e.Type != "reg" && e.Type != "chunk" {
+							t.Fatalf("entry %q to break must be regfile or chunk", filename)
+						}
+						if e.ChunkDigest == "" {
+							t.Fatalf("entry %q is already invalid", filename)
+						}
+						e.ChunkDigest = ""
+						found = true
+					}
+				}
+				if !found {
+					t.Fatalf("rewrite target not found")
+				}
+			},
+			"duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
+				var (
+					sampleEntry *TOCEntry
+					targetEntry *TOCEntry
+				)
+				for _, e := range toc.Entries {
+					if e.Type == "reg" || e.Type == "chunk" {
+						if cleanEntryName(e.Name) == filename {
+							targetEntry = e
+						} else {
+							sampleEntry = e
+						}
+					}
+				}
+				if sampleEntry == nil {
+					t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+					return
+				}
+				if targetEntry == nil {
+					t.Fatalf("rewrite target not found")
+					return
+				}
+				targetEntry.Offset = sampleEntry.Offset
+			},
+		}
+
+		for name, rFunc := range funcs {
+			t.Run(name, func(t *TestRunner) {
+				newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+				buf := new(bytes.Buffer)
+				if _, err := io.Copy(buf, newSgz); err != nil {
+					t.Fatalf("failed to get converted stargz")
+				}
+				isgz := buf.Bytes()
+
+				sgz, err := Open(
+					io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+					WithDecompressors(controller),
+				)
+				if err != nil {
+					t.Fatalf("failed to parse converted stargz: %v", err)
+					return
+				}
+				_, err = sgz.VerifyTOC(newTocDigest)
+				if err == nil {
+					t.Errorf("must fail for invalid TOC")
+					return
+				}
+			})
+		}
+	}
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+	return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		cl := newController()
+		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
+		if err != nil {
+			t.Fatalf("failed to convert stargz: %v", err)
+		}
+		defer rc.Close()
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, rc); err != nil {
+			t.Fatalf("failed to copy built stargz blob: %v", err)
+		}
+		mStargz := buf.Bytes()
+
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+			WithDecompressors(cl),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		_, err = sgz.VerifyTOC(tocDigest)
+		if err == nil {
+			t.Errorf("must fail for invalid TOC")
+			return
+		}
+	}
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken contents
+// that don't match the expected digest and returns an error.
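+// The first byte of the file's first chunk is bit-inverted, which is
+// guaranteed to change the chunk digest.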
+func checkVerifyBrokenContentFail(filename string) check { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + // Parse stargz file + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Fatalf("failed to parse converted stargz: %v", err) + return + } + ev, err := sgz.VerifyTOC(tocDigest) + if err != nil { + t.Fatalf("failed to verify stargz: %v", err) + return + } + + // Open the target file + sr, err := sgz.OpenFile(filename) + if err != nil { + t.Fatalf("failed to open file %q", filename) + } + ce, ok := sgz.ChunkEntryForOffset(filename, 0) + if !ok { + t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0) + return + } + if ce.ChunkSize == 0 { + t.Fatalf("file mustn't be empty") + return + } + data := make([]byte, ce.ChunkSize) + if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil { + t.Errorf("failed to get data of a chunk of %q(offset=%q)", + filename, ce.ChunkOffset) + } + + // Check the broken chunk (must fail) + v, err := ev.Verifier(ce) + if err != nil { + t.Fatalf("failed to get verifier for %q", filename) + } + broken := append([]byte{^data[0]}, data[1:]...) + if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil { + t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)", + filename, ce.ChunkOffset, ce.ChunkSize) + } + if v.Verified() { + t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)", + filename, data, broken) + } + } +} + +func chunkID(name string, offset, size int64) string { + return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size) +} + +type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader) + +func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { + decodedJTOC, jtocOffset, err := parseStargz(sgz, controller) + if err != nil { + t.Fatalf("failed to extract TOC JSON: %v", err) + } + + rewrite(t, decodedJTOC, sgz) + + tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset) + if err != nil { + t.Fatalf("failed to create toc and footer: %v", err) + } + + // Reconstruct stargz file with the modified TOC JSON + if _, err := sgz.Seek(0, io.SeekStart); err != nil { + t.Fatalf("failed to reset the seek position of stargz: %v", err) + } + return io.MultiReader( + io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON) + tocFooter, // Rewritten TOC and footer + ), tocDigest +} + +func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) { + decodedJTOC, _, err := parseStargz(sgz, controller) + if err != nil { + return nil, err + } + digestMap := make(map[int64]digest.Digest) + for _, e := range decodedJTOC.Entries { + if e.Type == "reg" || e.Type == "chunk" { + if e.Type == "reg" && e.Size == 0 { + continue // ignores empty file + } + if e.ChunkDigest == "" { + return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON", + e.Name, e.Offset) + } + d, err := digest.Parse(e.ChunkDigest) + if err != nil { + return nil, err + } + digestMap[e.Offset] = d + } + } + return digestMap, nil +} + +func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) { + fSize := controller.FooterSize() + footer := make([]byte, fSize) + if _, 
err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { + return nil, 0, fmt.Errorf("error reading footer: %w", err) + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) + } + + // Decode the TOC JSON + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + data64KB := randomContents(64000) + + tests := []struct { + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + wantTOCVersion int // default = 1 + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, // dir + 
big.txt(6 chunks) + TOC + footer + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is such a big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + 
file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error) + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + wantFailOnLossLess: true, + }, + { + name: "hardlink should be replaced to the destination entry", + in: tarOf( + dir("foo/"), + file("foo/foo1", "test"), + link("foolink", "foo/foo1"), + ), + wantNumGz: 4, // dir, foo1 + link, TOC, footer + want: checks( + mustSameEntry("foo/foo1", "foolink"), + ), + }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + file("foo22", "ccc"), + dir("bar/"), + file("bar/bar.txt", "aaa"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo22", len("ccc")), + hasFileLen("bar/bar.txt", len("aaa")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo22", digestFor("ccc")), + hasFileDigest("bar/bar.txt", digestFor("aaa")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo22", 0, "ccc"), + hasFileContentsRange("foo22", 1, "cc"), + hasFileContentsRange("foo22", 2, "c"), + hasFileContentsRange("bar/bar.txt", 0, "aaa"), + hasFileContentsRange("bar/bar.txt", 1, "aa"), + hasFileContentsRange("bar/bar.txt", 2, "a"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + dir("bar/"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo/foo1", 1, data64KB[1:]), + hasFileContentsRange("foo/foo1", 2, 
data64KB[2:]), + hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + } + + for _, tt := range tests { + for _, newCL := range controllers { + newCL := newCL + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, lossless := range []bool{true, false} { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) { + var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) + origTarDgstr := digest.Canonical.Digester() + tr = io.TeeReader(tr, origTarDgstr.Hash()) + var stargzBuf bytes.Buffer + cl1 := newCL() + w := NewWriterWithCompressor(&stargzBuf, cl1) + w.ChunkSize = tt.chunkSize + w.MinChunkSize = tt.minChunkSize + if lossless { + err := w.AppendTarLossLess(tr) + if tt.wantFailOnLossLess { + if err != nil { + return // expected to fail + } + t.Fatalf("Append wanted to fail on lossless") + } + if err != nil { + t.Fatalf("Append(lossless): %v", err) + } + } else { + if err := w.AppendTar(tr); err != nil { + t.Fatalf("Append: %v", err) + } + } + if _, err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + b := stargzBuf.Bytes() + + if lossless { + // Check if the result blob reserves original tar metadata + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) + if err != nil { + t.Errorf("failed to decompress blob: %v", err) + return + } + defer rc.Close() + resultDgstr := digest.Canonical.Digester() + if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil { + t.Errorf("failed to read result decompressed blob: %v", err) + return + } + if resultDgstr.Digest() != origTarDgstr.Digest() { + t.Errorf("lossy compression occurred: digest=%v; want %v", + resultDgstr.Digest(), origTarDgstr.Digest()) + return + } + } + + diffID := w.DiffID() + wantDiffID := cl1.DiffIDOf(t, b) + if diffID != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + telemetry, checkCalled := newCalledTelemetry() + sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) + r, err := Open( + sr, + WithDecompressors(cl1), + WithTelemetry(telemetry), + ) + if err != nil { + t.Fatalf("stargz.Open: %v", err) + } + if _, ok := r.Lookup(""); !ok { + t.Fatalf("failed to lookup rootdir: %v", err) + } + wantTOCVersion := 1 + if tt.wantTOCVersion > 0 { + wantTOCVersion = tt.wantTOCVersion + } + if r.toc.Version != wantTOCVersion { + t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) + } + + footerSize := cl1.FooterSize() + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + t.Errorf("failed to read footer: %v", err) + } + _, tocOffset, _, err := cl1.ParseFooter(footer) + if err != nil { + t.Errorf("failed to parse footer: %v", err) + } + if err := checkCalled(tocOffset >= 0); err != nil { + t.Errorf("telemetry failure: %v", err) + } + + 
wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + + for _, want := range tt.want { + want.check(t, r) + } + }) + } + } + } + } + } +} + +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { + var getFooterLatencyCalled bool + var getTocLatencyCalled bool + var deserializeTocLatencyCalled bool + return &Telemetry{ + func(time.Time) { getFooterLatencyCalled = true }, + func(time.Time) { getTocLatencyCalled = true }, + func(time.Time) { deserializeTocLatencyCalled = true }, + }, func(needsGetTOC bool) error { + var allErr []error + if !getFooterLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) + } + if needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t TestingT, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t TestingT, r *Reader) +} + +type stargzCheckFn func(TestingT, *Reader) + +func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + for _, ent := range 
r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + 
t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) + } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + ent, ok := 
r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", entry) + return + } + if ent.UID != owner.uid || ent.GID != owner.gid { + t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid) + return + } + }) +} + +func mustSameEntry(files ...string) stargzCheck { + return stargzCheckFn(func(t TestingT, r *Reader) { + var first *TOCEntry + for _, f := range files { + if first == nil { + var ok bool + first, ok = r.Lookup(f) + if !ok { + t.Errorf("unknown first file on Lookup: %q", f) + return + } + } + + // Test Lookup + e, ok := r.Lookup(f) + if !ok { + t.Errorf("unknown file on Lookup: %q", f) + return + } + if e != first { + t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test LookupChild + pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get parent of %q", f) + return + } + e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get %q as the child of %+v", f, pe) + return + } + if e != first { + t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test ForeachChild + pe.ForeachChild(func(baseName string, e *TOCEntry) bool { + if baseName == filepath.Base(filepath.Clean(f)) { + if e != first { + t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first) + return false + } + } + return true + }) + } + }) +} + +func viewContent(c []byte) string { + if len(c) < 100 { + return string(c) + } + return string(c[:50]) + "...(omit)..." + string(c[50:100]) +} + +func tarOf(s ...tarEntry) []tarEntry { return s } + +type tarEntry interface { + appendTar(tw *tar.Writer, prefix string, format tar.Format) error +} + +type tarEntryFunc func(*tar.Writer, string, tar.Format) error + +func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error { + return f(tw, prefix, format) +} + +func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { + format := tar.FormatUnknown + for _, opt := range opts { + switch v := opt.(type) { + case tar.Format: + format = v + default: + panic(fmt.Errorf("unsupported opt for buildTar: %v", opt)) + } + } + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, ent := range ents { + if err := ent.appendTar(tw, prefix, format); err != nil { + t.Fatalf("building input tar: %v", err) + } + } + if err := tw.Close(); err != nil { + t.Errorf("closing write of input tar: %v", err) + } + data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) +} + +func dir(name string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var o owner + mode := os.FileMode(0755) + for _, opt := range opts { + switch v := opt.(type) { + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if !strings.HasSuffix(name, "/") { + panic(fmt.Sprintf("missing trailing slash in dir %q ", name)) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: prefix + name, + Mode: tm, + Uid: o.uid, + Gid: o.gid, + Format: format, + }) + }) +} + +// xAttr are extended attributes to set on test files created with the file func. 
+type xAttr map[string]string
+
+// owner is owner to set on test files and directories with the file and dir functions.
+type owner struct {
+	uid int
+	gid int
+}
+
+func file(name, contents string, opts ...interface{}) tarEntry {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+		var xattrs xAttr
+		var o owner
+		mode := os.FileMode(0644)
+		for _, opt := range opts {
+			switch v := opt.(type) {
+			case xAttr:
+				xattrs = v
+			case owner:
+				o = v
+			case os.FileMode:
+				mode = v
+			default:
+				return errors.New("unsupported opt")
+			}
+		}
+		if strings.HasSuffix(name, "/") {
+			return fmt.Errorf("bogus trailing slash in file %q", name)
+		}
+		tm, err := fileModeToTarMode(mode)
+		if err != nil {
+			return err
+		}
+		if len(xattrs) > 0 {
+			format = tar.FormatPAX // only PAX supports xattrs
+		}
+		if err := tw.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeReg,
+			Name:     prefix + name,
+			Mode:     tm,
+			Xattrs:   xattrs,
+			Size:     int64(len(contents)),
+			Uid:      o.uid,
+			Gid:      o.gid,
+			Format:   format,
+		}); err != nil {
+			return err
+		}
+		_, err = io.WriteString(tw, contents)
+		return err
+	})
+}
+
+func symlink(name, target string) tarEntry {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+		return tw.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeSymlink,
+			Name:     prefix + name,
+			Linkname: target,
+			Mode:     0644,
+			Format:   format,
+		})
+	})
+}
+
+func link(name string, linkname string) tarEntry {
+	now := time.Now()
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+		return w.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeLink,
+			Name:     prefix + name,
+			Linkname: linkname,
+			ModTime:  now,
+			Format:   format,
+		})
+	})
+}
+
+func chardev(name string, major, minor int64) tarEntry {
+	now := time.Now()
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+		return w.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeChar,
+			Name:     prefix + name,
+			Devmajor: major,
+			Devminor: minor,
+			ModTime:  now,
+			Format:   format,
+		})
+	})
+}
+
+func blockdev(name string, major, minor int64) tarEntry {
+	now := time.Now()
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+		return w.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeBlock,
+			Name:     prefix + name,
+			Devmajor: major,
+			Devminor: minor,
+			ModTime:  now,
+			Format:   format,
+		})
+	})
+}
+func fifo(name string) tarEntry {
+	now := time.Now()
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+		return w.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeFifo,
+			Name:     prefix + name,
+			ModTime:  now,
+			Format:   format,
+		})
+	})
+}
+
+func prefetchLandmark() tarEntry {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+		if err := w.WriteHeader(&tar.Header{
+			Name:     PrefetchLandmark,
+			Typeflag: tar.TypeReg,
+			Size:     int64(len([]byte{landmarkContents})),
+			Format:   format,
+		}); err != nil {
+			return err
+		}
+		contents := []byte{landmarkContents}
+		if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+func noPrefetchLandmark() tarEntry {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
+		if err := w.WriteHeader(&tar.Header{
+			Name:     NoPrefetchLandmark,
+			Typeflag: tar.TypeReg,
+			Size:     int64(len([]byte{landmarkContents})),
+			Format:   format,
+		}); err != nil {
+			return err
+		}
+		contents := []byte{landmarkContents}
+		if _, err := io.CopyN(w, bytes.NewReader(contents),
int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomContents(n int) string { + b := make([]rune, n) + for i := range b { + bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes)))) + if err != nil { + panic(err) + } + b[i] = runes[int(bi.Int64())] + } + return string(b) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } + +func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) { + if len(streams) == 0 { + return // nop + } + + wants := map[int64]struct{}{} + for _, s := range streams { + wants[s] = struct{}{} + } + + len0 := len(b) + br := bytes.NewReader(b) + zr := new(gzip.Reader) + t.Logf("got gzip streams:") + numStreams := 0 + for { + zoff := len0 - br.Len() + if err := zr.Reset(br); err != nil { + if err == io.EOF { + return + } + t.Fatalf("countStreams(gzip), Reset: %v", err) + } + zr.Multistream(false) + n, err := io.Copy(io.Discard, zr) + if err != nil { + t.Fatalf("countStreams(gzip), Copy: %v", err) + } + var extra string + if len(zr.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Extra) + } + t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) + delete(wants, int64(zoff)) + numStreams++ + } +} + +func GzipDiffIDOf(t TestingT, b []byte) string { + h := sha256.New() + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("diffIDOf(gzip): %v", err) + } + defer zr.Close() + if _, err := io.Copy(h, zr); err != nil { + t.Fatalf("diffIDOf(gzip).Copy: %v", err) + } + return fmt.Sprintf("sha256:%x", h.Sum(nil)) +} diff --git a/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 
0000000000..57e0aa614e --- /dev/null +++ b/openshift/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,342 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. + // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy + // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size + // to the runtime but current OCI image doesn't ship this information by default. So we store this + // to the special annotation. + StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// JTOC is the JSON-serialized table of contents index of the files in the stargz file. 
+type JTOC struct {
+	Version int         `json:"version"`
+	Entries []*TOCEntry `json:"entries"`
+}
+
+// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
+type TOCEntry struct {
+	// Name is the tar entry's name. It is the complete path
+	// stored in the tar file, not just the base name.
+	Name string `json:"name"`
+
+	// Type is one of "dir", "reg", "symlink", "hardlink", "char",
+	// "block", "fifo", or "chunk".
+	// The "chunk" type is used for regular file data chunks past the first
+	// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+	// ChunkOffset, and ChunkSize populated.
+	Type string `json:"type"`
+
+	// Size, for regular files, is the logical size of the file.
+	Size int64 `json:"size,omitempty"`
+
+	// ModTime3339 is the modification time of the tar entry. Empty
+	// means zero or unknown. Otherwise it's in UTC RFC3339
+	// format. Use the ModTime method to access the time.Time value.
+	ModTime3339 string `json:"modtime,omitempty"`
+	modTime     time.Time
+
+	// LinkName, for symlinks and hardlinks, is the link target.
+	LinkName string `json:"linkName,omitempty"`
+
+	// Mode is the permission and mode bits.
+	Mode int64 `json:"mode,omitempty"`
+
+	// UID is the user ID of the owner.
+	UID int `json:"uid,omitempty"`
+
+	// GID is the group ID of the owner.
+	GID int `json:"gid,omitempty"`
+
+	// Uname is the username of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same UID.
+	Uname string `json:"userName,omitempty"`
+
+	// Gname is the group name of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same GID.
+	Gname string `json:"groupName,omitempty"`
+
+	// Offset, for regular files, provides the offset in the
+	// stargz file to the file's data bytes. See ChunkOffset and
+	// ChunkSize.
+	Offset int64 `json:"offset,omitempty"`
+
+	// InnerOffset is an optional field that indicates the uncompressed offset
+	// of this "reg" or "chunk" payload in the stream that starts at Offset.
+	// This field makes it possible to put multiple "reg" or "chunk" payloads
+	// in one chunk with the same Offset but different InnerOffsets.
+	InnerOffset int64 `json:"innerOffset,omitempty"`
+
+	nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+	// DevMajor is the major device number for "char" and "block" types.
+	DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+	DevMinor int `json:"devMinor,omitempty"`
+
+	// NumLink is the number of entry names pointing to this entry.
+	// Zero means one name references this entry.
+	// This field is calculated during runtime and not recorded in TOC JSON.
+	NumLink int `json:"-"`
+
+	// Xattrs are the extended attributes for the entry.
+	Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for the regular file's payload.
+	// It has the form "sha256:abcdef01234....".
+	Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value when Type is either "reg" or
+	// "chunk".
+	ChunkOffset int64 `json:"chunkOffset,omitempty"`
+	ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+	// ChunkDigest stores an OCI digest of the chunk. This must be formed
+	// as "sha256:0123abcd...".
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+
+	children map[string]*TOCEntry
+
+	// chunkTopIndex is the index of the entry where Offset starts in the blob.
+	chunkTopIndex int
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+	if e.children == nil {
+		e.children = make(map[string]*TOCEntry)
+	}
+	if child.Type == "dir" {
+		e.NumLink++ // Entry ".." in the subdirectory links to this directory
+	}
+	e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+	for name, ent := range e.children {
+		if !f(name, ent) {
+			return
+		}
+	}
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+	child, ok = e.children[baseName]
+	return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string       { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool        { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64        { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{}   { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+	// TOCEntry.Mode is tar.Header.Mode, so we can interpret these bits using the `tar` pkg.
+	m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+		(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+	switch fi.e.Type {
+	case "dir":
+		m |= os.ModeDir
+	case "symlink":
+		m |= os.ModeSymlink
+	case "char":
+		m |= os.ModeDevice | os.ModeCharDevice
+	case "block":
+		m |= os.ModeDevice
+	case "fifo":
+		m |= os.ModeNamedPipe
+	}
+	return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+	// Verifier provides a content verifier that can be used for verifying the
+	// contents of the specified TOCEntry.
+	Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+	Compressor
+	Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
+	// Every time a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
+	//
+	// The returned writer should implement a "Flush() error" function that flushes
+	// any pending compressed data to the underlying writer.
+	Writer(w io.Writer) (WriteFlushCloser, error)
+
+	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+	// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst that represents the digest of the TOC that will be used
+	// to verify this blob when it's parsed.
+	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+	// Reader returns ReadCloser to be used for decompressing file payload.
+	Reader(r io.Reader) (io.ReadCloser, error)
+
+	// FooterSize returns the size of the footer of this blob.
+	FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
+	// blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+	// the top of the blob until the TOC JSON).
+	//
+	// If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil reader
+	// to ParseTOC. We expect ParseTOC to acquire the TOC from an external location and return it.
+	//
+	// tocSize is optional. If tocSize <= 0, it defaults to the size of the range from tocOffset until the beginning of the
+	// footer (blob size - tocOff - FooterSize).
+	// If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses the TOC from the passed reader. The reader provides the partial contents
+	// of the underlying blob in the range specified by the ParseFooter method.
+	//
+	// This function returns tocDgst that represents the digest of the TOC that will be used
+	// to verify this blob. This must match the value returned from
+	// Compressor.WriteTOCAndFooter that is used when creating this blob.
+	//
+	// If the tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in the blob.
+	// Pass a nil reader to ParseTOC; it is then expected to acquire the TOC from an external location
+	// and return it.
+ ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) +} + +type WriteFlushCloser interface { + io.WriteCloser + Flush() error +} diff --git a/openshift/tools/vendor/github.com/distribution/reference/.gitattributes b/openshift/tools/vendor/github.com/distribution/reference/.gitattributes deleted file mode 100644 index d207b1802b..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf diff --git a/openshift/tools/vendor/github.com/distribution/reference/.gitignore b/openshift/tools/vendor/github.com/distribution/reference/.gitignore deleted file mode 100644 index dc07e6b04a..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Cover profiles -*.out diff --git a/openshift/tools/vendor/github.com/distribution/reference/.golangci.yml b/openshift/tools/vendor/github.com/distribution/reference/.golangci.yml deleted file mode 100644 index 793f0bb7ec..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/.golangci.yml +++ /dev/null @@ -1,18 +0,0 @@ -linters: - enable: - - bodyclose - - dupword # Checks for duplicate words in the source code - - gofmt - - goimports - - ineffassign - - misspell - - revive - - staticcheck - - unconvert - - unused - - vet - disable: - - errcheck - -run: - deadline: 2m diff --git a/openshift/tools/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md b/openshift/tools/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md deleted file mode 100644 index 48f6704c6d..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md +++ /dev/null @@ -1,5 +0,0 @@ -# Code of Conduct - -We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). - -Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct. diff --git a/openshift/tools/vendor/github.com/distribution/reference/CONTRIBUTING.md b/openshift/tools/vendor/github.com/distribution/reference/CONTRIBUTING.md deleted file mode 100644 index ab21946656..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/CONTRIBUTING.md +++ /dev/null @@ -1,114 +0,0 @@ -# Contributing to the reference library - -## Community help - -If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack. -[Click here for an invite to the CNCF community slack](https://slack.cncf.io/) - -## Reporting security issues - -The maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. 
- - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of (or similar for other container tools): - - `docker version` - - `docker info` - - `docker exec registry --version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing Code - -Contributions should be made via pull requests. Pull requests will be reviewed -by one or more maintainers or reviewers and merged when acceptable. - -You should follow the basic GitHub workflow: - - 1. Use your own [fork](https://help.github.com/en/articles/about-forks) - 2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) - 3. Test your code - 4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) - 5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) - -Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) -for tips on creating a successful contribution. - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/openshift/tools/vendor/github.com/distribution/reference/GOVERNANCE.md b/openshift/tools/vendor/github.com/distribution/reference/GOVERNANCE.md deleted file mode 100644 index 200045b050..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/GOVERNANCE.md +++ /dev/null @@ -1,144 +0,0 @@ -# distribution/reference Project Governance - -Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here. - -For specific guidance on practical contribution steps please -see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide. - -## Maintainership - -There are different types of maintainers, with different responsibilities, but -all maintainers have 3 things in common: - -1) They share responsibility in the project's success. -2) They have made a long-term, recurring time investment to improve the project. -3) They spend that time doing whatever needs to be done, not necessarily what -is the most interesting or fun. - -Maintainers are often under-appreciated, because their work is harder to appreciate. -It's easy to appreciate a really cool and technically advanced feature. It's harder -to appreciate the absence of bugs, the slow but steady improvement in stability, -or the reliability of a release process. But those things distinguish a good -project from a great one. - -## Reviewers - -A reviewer is a core role within the project. -They share in reviewing issues and pull requests and their LGTM counts towards the -required LGTM count to merge a code change into the project. - -Reviewers are part of the organization but do not have write access. -Becoming a reviewer is a core aspect in the journey to becoming a maintainer. - -## Adding maintainers - -Maintainers are first and foremost contributors that have shown they are -committed to the long term success of a project. Contributors wanting to become -maintainers are expected to be deeply involved in contributing code, pull -request review, and triage of issues in the project for more than three months. 
- -Just contributing does not make you a maintainer, it is about building trust -with the current maintainers of the project and being a person that they can -depend on and trust to make decisions in the best interest of the project. - -Periodically, the existing maintainers curate a list of contributors that have -shown regular activity on the project over the prior months. From this list, -maintainer candidates are selected and proposed in a pull request or a -maintainers communication channel. - -After a candidate has been announced to the maintainers, the existing -maintainers are given five business days to discuss the candidate, raise -objections and cast their vote. Votes may take place on the communication -channel or via pull request comment. Candidates must be approved by at least 66% -of the current maintainers by adding their vote on the mailing list. The -reviewer role has the same process but only requires 33% of current maintainers. -Only maintainers of the repository that the candidate is proposed for are -allowed to vote. - -If a candidate is approved, a maintainer will contact the candidate to invite -the candidate to open a pull request that adds the contributor to the -MAINTAINERS file. The voting process may take place inside a pull request if a -maintainer has already discussed the candidacy with the candidate and a -maintainer is willing to be a sponsor by opening the pull request. The candidate -becomes a maintainer once the pull request is merged. - -## Stepping down policy - -Life priorities, interests, and passions can change. If you're a maintainer but -feel you must remove yourself from the list, inform other maintainers that you -intend to step down, and if possible, help find someone to pick up your work. -At the very least, ensure your work can be continued where you left off. - -After you've informed other maintainers, create a pull request to remove -yourself from the MAINTAINERS file. - -## Removal of inactive maintainers - -Similar to the procedure for adding new maintainers, existing maintainers can -be removed from the list if they do not show significant activity on the -project. Periodically, the maintainers review the list of maintainers and their -activity over the last three months. - -If a maintainer has shown insufficient activity over this period, a neutral -person will contact the maintainer to ask if they want to continue being -a maintainer. If the maintainer decides to step down as a maintainer, they -open a pull request to be removed from the MAINTAINERS file. - -If the maintainer wants to remain a maintainer, but is unable to perform the -required duties they can be removed with a vote of at least 66% of the current -maintainers. In this case, maintainers should first propose the change to -maintainers via the maintainers communication channel, then open a pull request -for voting. The voting period is five business days. The voting pull request -should not come as a surpise to any maintainer and any discussion related to -performance must not be discussed on the pull request. - -## How are decisions made? - -Docker distribution is an open-source project with an open design philosophy. -This means that the repository is the source of truth for EVERY aspect of the -project, including its philosophy, design, road map, and APIs. *If it's part of -the project, it's in the repo. If it's in the repo, it's part of the project.* - -As a result, all decisions can be expressed as changes to the repository. 
An -implementation change is a change to the source code. An API change is a change -to the API specification. A philosophy change is a change to the philosophy -manifesto, and so on. - -All decisions affecting distribution, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Merge or refuse the pull request. Who does this depends on the nature -of the pull request and which areas of the project it affects. - -## Helping contributors with the DCO - -The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some contributors are not as familiar with `git`, or have used a web -based editor, and thus asking them to `git commit --amend -s` is not the best -way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. -The most trivial way for a contributor to allow the maintainer to do this, is to -add a DCO signature in a pull requests's comment, or a maintainer can simply -note that the change is sufficiently trivial that it does not substantially -change the existing contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. - -## I'm a maintainer. Should I make pull requests too? - -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. - -## Conflict Resolution - -If you have a technical dispute that you feel has reached an impasse with a -subset of the community, any contributor may open an issue, specifically -calling for a resolution vote of the current core maintainers to resolve the -dispute. The same voting quorums required (2/3) for adding and removing -maintainers will apply to conflict resolution. 
diff --git a/openshift/tools/vendor/github.com/distribution/reference/MAINTAINERS b/openshift/tools/vendor/github.com/distribution/reference/MAINTAINERS deleted file mode 100644 index 9e0a60c8bd..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/MAINTAINERS +++ /dev/null @@ -1,26 +0,0 @@ -# Distribution project maintainers & reviewers -# -# See GOVERNANCE.md for maintainer versus reviewer roles -# -# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io) -# GitHub ID, Name, Email address -"chrispat","Chris Patterson","chrispat@github.com" -"clarkbw","Bryan Clark","clarkbw@github.com" -"corhere","Cory Snider","csnider@mirantis.com" -"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com" -"heww","He Weiwei","hweiwei@vmware.com" -"joaodrp","João Pereira","jpereira@gitlab.com" -"justincormack","Justin Cormack","justin.cormack@docker.com" -"squizzi","Kyle Squizzato","ksquizzato@mirantis.com" -"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com" -"sargun","Sargun Dhillon","sargun@sargun.me" -"wy65701436","Wang Yan","wangyan@vmware.com" -"stevelasker","Steve Lasker","steve.lasker@microsoft.com" -# -# REVIEWERS -# GitHub ID, Name, Email address -"dmcgowan","Derek McGowan","derek@mcgstyle.net" -"stevvooe","Stephen Day","stevvooe@gmail.com" -"thajeztah","Sebastiaan van Stijn","github@gone.nl" -"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com" -"Jamstah", "James Hewitt", "james.hewitt@gmail.com" diff --git a/openshift/tools/vendor/github.com/distribution/reference/Makefile b/openshift/tools/vendor/github.com/distribution/reference/Makefile deleted file mode 100644 index c78576b75d..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -# Project packages. -PACKAGES=$(shell go list ./...) - -# Flags passed to `go test` -BUILDFLAGS ?= -TESTFLAGS ?= - -.PHONY: all build test coverage -.DEFAULT: all - -all: build - -build: ## no binaries to build, so just check compilation suceeds - go build ${BUILDFLAGS} ./... - -test: ## run tests - go test ${TESTFLAGS} ./... - -coverage: ## generate coverprofiles from the unit tests - rm -f coverage.txt - go test ${TESTFLAGS} -cover -coverprofile=cover.out ./... - -.PHONY: help -help: - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/openshift/tools/vendor/github.com/distribution/reference/README.md b/openshift/tools/vendor/github.com/distribution/reference/README.md deleted file mode 100644 index 172a02e0b3..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Distribution reference - -Go library to handle references to container images. 
- - - -[![Build Status](https://github.com/distribution/reference/actions/workflows/test.yml/badge.svg?branch=main&event=push)](https://github.com/distribution/reference/actions?query=workflow%3ACI) -[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/reference) -[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE) -[![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) -[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) - -This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. - -## Contribution - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. - -## Communication - -For async communication and long running discussions please use issues and pull requests on the github repo. -This will be the best place to discuss design and implementation. - -For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/) -that everyone is welcome to join and chat about development. - -## Licenses - -The distribution codebase is released under the [Apache 2.0 license](LICENSE). diff --git a/openshift/tools/vendor/github.com/distribution/reference/SECURITY.md b/openshift/tools/vendor/github.com/distribution/reference/SECURITY.md deleted file mode 100644 index aaf983c0f0..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -## Reporting a Vulnerability - -The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! - -Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io. diff --git a/openshift/tools/vendor/github.com/distribution/reference/distribution-logo.svg b/openshift/tools/vendor/github.com/distribution/reference/distribution-logo.svg deleted file mode 100644 index cc9f4073b9..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/distribution-logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/openshift/tools/vendor/github.com/distribution/reference/helpers.go b/openshift/tools/vendor/github.com/distribution/reference/helpers.go deleted file mode 100644 index d10c7ef838..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. 
-func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See [path.Match] for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/openshift/tools/vendor/github.com/distribution/reference/normalize.go b/openshift/tools/vendor/github.com/distribution/reference/normalize.go deleted file mode 100644 index f4128314c1..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/normalize.go +++ /dev/null @@ -1,255 +0,0 @@ -package reference - -import ( - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // legacyDefaultDomain is the legacy domain for Docker Hub (which was - // originally named "the Docker Index"). This domain is still used for - // authentication and image search, which were part of the "v1" Docker - // registry specification. - // - // This domain will continue to be supported, but there are plans to consolidate - // legacy domains to new "canonical" domains. Once those domains are decided - // on, we must update the normalization functions, but preserve compatibility - // with existing installs, clients, and user configuration. - legacyDefaultDomain = "index.docker.io" - - // defaultDomain is the default domain used for images on Docker Hub. - // It is used to normalize "familiar" names to canonical names, for example, - // to convert "ubuntu" to "docker.io/library/ubuntu:latest". - // - // Note that actual domain of Docker Hub's registry is registry-1.docker.io. - // This domain will continue to be supported, but there are plans to consolidate - // legacy domains to new "canonical" domains. Once those domains are decided - // on, we must update the normalization functions, but preserve compatibility - // with existing installs, clients, and user configuration. - defaultDomain = "docker.io" - - // officialRepoPrefix is the namespace used for official images on Docker Hub. - // It is used to normalize "familiar" names to canonical names, for example, - // to convert "ubuntu" to "docker.io/library/ubuntu:latest". - officialRepoPrefix = "library/" - - // defaultTag is the default tag if no tag is provided. - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
-func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remote string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remote = remainder[:tagSep] - } else { - remote = remainder - } - if strings.ToLower(remote) != remote { - return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote) - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// namedTaggedDigested is a reference that has both a tag and a digest. -type namedTaggedDigested interface { - NamedTagged - Digested -} - -// ParseDockerRef normalizes the image reference following the docker convention, -// which allows for references to contain both a tag and a digest. It returns a -// reference that is either tagged or digested. For references containing both -// a tag and a digest, it returns a digested reference. For example, the following -// reference: -// -// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// Is returned as a digested reference (with the ":latest" tag removed): -// -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// References that are already "tagged" or "digested" are returned unmodified: -// -// // Already a digested reference -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// // Already a named reference -// docker.io/library/busybox:latest -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if canonical, ok := named.(namedTaggedDigested); ok { - // The reference is both tagged and digested; only return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - return WithDigest(newNamed, canonical.Digest()) - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remote-name. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remoteName string) { - maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/") - if !ok { - // Fast-path for single element ("familiar" names), such as "ubuntu" - // or "ubuntu:latest". Familiar names must be handled separately, to - // prevent them from being handled as "hostname:port". - // - // Canonicalize them as "docker.io/library/name[:tag]" - - // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain. - return defaultDomain, officialRepoPrefix + name - } - - switch { - case maybeDomain == localhost: - // localhost is a reserved namespace and always considered a domain. - domain, remoteName = maybeDomain, maybeRemoteName - case maybeDomain == legacyDefaultDomain: - // canonicalize the Docker Hub and legacy "Docker Index" domains. - domain, remoteName = defaultDomain, maybeRemoteName - case strings.ContainsAny(maybeDomain, ".:"): - // Likely a domain or IP-address: - // - // - contains a "." 
(e.g., "example.com" or "127.0.0.1") - // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000") - domain, remoteName = maybeDomain, maybeRemoteName - case strings.ToLower(maybeDomain) != maybeDomain: - // Uppercase namespaces are not allowed, so if the first element - // is not lowercase, we assume it to be a domain-name. - domain, remoteName = maybeDomain, maybeRemoteName - default: - // None of the above: it's not a domain, so use the default, and - // use the name input the remote-name. - domain, remoteName = defaultDomain, name - } - - if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') { - // Canonicalize "familiar" names, but only on Docker Hub, not - // on other domains: - // - // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]" - remoteName = officialRepoPrefix + remoteName - } - - return domain, remoteName -} - -// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if strings.HasPrefix(repo.path, officialRepoPrefix) { - // TODO(thaJeztah): this check may be too strict, as it assumes the - // "library/" namespace does not have nested namespaces. While this - // is true (currently), technically it would be possible for Docker - // Hub to use those (e.g. "library/distros/ubuntu:latest"). - // See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785. - if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') { - repo.path = remainder - } - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. 
-func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/openshift/tools/vendor/github.com/distribution/reference/reference.go b/openshift/tools/vendor/github.com/distribution/reference/reference.go deleted file mode 100644 index 900398bde7..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/reference.go +++ /dev/null @@ -1,432 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] remote-name -// domain := host [':' port-number] -// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A -// domain-name := domain-component ['.' domain-component]* -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// path (or "remote-name") := path-component ['/' path-component]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name. - RepositoryNameTotalLengthMax = 255 - - // NameTotalLengthMax is the maximum total number of characters in a repository name. - // - // Deprecated: use [RepositoryNameTotalLengthMax] instead. - NameTotalLengthMax = RepositoryNameTotalLengthMax -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest.
-type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// by which it can be referenced -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the [Named] reference. -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the [Named] reference. -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -// splitDomain splits a named reference into a hostname and path string. -// If no valid hostname is found, the hostname is empty and the full value -// is returned as name -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference.
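Before the Parse body below, a hypothetical sketch of how a parsed Reference is narrowed through these interfaces (assumed module path github.com/distribution/reference; the digest value is an arbitrary valid sha256):

    package main

    import (
        "fmt"

        "github.com/distribution/reference"
    )

    func main() {
        dgst := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
        ref, err := reference.Parse("example.com:5000/team/app:v1@" + dgst)
        if err != nil {
            panic(err)
        }
        // Type assertions recover the tag and digest components.
        if tagged, ok := ref.(reference.Tagged); ok {
            fmt.Println(tagged.Tag()) // v1
        }
        if canonical, ok := ref.(reference.Canonical); ok {
            fmt.Println(canonical.Digest()) // the sha256 digest above
        }
    }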
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - if len(repo.path) > RepositoryNameTotalLengthMax { - return nil, ErrNameTooLong - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - - if len(match[2]) > RepositoryNameTotalLengthMax { - return nil, ErrNameTooLong - } - - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
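A hypothetical sketch combining the constructors above with TrimNamed, whose body follows (same assumed module path):

    package main

    import (
        "fmt"

        "github.com/distribution/reference"
    )

    func main() {
        named, err := reference.WithName("docker.io/library/busybox")
        if err != nil {
            panic(err)
        }
        tagged, _ := reference.WithTag(named, "stable")
        fmt.Println(tagged.String()) // docker.io/library/busybox:stable

        // TrimNamed drops the tag (and any digest) again.
        fmt.Println(reference.TrimNamed(tagged).String()) // docker.io/library/busybox
    }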
-func TrimNamed(ref Named) Named { - repo := repository{} - if r, ok := ref.(namedRepository); ok { - repo.domain, repo.path = r.Domain(), r.Path() - } else { - repo.domain, repo.path = splitDomain(ref.Name()) - } - return repo -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/openshift/tools/vendor/github.com/distribution/reference/regexp.go b/openshift/tools/vendor/github.com/distribution/reference/regexp.go deleted file mode 100644 index 65bc49d79b..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/regexp.go +++ /dev/null @@ -1,163 +0,0 @@ -package reference - -import ( - "regexp" - "strings" -) - -// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>"). -var DigestRegexp = regexp.MustCompile(digestPat) - -// DomainRegexp matches hostname or IP-addresses, optionally including a port -// number. It defines the structure of potential domain components that may be -// part of image names. This is purposely a subset of what is allowed by DNS to -// ensure backwards compatibility with Docker image names. It may be a subset of -// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between -// square brackets (excluding zone identifiers as defined by [RFC 6874] or special -// addresses such as IPv4-Mapped). -// -// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. -var DomainRegexp = regexp.MustCompile(domainAndPort) - -// IdentifierRegexp is the format for string identifier used as a -// content addressable identifier using sha256. These identifiers -// are like digests without the algorithm, since sha256 is used.
-var IdentifierRegexp = regexp.MustCompile(identifier) - -// NameRegexp is the format for the name component of references, including -// an optional domain and port, but without tag or digest suffix. -var NameRegexp = regexp.MustCompile(namePat) - -// ReferenceRegexp is the full supported format of a reference. The regexp -// is anchored and has capturing groups for name, tag, and digest -// components. -var ReferenceRegexp = regexp.MustCompile(referencePat) - -// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. -// -// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 -var TagRegexp = regexp.MustCompile(tag) - -const ( - // alphanumeric defines the alphanumeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphanumeric = `[a-z0-9]+` - - // separator defines the separators allowed to be embedded in name - // components. This allows one period, one or two underscore and multiple - // dashes. Repeated dashes and underscores are intentionally treated - // differently. In order to support valid hostnames as name components, - // supporting repeated dash was added. Additionally double underscore is - // now allowed as a separator to loosen the restriction for previously - // supported names. - separator = `(?:[._]|__|[-]+)` - - // localhost is treated as a special value for domain-name. Any other - // domain-name without a "." or a ":port" are considered a path component. - localhost = `localhost` - - // domainNameComponent restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp. - domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` - - // optionalPort matches an optional port-number including the port separator - // (e.g. ":80"). - optionalPort = `(?::[0-9]+)?` - - // tag matches valid tag names. From docker/docker:graph/tags.go. - tag = `[\w][\w.-]{0,127}` - - // digestPat matches well-formed digests, including algorithm (e.g. "sha256:<encoded>"). - // - // TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp - // so that go-digest defines the canonical format. Note that the go-digest is - // more relaxed: - // - it allows multiple algorithms (e.g. "sha256+b64:<encoded>") to allow - // future expansion of supported algorithms. - // - it allows the "<encoded>" value to use urlsafe base64 encoding as defined - // in [rfc4648, section 5]. - // - // [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5. - digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` - - // identifier is the format for a content addressable identifier using sha256. - // These identifiers are like digests without the algorithm, since sha256 is used. - identifier = `([a-f0-9]{64})` - - // ipv6address are enclosed between square brackets and may be represented - // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format - // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as - // IPv4-Mapped are deliberately excluded. - ipv6address = `\[(?:[a-fA-F0-9:]+)\]` -) - -var ( - // domainName defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. This includes IPv4 addresses on decimal format.
- domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent) - - // host defines the structure of potential domains based on the URI - // Host subcomponent on rfc3986. It may be a subset of DNS domain name, - // or an IPv4 address in decimal format, or an IPv6 address between square - // brackets (excluding zone identifiers as defined by rfc6874 or special - // addresses such as IPv4-Mapped). - host = `(?:` + domainName + `|` + ipv6address + `)` - - // allowed by the URI Host subcomponent on rfc3986 to ensure backwards - // compatibility with Docker image names. - domainAndPort = host + optionalPort - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = regexp.MustCompile(anchored(tag)) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat)) - - // pathComponent restricts path-components to start with an alphanumeric - // character, with following parts able to be separated by a separator - // (one period, one or two underscore and multiple dashes). - pathComponent = alphanumeric + anyTimes(separator+alphanumeric) - - // remoteName matches the remote-name of a repository. It consists of one - // or more forward slash (/) delimited path-components: - // - // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu" - remoteName = pathComponent + anyTimes(`/`+pathComponent) - namePat = optional(domainAndPort+`/`) + remoteName - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName))) - - referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat))) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier)) -) - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...string) string { - return `(?:` + strings.Join(res, "") + `)?` -} - -// anyTimes wraps the expression in a non-capturing group that can occur -// any number of times. -func anyTimes(res ...string) string { - return `(?:` + strings.Join(res, "") + `)*` -} - -// capture wraps the expression in a capturing group. -func capture(res ...string) string { - return `(` + strings.Join(res, "") + `)` -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...string) string { - return `^` + strings.Join(res, "") + `$` -} diff --git a/openshift/tools/vendor/github.com/distribution/reference/sort.go b/openshift/tools/vendor/github.com/distribution/reference/sort.go deleted file mode 100644 index 416c37b076..0000000000 --- a/openshift/tools/vendor/github.com/distribution/reference/sort.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package reference - -import ( - "sort" -) - -// Sort sorts string references preferring higher information references. -// -// The precedence is as follows: -// -// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>") -// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest") -// 3. [Named] + [Digested] (e.g., "docker.io/library/busybox@sha256:<digest>") -// 4. [Named] (e.g., "docker.io/library/busybox") -// 5. [Digested] (e.g., "docker.io@sha256:<digest>") -// 6. Parse error -func Sort(references []string) []string { - var prefs []Reference - var bad []string - - for _, ref := range references { - pref, err := ParseAnyReference(ref) - if err != nil { - bad = append(bad, ref) - } else { - prefs = append(prefs, pref) - } - } - sort.Slice(prefs, func(a, b int) bool { - ar := refRank(prefs[a]) - br := refRank(prefs[b]) - if ar == br { - return prefs[a].String() < prefs[b].String() - } - return ar < br - }) - sort.Strings(bad) - var refs []string - for _, pref := range prefs { - refs = append(refs, pref.String()) - } - return append(refs, bad...) -} - -func refRank(ref Reference) uint8 { - if _, ok := ref.(Named); ok { - if _, ok = ref.(Tagged); ok { - if _, ok = ref.(Digested); ok { - return 1 - } - return 2 - } - if _, ok = ref.(Digested); ok { - return 3 - } - return 4 - } - return 5 -} diff --git a/openshift/tools/vendor/github.com/docker/cli/AUTHORS b/openshift/tools/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 0000000000..57af08b204 --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,945 @@ +# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See scripts/docs/generate-authors.sh to make modifications. + +A. Lester Buck III +Aanand Prasad +Aaron L. Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Abreto FU +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Adyanth Hosavalike +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akhil Mohan +Akihiro Suda +Akim Demaille +Alan Thompson +Alano Terblanche +Albert Callarisa +Alberto Roura +Albin Kerouanton +Aleksa Sarai +Aleksander Piotrowski +Alessandro Boch +Alex Couture-Beil +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Chneerov +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alexey Igrychev +Alexis Couvreur +Alfred Landrum +Ali Rostami +Alicia Lauerman +Allen Sun +Allie Sadler +Alvin Deng +Amen Belayneh +Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anca Iordache +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andres G.
Aragoneses +Andres Leon Rangel +Andrew France +Andrew He +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrew-Zipperer +Andrey Petrov +Andrii Berehuliak +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Archimedes Trajano +Arko Dasgupta +Arnaud Porterie +Arnaud Rebillout +Arthur Flageul +Arthur Peka +Ashly Mathew +Ashwini Oruganti +Aslam Ahemad +Austin Vazquez +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bodenmiller +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benjamin Böhmke +Benjamin Nater +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Bishal Das +Bjorn Neergaard +Boaz Shuster +Boban Acimovic +Bogdan Anton +Boris Pruessmann +Brad Baker +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Brian Tracy +Brian Wieder +Bruno Sousa +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Calvin Liu +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +carsontham +Carston Schilds +Casey Korver +Ce Gao +Cedric Davies +Cesar Talledo +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +Charlotte Mach +ChaYoung You +Chee Hau Lim +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Chinchilla +Chris Couzens +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Vermilion +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christopher Petito <47751006+krissetto@users.noreply.github.com> +Christopher Petito +Christopher Svensson +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Conner Crosby +Corey Farrell +Corey Quon +Cory Bennet +Cory Snider +Craig Osterhout +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +Daisuke Ito +dalanlan +Damien Nadé +Dan Cotora +Dan Wallis +Danial Gharib +Daniel Artine +Daniel Cassidy +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Daniil Nikolenko +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Alvarez +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Karlsson +David le Blanc +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> +Derek McGowan +Des Preston +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dilep Dev <34891655+DilepDev@users.noreply.github.com> +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Djordje Lukic +Dmitriy Fishman +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dominik Braun +Don Kjer +Dong Chen +DongGeon Lee +Doug Davis +Drew Erny +Ed Costello +Ed Morley <501702+edmorley@users.noreply.github.com> +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eng Zer Jun +Eric Bode +Eric Curtin +Eric Engestrom +Eric G. 
Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik Humphrey +Erik St. Martin +Essam A. Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evan Lezar +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Geyer +Felix Hupfeld +Felix Rabe +fezzik1620 +Filip Jareš +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Gore +Gabriel Nicolas Avellaneda +Gabriela Georgieva +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Margaritis +George Xie +Gianluca Borello +Giau. Tran Minh +Giedrius Jonikas +Gildas Cuisinier +Gio d'Amelio +Gleb Stsenov +Goksu Toprak +Gou Rao +Govind Rai +Grace Choi +Graeme Wiebe +Grant Reaber +Greg Pflaum +Gsealy +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +Guillaume Tardif +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Hector S +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hossein Abbasi <16090309+hsnabszhdn@users.noreply.github.com> +Hu Keping +Huayi Zhang +Hugo Chastel +Hugo Gabriel Eyherabide +huqun +Huu Nguyen +Hyzhou Zhy +Iain MacDonald +Iain Samuel McLean Elder +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Grund +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jacopo Rigoli +Jaivish Kothari +Jake Lambert +Jake Sanders +Jake Stokes +Jakub Panek +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Hall +Jason Heiss +Jason Plum +Jay Kamat +Jean Lecordier +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jennings Zhang +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jianyong Wu +Jie Luo +Jilles Oldenbeuving +Jim Chen +Jim Galasyn +Jim Lin +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Abbey +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard +John Howard +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jon Johnson +Jon Zeolla +Jonatas Baldin +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonathan Warriss-Simmons +Jonh Wendell +Jordan Jennings +Jorge Vallecillo +Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com> +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julian +Julien Barbier +Julien Kassar +Julien Maitrehenry +Julio Cesar Garcia +Justas Brazauskas +Justin Chadwell +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Kelton Bassingthwaite +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Alvarez +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +Kevin Woblick +khaled souf +Kim Eik +Kir Kolyshkin +Kirill A. 
Korinsky +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Kyle Mitofsky +Lachlan Cooper +Lai Jiangshan +Lajos Papp +Lars Kellogg-Stedman +Laura Brehm +Laura Frank +Laurent Erignoux +Laurent Goderre +Lee Gaines +Lei Jitang +Lennie +lentil32 +Leo Gallucci +Leonid Skorospelov +Lewis Daly +Li Fu Bang +Li Yi +Li Zeghong +Liang-Chi Hsieh +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Lovekesh Kumar +Luca Favatella +Luca Marturana +Lucas Chan +Luis Henrique Mulinari +Luka Hartwig +Lukas Heeren +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Maciej Kalisz +Madhav Puri +Madhu Venugopal +Madhur Batra +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marc Cornellà +Marco Mariani +Marco Spiess +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Ileana +Marius Meschter +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathias Duedahl <64321057+Lussebullen@users.noreply.github.com> +Mathieu Champlon +Mathieu Rollet +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Matthieu MOREL +Mauro Porras P +Max Shytikov +Max-Julian Pogner +Maxime Petazzoni +Maximillian Fan Xavier +Mei ChunTao +Melroy van den Berg +Mert Şişmanoğlu +Metal <2466052+tedhexaflow@users.noreply.github.com> +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael Tews +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Dalton +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Miroslav Gula +Misty Stanley-Jones +Mohammad Banikazemi +Mohammad Hossein +Mohammed Aaqib Ansari +Mohammed Aminu Futa +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Morten Hekkvang +Morten Linderud +Moysés Borges +Mozi <29089388+pzhlkj6612@users.noreply.github.com> +Mrunal Patel +muicoder +Murukesh Mohanan +Muthukumar R +Máximo Cuadros +Mårten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nick Santos +Nick Sieger +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +NinaLua +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Silas +Noah Treuhaft +O.S. Tezer +Oded Arbel +Odin Ugedal +ohmystack +OKA Naoya +Oliver Pomeroy +Olle Jonsson +Olli Janatuinen +Oscar Wieman +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Daigle <114765035+pdaig@users.noreply.github.com> +Patrick Hemmer +Patrick Lang +Patrick St. 
laurent +Paul +Paul Kehrer +Paul Lietar +Paul Mulders +Paul Rogalski +Paul Seyfert +Paul Weaver +Pavel Pospisil +Paweł Gronowski +Paweł Pokrywka +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Dave Hello +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +Phong Tran +Pieter E Smit +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +QQ喵 +qudongfang +Raghavendra K T +Rahul Kadyan +Rahul Zoldyck +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Rob Murray +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Roch Feuillade +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Rui JingAn +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Sam Thibault +Samarth Shah +Sambuddha Basu +Sami Tabet +Samuel Cochran +Samuel Karp +Sandro Jäckel +Santhosh Manohar +Sarah Sanders +Sargun Dhillon +Saswat Bhattacharya +Saurabh Kumar +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Simon Heimberg +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +Spring Lee +squeegels +Srini Brahmaroutu +Stavros Panakakis +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Stoica-Marcu Floris-Andrei +Stuart Williams +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Takeshi Koenuma +Takuya Noguchi +Taylor Jones +Teiva Harsanyi +Tejaswini Duggaraju +Tengfei Wang +Teppei Fukuda +Thatcher Peskens +Thibault Coupin +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Sampson +Tim Smith +Tim Waugh +Tim Welsh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Bäckman +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulrich Bareth +Ulysses Souza +Umesh Yadav +Vaclav Struhar +Valentin Lorentz +Vardan Pogosian +Venkateswara Reddy Bukkasamudram +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Ville Skyttä +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenlong Zhang +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Wang +William Henry +Xianglin Gao +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yucheng Wu +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zeel B Patel +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhiwei Liang +Zhou Hao +Zhoulin Xie +Zhu Guihua +Zhuo Zhi +Álex González +Álvaro Lázaro +Átila Camurça Alves +Александр Менщиков <__Singleton__@hackerdom.ru> +徐俊杰 +林博仁 Buo-ren Lin diff --git a/openshift/tools/vendor/github.com/docker/cli/LICENSE b/openshift/tools/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 0000000000..9c8e20ab85 --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/openshift/tools/vendor/github.com/docker/cli/NOTICE b/openshift/tools/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 0000000000..1c40faaec6 --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
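Looking back at the Sort precedence documented in the deleted sort.go further up, a hypothetical illustration of the resulting order (assumed module path github.com/distribution/reference):

    package main

    import (
        "fmt"

        "github.com/distribution/reference"
    )

    func main() {
        sorted := reference.Sort([]string{
            "docker.io/library/busybox",
            "docker.io/library/busybox:latest",
            "???not-a-reference",
        })
        // Tagged references rank above name-only ones; strings that fail
        // to parse are sorted last.
        fmt.Println(sorted)
        // [docker.io/library/busybox:latest docker.io/library/busybox ???not-a-reference]
    }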
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/config.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/config.go new file mode 100644 index 0000000000..5a63780509 --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/config.go @@ -0,0 +1,176 @@ +package config + +import ( + "fmt" + "io" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "sync" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" +) + +const ( + // EnvOverrideConfigDir is the name of the environment variable that can be + // used to override the location of the client configuration files (~/.docker). + // + // It takes priority over the default, but can be overridden by the "--config" + // command line option. + EnvOverrideConfigDir = "DOCKER_CONFIG" + + // ConfigFileName is the name of the client configuration file inside the + // config-directory. + ConfigFileName = "config.json" + configFileDir = ".docker" + contextsDir = "contexts" +) + +var ( + initConfigDir = new(sync.Once) + configDir string +) + +// resetConfigDir is used in testing to reset the "configDir" package variable +// and its sync.Once to force re-lookup between tests. +func resetConfigDir() { + configDir = "" + initConfigDir = new(sync.Once) +} + +// getHomeDir returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// On non-Windows platforms, it falls back to nss lookups, if the home +// directory cannot be obtained from environment-variables. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +// +// getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker +// as dependency for consumers that only need to read the config-file. +// +// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get +func getHomeDir() string { + home, _ := os.UserHomeDir() + if home == "" && runtime.GOOS != "windows" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// Provider defines an interface for providing the CLI config. +type Provider interface { + ConfigFile() *configfile.ConfigFile +} + +// Dir returns the directory the configuration file is stored in +func Dir() string { + initConfigDir.Do(func() { + configDir = os.Getenv(EnvOverrideConfigDir) + if configDir == "" { + configDir = filepath.Join(getHomeDir(), configFileDir) + } + }) + return configDir +} + +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + +// SetDir sets the directory the configuration file is stored in +func SetDir(dir string) { + // trigger the sync.Once to synchronise with Dir() + initConfigDir.Do(func() {}) + configDir = filepath.Clean(dir) +} + +// Path returns the path to a file relative to the config dir +func Path(p ...string) (string, error) { + path := filepath.Join(append([]string{Dir()}, p...)...) 
+ if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { + return "", fmt.Errorf("path %q is outside of root config directory %q", path, Dir()) + } + return path, nil +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader. It returns an error if configData is malformed. +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration file ([ConfigFileName]) from the given directory. +// If no directory is given, it uses the default [Dir]. A [*configfile.ConfigFile] +// is returned containing the contents of the configuration file, or a default +// struct if no configfile exists in the given location. +// +// Load returns an error if a configuration file exists in the given location, +// but cannot be read, or is malformed. Consumers must handle errors to prevent +// overwriting an existing configuration file. +func Load(configDir string) (*configfile.ConfigFile, error) { + if configDir == "" { + configDir = Dir() + } + return load(configDir) +} + +func load(configDir string) (*configfile.ConfigFile, error) { + filename := filepath.Join(configDir, ConfigFileName) + configFile := configfile.New(filename) + + file, err := os.Open(filename) + if err != nil { + if os.IsNotExist(err) { + // It is OK for no configuration file to be present, in which + // case we return a default struct. + return configFile, nil + } + // Any other error happening when failing to read the file must be returned. + return configFile, fmt.Errorf("loading config file: %w", err) + } + defer func() { _ = file.Close() }() + err = configFile.LoadFromReader(file) + if err != nil { + err = fmt.Errorf("parsing config file (%s): %w", filename, err) + } + return configFile, err +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// a reference to the ConfigFile struct. If none is found or when failing to load +// the configuration file, it initializes a default ConfigFile struct. If no +// credentials-store is set in the configuration file, it attempts to discover +// the default store to use for the current platform. +// +// Important: LoadDefaultConfigFile prints a warning to stderr when failing to +// load the configuration file, but otherwise ignores errors. Consumers should +// consider using [Load] (and [credentials.DetectDefaultStore]) to detect errors +// when updating the configuration file, to prevent discarding a (malformed) +// configuration file. 
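+//
+// A minimal usage sketch from a consumer's perspective (the registry URL and
+// error handling are illustrative, not part of this file):
+//
+//	cfg, err := config.Load("") // "" falls back to the default config dir
+//	if err != nil {
+//		// handle a malformed config file instead of overwriting it later
+//	}
+//	authConfig, _ := cfg.GetAuthConfig("https://index.docker.io/v1/")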
+func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { + configFile, err := load(Dir()) + if err != nil { + // FIXME(thaJeztah): we should not proceed here to prevent overwriting existing (but malformed) config files; see https://github.com/docker/cli/issues/5075 + _, _ = fmt.Fprintln(stderr, "WARNING: Error", err) + } + if !configFile.ContainsAuth() { + configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) + } + return configFile +} diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file.go new file mode 100644 index 0000000000..fab3ed4cba --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -0,0 +1,441 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/memorystore" + "github.com/docker/cli/cli/config/types" + "github.com/sirupsen/logrus" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string `json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Plugins map[string]map[string]string `json:"plugins,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` + Features map[string]string `json:"features,omitempty"` +} + +type configEnvAuth struct { + Auth string `json:"auth"` +} + +type configEnv struct { + AuthConfigs map[string]configEnvAuth `json:"auths"` +} + +// DockerEnvConfigKey is an environment variable that contains a JSON encoded +// credential config. It only supports storing the credentials as a base64 +// encoded string in the format base64("username:pat"). +// +// Adding additional fields will produce a parsing error. 
+//
+// Example:
+//
+//	{
+//	  "auths": {
+//	    "example.test": {
+//	      "auth": base64-encoded-username-pat
+//	    }
+//	  }
+//	}
+const DockerEnvConfigKey = "DOCKER_AUTH_CONFIG"
+
+// ProxyConfig contains proxy configuration settings
+type ProxyConfig struct {
+	HTTPProxy  string `json:"httpProxy,omitempty"`
+	HTTPSProxy string `json:"httpsProxy,omitempty"`
+	NoProxy    string `json:"noProxy,omitempty"`
+	FTPProxy   string `json:"ftpProxy,omitempty"`
+	AllProxy   string `json:"allProxy,omitempty"`
+}
+
+// New initializes an empty configuration file for the given filename 'fn'
+func New(fn string) *ConfigFile {
+	return &ConfigFile{
+		AuthConfigs: make(map[string]types.AuthConfig),
+		HTTPHeaders: make(map[string]string),
+		Filename:    fn,
+		Plugins:     make(map[string]map[string]string),
+		Aliases:     make(map[string]string),
+	}
+}
+
+// LoadFromReader reads the given configuration data, decodes the embedded
+// auth configurations, and populates the receiver object.
+func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
+	if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) {
+		return err
+	}
+	var err error
+	for addr, ac := range configFile.AuthConfigs {
+		if ac.Auth != "" {
+			ac.Username, ac.Password, err = decodeAuth(ac.Auth)
+			if err != nil {
+				return err
+			}
+		}
+		ac.Auth = ""
+		ac.ServerAddress = addr
+		configFile.AuthConfigs[addr] = ac
+	}
+	return nil
+}
+
+// ContainsAuth returns whether there is authentication configured
+// in this file or not.
+func (configFile *ConfigFile) ContainsAuth() bool {
+	return configFile.CredentialsStore != "" ||
+		len(configFile.CredentialHelpers) > 0 ||
+		len(configFile.AuthConfigs) > 0
+}
+
+// GetAuthConfigs returns the mapping of repo to auth configuration
+func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig {
+	if configFile.AuthConfigs == nil {
+		configFile.AuthConfigs = make(map[string]types.AuthConfig)
+	}
+	return configFile.AuthConfigs
+}
+
+// SaveToWriter encodes and writes out all the authorization information to
+// the given writer
+func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
+	// Encode sensitive data into a new/temp struct
+	tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
+	for k, authConfig := range configFile.AuthConfigs {
+		authCopy := authConfig
+		// encode and save the authstring, while blanking out the original fields
+		authCopy.Auth = encodeAuth(&authCopy)
+		authCopy.Username = ""
+		authCopy.Password = ""
+		authCopy.ServerAddress = ""
+		tmpAuthConfigs[k] = authCopy
+	}
+
+	saveAuthConfigs := configFile.AuthConfigs
+	configFile.AuthConfigs = tmpAuthConfigs
+	defer func() { configFile.AuthConfigs = saveAuthConfigs }()
+
+	// User-Agent header is automatically set, and should not be stored in the configuration
+	for v := range configFile.HTTPHeaders {
+		if strings.EqualFold(v, "User-Agent") {
+			delete(configFile.HTTPHeaders, v)
+		}
+	}
+
+	data, err := json.MarshalIndent(configFile, "", "\t")
+	if err != nil {
+		return err
+	}
+	_, err = writer.Write(data)
+	return err
+}
+
+// Save encodes and writes out all the authorization information
+func (configFile *ConfigFile) Save() (retErr error) {
+	if configFile.Filename == "" {
+		return errors.New("can't save config with empty filename")
+	}
+
+	dir := filepath.Dir(configFile.Filename)
+	if err := os.MkdirAll(dir, 0o700); err != nil {
+		return err
+	}
+	temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename))
+	if err != nil {
		return err
+	}
+	defer func() {
+		// ignore error as the file may already be closed when we reach this.
+		_ = temp.Close()
+		if retErr != nil {
+			if err := os.Remove(temp.Name()); err != nil {
+				logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
+			}
+		}
+	}()
+
+	err = configFile.SaveToWriter(temp)
+	if err != nil {
+		return err
+	}
+
+	if err := temp.Close(); err != nil {
+		return fmt.Errorf("error closing temp file: %w", err)
+	}
+
+	// Handle situation where the configfile is a symlink, and allow for dangling symlinks
+	cfgFile := configFile.Filename
+	if f, err := filepath.EvalSymlinks(cfgFile); err == nil {
+		cfgFile = f
+	} else if os.IsNotExist(err) {
+		// extract the path from the error if the configfile does not exist or is a dangling symlink
+		var pathError *os.PathError
+		if errors.As(err, &pathError) {
+			cfgFile = pathError.Path
+		}
+	}
+
+	// Try copying the current config file's (if any) ownership and permissions
+	copyFilePermissions(cfgFile, temp.Name())
+	return os.Rename(temp.Name(), cfgFile)
+}
+
+// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
+// then checking this against any environment variables provided to the container
+func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
+	var cfgKey string
+
+	if _, ok := configFile.Proxies[host]; !ok {
+		cfgKey = "default"
+	} else {
+		cfgKey = host
+	}
+
+	config := configFile.Proxies[cfgKey]
+	permitted := map[string]*string{
+		"HTTP_PROXY":  &config.HTTPProxy,
+		"HTTPS_PROXY": &config.HTTPSProxy,
+		"NO_PROXY":    &config.NoProxy,
+		"FTP_PROXY":   &config.FTPProxy,
+		"ALL_PROXY":   &config.AllProxy,
+	}
+	m := runOpts
+	if m == nil {
+		m = make(map[string]*string)
+	}
+	for k := range permitted {
+		if *permitted[k] == "" {
+			continue
+		}
+		if _, ok := m[k]; !ok {
+			m[k] = permitted[k]
+		}
+		if _, ok := m[strings.ToLower(k)]; !ok {
+			m[strings.ToLower(k)] = permitted[k]
+		}
+	}
+	return m
+}
+
+// encodeAuth creates a base64 encoded string containing authorization information
+func encodeAuth(authConfig *types.AuthConfig) string {
+	if authConfig.Username == "" && authConfig.Password == "" {
+		return ""
+	}
+
+	authStr := authConfig.Username + ":" + authConfig.Password
+	msg := []byte(authStr)
+	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
+	base64.StdEncoding.Encode(encoded, msg)
+	return string(encoded)
+}
+
+// decodeAuth decodes a base64 encoded string and returns username and password
+func decodeAuth(authStr string) (string, string, error) {
+	if authStr == "" {
+		return "", "", nil
+	}
+
+	decLen := base64.StdEncoding.DecodedLen(len(authStr))
+	decoded := make([]byte, decLen)
+	authByte := []byte(authStr)
+	n, err := base64.StdEncoding.Decode(decoded, authByte)
+	if err != nil {
+		return "", "", err
+	}
+	if n > decLen {
+		return "", "", errors.New("something went wrong decoding auth config")
+	}
+	userName, password, ok := strings.Cut(string(decoded), ":")
+	if !ok || userName == "" {
+		return "", "", errors.New("invalid auth configuration file")
+	}
+	return userName, strings.Trim(password, "\x00"), nil
+}
+
+// GetCredentialsStore returns a new credentials store from the settings in the
+// configuration file
+func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store {
+	store := credentials.NewFileStore(configFile)
+
+	if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" {
+		store =
newNativeStore(configFile, helper) + } + + envConfig := os.Getenv(DockerEnvConfigKey) + if envConfig == "" { + return store + } + + authConfig, err := parseEnvConfig(envConfig) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + // use DOCKER_AUTH_CONFIG if set + // it uses the native or file store as a fallback to fetch and store credentials + envStore, err := memorystore.New( + memorystore.WithAuthConfig(authConfig), + memorystore.WithFallbackStore(store), + ) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + return envStore +} + +func parseEnvConfig(v string) (map[string]types.AuthConfig, error) { + envConfig := &configEnv{} + decoder := json.NewDecoder(strings.NewReader(v)) + decoder.DisallowUnknownFields() + if err := decoder.Decode(envConfig); err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + if decoder.More() { + return nil, errors.New("DOCKER_AUTH_CONFIG does not support more than one JSON object") + } + + authConfigs := make(map[string]types.AuthConfig) + for addr, envAuth := range envConfig.AuthConfigs { + if envAuth.Auth == "" { + return nil, fmt.Errorf("DOCKER_AUTH_CONFIG environment variable is missing key `auth` for %s", addr) + } + username, password, err := decodeAuth(envAuth.Auth) + if err != nil { + return nil, err + } + authConfigs[addr] = types.AuthConfig{ + Username: username, + Password: password, + ServerAddress: addr, + } + } + return authConfigs, nil +} + +// var for unit testing. +var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { + return credentials.NewNativeStore(configFile, helperSuffix) +} + +// GetAuthConfig for a repository from the credential store +func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { + return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { + if c.CredentialHelpers != nil && registryHostname != "" { + if helper, exists := c.CredentialHelpers[registryHostname]; exists { + return helper + } + } + return c.CredentialsStore +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. +func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + addAll := func(from map[string]types.AuthConfig) { + for reg, ac := range from { + auths[reg] = ac + } + } + + defaultStore := configFile.GetCredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(newAuths) + + // Auth configs from a registry-specific helper should override those from the default store. + for registryHostname := range configFile.CredentialHelpers { + newAuth, err := configFile.GetAuthConfig(registryHostname) + if err != nil { + // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). 
+			logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname)
+			continue
+		}
+		auths[registryHostname] = newAuth
+	}
+	return auths, nil
+}
+
+// GetFilename returns the file name that this config file is based on.
+func (configFile *ConfigFile) GetFilename() string {
+	return configFile.Filename
+}
+
+// PluginConfig retrieves the requested option for the given plugin.
+func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) {
+	if configFile.Plugins == nil {
+		return "", false
+	}
+	pluginConfig, ok := configFile.Plugins[pluginname]
+	if !ok {
+		return "", false
+	}
+	value, ok := pluginConfig[option]
+	return value, ok
+}
+
+// SetPluginConfig sets the option to the given value for the given
+// plugin. Passing a value of "" will remove the option. If the final
+// config item for a given plugin is removed, the overall plugin entry
+// is cleaned up as well.
+func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) {
+	if configFile.Plugins == nil {
+		configFile.Plugins = make(map[string]map[string]string)
+	}
+	pluginConfig, ok := configFile.Plugins[pluginname]
+	if !ok {
+		pluginConfig = make(map[string]string)
+		configFile.Plugins[pluginname] = pluginConfig
+	}
+	if value != "" {
+		pluginConfig[option] = value
+	} else {
+		delete(pluginConfig, option)
+	}
+	if len(pluginConfig) == 0 {
+		delete(configFile.Plugins, pluginname)
+	}
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go
new file mode 100644
index 0000000000..06b811e7d5
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go
@@ -0,0 +1,35 @@
+//go:build !windows
+
+package configfile
+
+import (
+	"os"
+	"syscall"
+)
+
+// copyFilePermissions copies file ownership and permissions from "src" to "dst",
+// ignoring any error during the process.
+func copyFilePermissions(src, dst string) {
+	var (
+		mode     os.FileMode = 0o600
+		uid, gid int
+	)
+
+	fi, err := os.Stat(src)
+	if err != nil {
+		return
+	}
+	if fi.Mode().IsRegular() {
+		mode = fi.Mode()
+	}
+	if err := os.Chmod(dst, mode); err != nil {
+		return
+	}
+
+	uid = int(fi.Sys().(*syscall.Stat_t).Uid)
+	gid = int(fi.Sys().(*syscall.Stat_t).Gid)
+
+	if uid > 0 && gid > 0 {
+		_ = os.Chown(dst, uid, gid)
+	}
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go
new file mode 100644
index 0000000000..42fffc39ad
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go
@@ -0,0 +1,5 @@
+package configfile
+
+func copyFilePermissions(src, dst string) {
+	// TODO implement for Windows
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/credentials.go
new file mode 100644
index 0000000000..28d58ec48d
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/credentials.go
@@ -0,0 +1,17 @@
+package credentials
+
+import (
+	"github.com/docker/cli/cli/config/types"
+)
+
+// Store is the interface that any credentials store must implement.
+type Store interface {
+	// Erase removes credentials from the store for a given server.
+	Erase(serverAddress string) error
+	// Get retrieves credentials from the store for a given server.
+	Get(serverAddress string) (types.AuthConfig, error)
+	// GetAll retrieves all the credentials from the store.
+	GetAll() (map[string]types.AuthConfig, error)
+	// Store saves credentials in the store.
+	Store(authConfig types.AuthConfig) error
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store.go
new file mode 100644
index 0000000000..a36afc41f4
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store.go
@@ -0,0 +1,22 @@
+package credentials
+
+import "os/exec"
+
+// DetectDefaultStore returns the default credentials store for the platform if
+// no user-defined store is passed, and the store executable is available.
+func DetectDefaultStore(store string) string {
+	if store != "" {
+		// use user-defined
+		return store
+	}
+
+	platformDefault := defaultCredentialsStore()
+	if platformDefault == "" {
+		return ""
+	}
+
+	if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err != nil {
+		return ""
+	}
+	return platformDefault
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
new file mode 100644
index 0000000000..5d42dec622
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
@@ -0,0 +1,5 @@
+package credentials
+
+func defaultCredentialsStore() string {
+	return "osxkeychain"
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
new file mode 100644
index 0000000000..a9012c6d4a
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
@@ -0,0 +1,13 @@
+package credentials
+
+import (
+	"os/exec"
+)
+
+func defaultCredentialsStore() string {
+	if _, err := exec.LookPath("pass"); err == nil {
+		return "pass"
+	}
+
+	return "secretservice"
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
new file mode 100644
index 0000000000..40c16eb837
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
@@ -0,0 +1,7 @@
+//go:build !windows && !darwin && !linux
+
+package credentials
+
+func defaultCredentialsStore() string {
+	return ""
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
new file mode 100644
index 0000000000..bb799ca61b
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
@@ -0,0 +1,5 @@
+package credentials
+
+func defaultCredentialsStore() string {
+	return "wincred"
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
new file mode 100644
index 0000000000..c69312b014
--- /dev/null
+++ 
b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
@@ -0,0 +1,118 @@
+package credentials
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"strings"
+	"sync/atomic"
+
+	"github.com/docker/cli/cli/config/types"
+)
+
+type store interface {
+	Save() error
+	GetAuthConfigs() map[string]types.AuthConfig
+	GetFilename() string
+}
+
+// fileStore implements a credentials store using
+// the docker configuration file to keep the credentials in plain text.
+type fileStore struct {
+	file store
+}
+
+// NewFileStore creates a new file credentials store.
+func NewFileStore(file store) Store {
+	return &fileStore{file: file}
+}
+
+// Erase removes the given credentials from the file store. This function is
+// idempotent and does not update the file if credentials did not change.
+func (c *fileStore) Erase(serverAddress string) error {
+	if _, exists := c.file.GetAuthConfigs()[serverAddress]; !exists {
+		// nothing to do; no credentials found for the given serverAddress
+		return nil
+	}
+	delete(c.file.GetAuthConfigs(), serverAddress)
+	return c.file.Save()
+}
+
+// Get retrieves credentials for a specific server from the file store.
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
+	authConfig, ok := c.file.GetAuthConfigs()[serverAddress]
+	if !ok {
+		// Maybe they have a legacy config file; we will iterate the keys,
+		// converting them to the new format and testing
+		for r, ac := range c.file.GetAuthConfigs() {
+			if serverAddress == ConvertToHostname(r) {
+				return ac, nil
+			}
+		}
+
+		authConfig = types.AuthConfig{}
+	}
+	return authConfig, nil
+}
+
+func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
+	return c.file.GetAuthConfigs(), nil
+}
+
+// unencryptedWarning warns the user when using an insecure credential storage.
+// After a deprecation period, the user will get prompted if stdin and stderr are a terminal.
+// Otherwise, we'll assume they want it (sadly), because people may have been scripting
+// insecure logins and we don't want to break them. Maybe they'll see the warning in their
+// logs and fix things.
+const unencryptedWarning = `
+WARNING! Your credentials are stored unencrypted in '%s'.
+Configure a credential helper to remove this warning. See
+https://docs.docker.com/go/credential-store/
+`
+
+// alreadyPrinted ensures that we only print the unencryptedWarning once per
+// CLI invocation (no need to warn the user multiple times per command).
+var alreadyPrinted atomic.Bool
+
+// Store saves the given credentials in the file store. This function is
+// idempotent and does not update the file if credentials did not change.
+func (c *fileStore) Store(authConfig types.AuthConfig) error {
+	authConfigs := c.file.GetAuthConfigs()
+	if oldAuthConfig, ok := authConfigs[authConfig.ServerAddress]; ok && oldAuthConfig == authConfig {
+		// Credentials didn't change, so skip updating the configuration file.
+		return nil
+	}
+	authConfigs[authConfig.ServerAddress] = authConfig
+	if err := c.file.Save(); err != nil {
+		return err
+	}
+
+	if !alreadyPrinted.Load() && authConfig.Password != "" {
+		// Display a warning if we're storing the user's password (not a token).
+		//
+		// FIXME(thaJeztah): make output configurable instead of hardcoding to os.Stderr
+		_, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf(unencryptedWarning, c.file.GetFilename()))
+		alreadyPrinted.Store(true)
+	}
+
+	return nil
+}
+
+// ConvertToHostname converts a registry url which has http|https prepended
+// to just a hostname.
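+//
+// A sketch of the expected mapping (example inputs are illustrative):
+//
+//	ConvertToHostname("https://registry.example.com")      // "registry.example.com"
+//	ConvertToHostname("https://registry.example.com:5000") // "registry.example.com:5000"
+//	ConvertToHostname("registry.example.com/v2/")          // "registry.example.com"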
+// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
+func ConvertToHostname(maybeURL string) string {
+	stripped := maybeURL
+	if strings.Contains(stripped, "://") {
+		u, err := url.Parse(stripped)
+		if err == nil && u.Hostname() != "" {
+			if u.Port() == "" {
+				return u.Hostname()
+			}
+			return net.JoinHostPort(u.Hostname(), u.Port())
+		}
+	}
+	hostName, _, _ := strings.Cut(stripped, "/")
+	return hostName
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/native_store.go
new file mode 100644
index 0000000000..b9af145b9d
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/credentials/native_store.go
@@ -0,0 +1,147 @@
+package credentials
+
+import (
+	"github.com/docker/cli/cli/config/types"
+	"github.com/docker/docker-credential-helpers/client"
+	"github.com/docker/docker-credential-helpers/credentials"
+)
+
+const (
+	remoteCredentialsPrefix = "docker-credential-" //nolint:gosec // ignore G101: Potential hardcoded credentials
+	tokenUsername           = "<token>"
+)
+
+// nativeStore implements a credentials store
+// using native keychain to keep credentials secure.
+// It piggybacks into a file store to keep users' emails.
+type nativeStore struct {
+	programFunc client.ProgramFunc
+	fileStore   Store
+}
+
+// NewNativeStore creates a new native store that
+// uses a remote helper program to manage credentials.
+func NewNativeStore(file store, helperSuffix string) Store {
+	name := remoteCredentialsPrefix + helperSuffix
+	return &nativeStore{
+		programFunc: client.NewShellProgramFunc(name),
+		fileStore:   NewFileStore(file),
+	}
+}
+
+// Erase removes the given credentials from the native store.
+func (c *nativeStore) Erase(serverAddress string) error {
+	if err := client.Erase(c.programFunc, serverAddress); err != nil {
+		return err
+	}
+
+	// Fallback to plain text store to remove email
+	return c.fileStore.Erase(serverAddress)
+}
+
+// Get retrieves credentials for a specific server from the native store.
+func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
+	// load user email if it exists or an empty auth config.
+	auth, _ := c.fileStore.Get(serverAddress)
+
+	creds, err := c.getCredentialsFromStore(serverAddress)
+	if err != nil {
+		return auth, err
+	}
+	auth.Username = creds.Username
+	auth.IdentityToken = creds.IdentityToken
+	auth.Password = creds.Password
+	auth.ServerAddress = creds.ServerAddress
+
+	return auth, nil
+}
+
+// GetAll retrieves all the credentials from the native store.
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
+	auths, err := c.listCredentialsInStore()
+	if err != nil {
+		return nil, err
+	}
+
+	// Emails are only stored in the file store.
+	// This call can be safely eliminated when emails are removed.
+	fileConfigs, _ := c.fileStore.GetAll()
+
+	authConfigs := make(map[string]types.AuthConfig)
+	for registry := range auths {
+		creds, err := c.getCredentialsFromStore(registry)
+		if err != nil {
+			return nil, err
+		}
+		ac := fileConfigs[registry] // might contain Email
+		ac.Username = creds.Username
+		ac.Password = creds.Password
+		ac.IdentityToken = creds.IdentityToken
+		if ac.ServerAddress == "" {
+			ac.ServerAddress = creds.ServerAddress
+		}
+		authConfigs[registry] = ac
+	}
+
+	return authConfigs, nil
+}
+
+// Store saves the given credentials in the native store.
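+//
+// A minimal sketch of the flow, assuming a "pass" credential helper is
+// installed (names and values are illustrative):
+//
+//	s := NewNativeStore(configFile, "pass") // executes docker-credential-pass
+//	err := s.Store(types.AuthConfig{
+//		ServerAddress: "registry.example.com",
+//		Username:      "joe",
+//		Password:      "s3cr3t",
+//	})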
+func (c *nativeStore) Store(authConfig types.AuthConfig) error {
+	if err := c.storeCredentialsInStore(authConfig); err != nil {
+		return err
+	}
+	authConfig.Username = ""
+	authConfig.Password = ""
+	authConfig.IdentityToken = ""
+
+	// Fallback to old credential in plain text to save only the email
+	return c.fileStore.Store(authConfig)
+}
+
+// storeCredentialsInStore executes the command to store the credentials in the native store.
+func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
+	creds := &credentials.Credentials{
+		ServerURL: config.ServerAddress,
+		Username:  config.Username,
+		Secret:    config.Password,
+	}
+
+	if config.IdentityToken != "" {
+		creds.Username = tokenUsername
+		creds.Secret = config.IdentityToken
+	}
+
+	return client.Store(c.programFunc, creds)
+}
+
+// getCredentialsFromStore executes the command to get the credentials from the native store.
+func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
+	var ret types.AuthConfig
+
+	creds, err := client.Get(c.programFunc, serverAddress)
+	if err != nil {
+		if credentials.IsErrCredentialsNotFound(err) {
+			// do not return an error if the credentials are not
+			// in the keychain. Let docker ask for new credentials.
+			return ret, nil
+		}
+		return ret, err
+	}
+
+	if creds.Username == tokenUsername {
+		ret.IdentityToken = creds.Secret
+	} else {
+		ret.Password = creds.Secret
+		ret.Username = creds.Username
+	}
+
+	ret.ServerAddress = serverAddress
+	return ret, nil
+}
+
+// listCredentialsInStore returns a listing of stored credentials as a map of
+// URL -> username.
+func (c *nativeStore) listCredentialsInStore() (map[string]string, error) {
+	return client.List(c.programFunc)
+}
diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/memorystore/store.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/memorystore/store.go
new file mode 100644
index 0000000000..f8ec62b95a
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/memorystore/store.go
@@ -0,0 +1,131 @@
+// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
+//go:build go1.24
+
+package memorystore
+
+import (
+	"fmt"
+	"maps"
+	"os"
+	"sync"
+
+	"github.com/docker/cli/cli/config/credentials"
+	"github.com/docker/cli/cli/config/types"
+)
+
+// notFoundErr is the error returned when a value could not be found.
+type notFoundErr string
+
+func (notFoundErr) NotFound() {}
+
+func (e notFoundErr) Error() string {
+	return string(e)
+}
+
+var errValueNotFound notFoundErr = "value not found"
+
+type Config struct {
+	lock              sync.RWMutex
+	memoryCredentials map[string]types.AuthConfig
+	fallbackStore     credentials.Store
+}
+
+func (e *Config) Erase(serverAddress string) error {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	delete(e.memoryCredentials, serverAddress)
+
+	if e.fallbackStore != nil {
+		err := e.fallbackStore.Erase(serverAddress)
+		if err != nil {
+			_, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err)
+		}
+	}
+
+	return nil
+}
+
+func (e *Config) Get(serverAddress string) (types.AuthConfig, error) {
+	e.lock.RLock()
+	defer e.lock.RUnlock()
+	authConfig, ok := e.memoryCredentials[serverAddress]
+	if !ok {
+		if e.fallbackStore != nil {
+			return e.fallbackStore.Get(serverAddress)
+		}
+		return types.AuthConfig{}, errValueNotFound
+	}
+	return authConfig, nil
+}
+
+func (e *Config) GetAll() (map[string]types.AuthConfig, error) {
+	e.lock.RLock()
+	defer e.lock.RUnlock()
+	creds := make(map[string]types.AuthConfig)
+
+	if e.fallbackStore != nil {
+		fileCredentials, err := e.fallbackStore.GetAll()
+		if err != nil {
+			_, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err)
+		} else {
+			creds = fileCredentials
+		}
+	}
+
+	maps.Copy(creds, e.memoryCredentials)
+	return creds, nil
+}
+
+func (e *Config) Store(authConfig types.AuthConfig) error {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	e.memoryCredentials[authConfig.ServerAddress] = authConfig
+
+	if e.fallbackStore != nil {
+		return e.fallbackStore.Store(authConfig)
+	}
+	return nil
+}
+
+// WithFallbackStore sets a fallback store.
+//
+// Write operations will be performed on both the memory store and the
+// fallback store.
+//
+// Read operations will first check the memory store, and if the credential
+// is not found, it will then check the fallback store.
+//
+// Retrieving all credentials will return from both the memory store and the
+// fallback store, merging the results from both stores into a single map.
+//
+// Data stored in the memory store will take precedence over data in the
+// fallback store.
+func WithFallbackStore(store credentials.Store) Options {
+	return func(s *Config) error {
+		s.fallbackStore = store
+		return nil
+	}
+}
+
+// WithAuthConfig sets the initial credentials in the memory store.
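+//
+// A minimal sketch of composing a store from both options (values are
+// illustrative):
+//
+//	store, err := New(
+//		WithAuthConfig(map[string]types.AuthConfig{
+//			"registry.example.com": {Username: "joe", Password: "s3cr3t"},
+//		}),
+//		WithFallbackStore(fallback), // e.g. a file or native store
+//	)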
+func WithAuthConfig(config map[string]types.AuthConfig) Options { + return func(s *Config) error { + s.memoryCredentials = config + return nil + } +} + +type Options func(*Config) error + +// New creates a new in memory credential store +func New(opts ...Options) (credentials.Store, error) { + m := &Config{ + memoryCredentials: make(map[string]types.AuthConfig), + } + for _, opt := range opts { + if err := opt(m); err != nil { + return nil, err + } + } + return m, nil +} diff --git a/openshift/tools/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/openshift/tools/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 0000000000..9fe90003b1 --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,17 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/distribution/reference/LICENSE b/openshift/tools/vendor/github.com/docker/distribution/LICENSE similarity index 100% rename from openshift/tools/vendor/github.com/distribution/reference/LICENSE rename to openshift/tools/vendor/github.com/docker/distribution/LICENSE diff --git a/openshift/tools/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/openshift/tools/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 0000000000..2c3ebe1653 --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/openshift/tools/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/openshift/tools/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 0000000000..fe238210cd --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. 
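+//
+// For example, a 401 response carrying the header (values are illustrative)
+//
+//	WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"
+//
+// is parsed by ResponseChallenges into a Challenge with Scheme "bearer" and
+// Parameters {"realm": "https://auth.example.com/token", "service": "registry.example.com"}.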
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/openshift/tools/vendor/github.com/docker/docker-credential-helpers/LICENSE b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/LICENSE new file mode 100644 index 0000000000..1ea555e2af --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 David Calavera + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/openshift/tools/vendor/github.com/docker/docker-credential-helpers/client/client.go b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/client/client.go
new file mode 100644
index 0000000000..7ca5ab7222
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -0,0 +1,114 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker-credential-helpers/credentials"
+)
+
+// isValidCredsMessage checks if 'msg' contains an invalid-credentials error message.
+// It returns nil if the message is free of invalid-credentials errors, and the
+// matching error otherwise. Error values can be errCredentialsMissingServerURL
+// or errCredentialsMissingUsername.
+func isValidCredsMessage(msg string) error {
+	if credentials.IsCredentialsMissingServerURLMessage(msg) {
+		return credentials.NewErrCredentialsMissingServerURL()
+	}
+	if credentials.IsCredentialsMissingUsernameMessage(msg) {
+		return credentials.NewErrCredentialsMissingUsername()
+	}
+	return nil
+}
+
+// Store uses an external program to save credentials.
+func Store(program ProgramFunc, creds *credentials.Credentials) error {
+	cmd := program(credentials.ActionStore)
+
+	buffer := new(bytes.Buffer)
+	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+		return err
+	}
+	cmd.Input(buffer)
+
+	out, err := cmd.Output()
+	if err != nil {
+		if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil {
+			err = isValidErr
+		}
+		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
+	}
+
+	return nil
+}
+
+// Get executes an external program to get the credentials from a native store.
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
+	cmd := program(credentials.ActionGet)
+	cmd.Input(strings.NewReader(serverURL))
+
+	out, err := cmd.Output()
+	if err != nil {
+		if credentials.IsErrCredentialsNotFoundMessage(string(out)) {
+			return nil, credentials.NewErrCredentialsNotFound()
+		}
+
+		if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
+	}
+
+	resp := &credentials.Credentials{
+		ServerURL: serverURL,
+	}
+
+	if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// Erase executes a program to remove the server credentials from the native store.
+func Erase(program ProgramFunc, serverURL string) error {
+	cmd := program(credentials.ActionErase)
+	cmd.Input(strings.NewReader(serverURL))
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	return nil
+}
+
+// List executes a program to list server credentials in the native store.
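+//
+// A minimal sketch, assuming the docker-credential-pass helper is on PATH:
+//
+//	creds, err := List(NewShellProgramFunc("docker-credential-pass"))
+//	// creds maps server URL to username, e.g.
+//	// "https://index.docker.io/v1/": "joe"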
+func List(program ProgramFunc) (map[string]string, error) {
+	cmd := program(credentials.ActionList)
+	cmd.Input(strings.NewReader("unused"))
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	var resp map[string]string
+	if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
diff --git a/openshift/tools/vendor/github.com/docker/docker-credential-helpers/client/command.go b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/client/command.go
new file mode 100644
index 0000000000..93863480ba
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/client/command.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+	"io"
+	"os"
+	"os/exec"
+)
+
+// Program is an interface to execute external programs.
+type Program interface {
+	Output() ([]byte, error)
+	Input(in io.Reader)
+}
+
+// ProgramFunc is a type of function that initializes programs based on arguments.
+type ProgramFunc func(args ...string) Program
+
+// NewShellProgramFunc creates a [ProgramFunc] to run a command in a [Shell].
+func NewShellProgramFunc(command string) ProgramFunc {
+	return func(args ...string) Program {
+		return createProgramCmdRedirectErr(command, args, nil)
+	}
+}
+
+// NewShellProgramFuncWithEnv creates a [ProgramFunc] to run a command
+// in a [Shell] with the given environment variables.
+func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc {
+	return func(args ...string) Program {
+		return createProgramCmdRedirectErr(command, args, env)
+	}
+}
+
+func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell {
+	ec := exec.Command(command, args...)
+	if env != nil {
+		for k, v := range *env {
+			ec.Env = append(ec.Environ(), k+"="+v)
+		}
+	}
+	ec.Stderr = os.Stderr
+	return &Shell{cmd: ec}
+}
+
+// Shell invokes shell commands to talk with a remote credentials-helper.
+type Shell struct {
+	cmd *exec.Cmd
+}
+
+// Output returns responses from the remote credentials-helper.
+func (s *Shell) Output() ([]byte, error) {
+	return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials-helper.
+func (s *Shell) Input(in io.Reader) {
+	s.cmd.Stdin = in
+}
diff --git a/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 0000000000..eac5518849
--- /dev/null
+++ b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,209 @@
+package credentials
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// Action defines the name of an action (sub-command) supported by a
+// credential-helper binary. It is an alias for "string", and mostly
+// for convenience.
+type Action = string
+
+// List of actions (sub-commands) supported by credential-helper binaries.
+const (
+	ActionStore   Action = "store"
+	ActionGet     Action = "get"
+	ActionErase   Action = "erase"
+	ActionList    Action = "list"
+	ActionVersion Action = "version"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
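+//
+// On the wire (the helper's stdin/stdout) it is serialized as JSON, e.g.
+// (values are illustrative):
+//
+//	{"ServerURL":"https://index.docker.io/v1/","Username":"joe","Secret":"s3cr3t"}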
+type Credentials struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// isValid checks the integrity of the Credentials object, such that no
+// credentials lack a server URL or a username.
+// It reports whether the credentials are valid, and the corresponding error
+// if they are not. Error values can be errCredentialsMissingServerURL or
+// errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled in credentials
+// stores that allow labelling. That label makes it possible to filter out
+// non-Docker credentials at lookup/search time in the macOS keychain, the
+// Windows credential manager, and Linux libsecret. The default value is
+// "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials-helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	if len(os.Args) != 2 {
+		_, _ = fmt.Fprintln(os.Stdout, usage())
+		os.Exit(1)
+	}
+
+	switch os.Args[1] {
+	case "--version", "-v":
+		_ = PrintVersion(os.Stdout)
+		os.Exit(0)
+	case "--help", "-h":
+		_, _ = fmt.Fprintln(os.Stdout, usage())
+		os.Exit(0)
+	}
+
+	if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil {
+		_, _ = fmt.Fprintln(os.Stdout, err)
+		os.Exit(1)
+	}
+}
+
+func usage() string {
+	return fmt.Sprintf("Usage: %s <store|get|erase|list|version>", Name)
+}
+
+// HandleCommand runs a helper to execute a credential action.
+func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error {
+	switch action {
+	case ActionStore:
+		return Store(helper, in)
+	case ActionGet:
+		return Get(helper, in, out)
+	case ActionErase:
+		return Erase(helper, in)
+	case ActionList:
+		return List(helper, out)
+	case ActionVersion:
+		return PrintVersion(out)
+	default:
+		return fmt.Errorf("%s: unknown action: %s", Name, action)
+	}
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	var creds Credentials
+	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+		return err
+	}
+
+	if ok, err := creds.isValid(); !ok {
+		return err
+	}
+
+	return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server url.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
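+//
+// A minimal sketch of the exchange (the server URL is illustrative, and
+// "helper" stands for any Helper implementation):
+//
+//	in := strings.NewReader("https://registry.example.com")
+//	var out bytes.Buffer
+//	err := Get(helper, in, &out)
+//	// on success, out holds the JSON serialization of the Credentials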
+func Get(helper Helper, reader io.Reader, writer io.Writer) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + username, secret, err := helper.Get(serverURL) + if err != nil { + return err + } + + buffer.Reset() + err = json.NewEncoder(buffer).Encode(Credentials{ + ServerURL: serverURL, + Username: username, + Secret: secret, + }) + if err != nil { + return err + } + + _, _ = fmt.Fprint(writer, buffer.String()) + return nil +} + +// Erase removes credentials from the store. +// The reader must contain the server URL to remove. +func Erase(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + return helper.Delete(serverURL) +} + +// List returns all the serverURLs of keys in +// the OS store as a list of strings +func List(helper Helper, writer io.Writer) error { + accts, err := helper.List() + if err != nil { + return err + } + return json.NewEncoder(writer).Encode(accts) +} + +// PrintVersion outputs the current version. +func PrintVersion(writer io.Writer) error { + _, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) + return nil +} diff --git a/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/error.go new file mode 100644 index 0000000000..2283d5a44c --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -0,0 +1,124 @@ +package credentials + +import ( + "errors" + "strings" +) + +const ( + // ErrCredentialsNotFound standardizes the not found error, so every helper returns + // the same message and docker can handle it properly. + errCredentialsNotFoundMessage = "credentials not found in native keychain" + + // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize + // invalid credentials or credentials management operations + errCredentialsMissingServerURLMessage = "no credentials server URL" + errCredentialsMissingUsernameMessage = "no credentials username" +) + +// errCredentialsNotFound represents an error +// raised when credentials are not in the store. +type errCredentialsNotFound struct{} + +// Error returns the standard error message +// for when the credentials are not in the store. +func (errCredentialsNotFound) Error() string { + return errCredentialsNotFoundMessage +} + +// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface. +// +// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound +func (errCredentialsNotFound) NotFound() {} + +// NewErrCredentialsNotFound creates a new error +// for when the credentials are not in the store. +func NewErrCredentialsNotFound() error { + return errCredentialsNotFound{} +} + +// IsErrCredentialsNotFound returns true if the error +// was caused by not having a set of credentials in a store. 
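+//
+// A typical use, sketched (the surrounding flow is illustrative):
+//
+//	username, secret, err := helper.Get(serverURL)
+//	if IsErrCredentialsNotFound(err) {
+//		// no stored credentials for serverURL; prompt for a login instead of failing
+//	}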
+func IsErrCredentialsNotFound(err error) bool { + var target errCredentialsNotFound + return errors.As(err, &target) +} + +// IsErrCredentialsNotFoundMessage returns true if the error +// was caused by not having a set of credentials in a store. +// +// This function helps to check messages returned by an +// external program via its standard output. +func IsErrCredentialsNotFoundMessage(err string) bool { + return strings.TrimSpace(err) == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] +// interface. +// +// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter +func (errCredentialsMissingServerURL) InvalidParameter() {} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. +type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] +// interface. +// +// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter +func (errCredentialsMissingUsername) InvalidParameter() {} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + var target errCredentialsMissingServerURL + return errors.As(err, &target) +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + var target errCredentialsMissingUsername + return errors.As(err, &target) +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. 
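An illustrative aside, not part of the vendored diff: the sketch below reuses the hypothetical `memStore` helper from the earlier example and shows both halves of the contract: the stdin/stdout protocol `HandleCommand` speaks, and the sentinel-error checks defined in `error.go`. The expected outputs in comments are inferred from the code above, not captured runs.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	helper := &memStore{creds: map[string]*credentials.Credentials{}}

	// "store": stdin carries a JSON-serialized Credentials object.
	in := bytes.NewBufferString(`{"ServerURL":"https://registry.example.com","Username":"alice","Secret":"s3cret"}`)
	if err := credentials.HandleCommand(helper, credentials.ActionStore, in, &bytes.Buffer{}); err != nil {
		panic(err)
	}

	// "get": stdin carries a bare server URL; stdout receives the JSON back.
	out := &bytes.Buffer{}
	in = bytes.NewBufferString("https://registry.example.com")
	if err := credentials.HandleCommand(helper, credentials.ActionGet, in, out); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // {"ServerURL":"https://registry.example.com","Username":"alice","Secret":"s3cret"}

	// A miss surfaces as the package's typed sentinel, which callers can
	// detect via errors.As (wrapped errors included) ...
	_, _, err := helper.Get("https://other.example.com")
	fmt.Println(credentials.IsErrCredentialsNotFound(err)) // true

	// ... or, when shelling out to a helper binary, by matching its output
	// message with IsErrCredentialsNotFoundMessage.
	fmt.Println(credentials.IsErrCredentialsNotFoundMessage(err.Error())) // true
}
```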
+func IsCredentialsMissingUsernameMessage(err string) bool { + return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage +} diff --git a/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 0000000000..135acd254d --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. + Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. + List() (map[string]string, error) +} diff --git a/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 0000000000..84377c2630 --- /dev/null +++ b/openshift/tools/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,16 @@ +package credentials + +var ( + // Name is filled at linking time + Name = "" + + // Package is filled at linking time + Package = "github.com/docker/docker-credential-helpers" + + // Version holds the complete version number. Filled in at linking time. + Version = "v0.0.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" +) diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/.drone.yml b/openshift/tools/vendor/github.com/drone/envsubst/v2/.drone.yml deleted file mode 100644 index 2b062137f3..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/.drone.yml +++ /dev/null @@ -1,8 +0,0 @@ -kind: pipeline -name: default - -steps: -- name: build - image: golang:1.11 - commands: - - go test -v ./... diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/.gitignore b/openshift/tools/vendor/github.com/drone/envsubst/v2/.gitignore deleted file mode 100644 index 8e5e2574eb..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/envsubst -coverage.out diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/LICENSE b/openshift/tools/vendor/github.com/drone/envsubst/v2/LICENSE deleted file mode 100644 index 1de55b7f4f..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 drone.io - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/eval.go b/openshift/tools/vendor/github.com/drone/envsubst/v2/eval.go deleted file mode 100644 index 375ca4c9fc..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/eval.go +++ /dev/null @@ -1,19 +0,0 @@ -package envsubst - -import "os" - -// Eval replaces ${var} in the string based on the mapping function. -func Eval(s string, mapping func(string) string) (string, error) { - t, err := Parse(s) - if err != nil { - return s, err - } - return t.Execute(mapping) -} - -// EvalEnv replaces ${var} in the string according to the values of the -// current environment variables. References to undefined variables are -// replaced by the empty string. -func EvalEnv(s string) (string, error) { - return Eval(s, os.Getenv) -} diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/funcs.go b/openshift/tools/vendor/github.com/drone/envsubst/v2/funcs.go deleted file mode 100644 index 904bb604d4..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/funcs.go +++ /dev/null @@ -1,246 +0,0 @@ -package envsubst - -import ( - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/drone/envsubst/v2/path" -) - -// defines a parameter substitution function. -type substituteFunc func(string, ...string) string - -// toLen returns the length of string s. -func toLen(s string, args ...string) string { - return strconv.Itoa(len(s)) -} - -// toLower returns a copy of the string s with all characters -// mapped to their lower case. -func toLower(s string, args ...string) string { - return strings.ToLower(s) -} - -// toUpper returns a copy of the string s with all characters -// mapped to their upper case. -func toUpper(s string, args ...string) string { - return strings.ToUpper(s) -} - -// toLowerFirst returns a copy of the string s with the first -// character mapped to its lower case. -func toLowerFirst(s string, args ...string) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(unicode.ToLower(r)) + s[n:] -} - -// toUpperFirst returns a copy of the string s with the first -// character mapped to its upper case. -func toUpperFirst(s string, args ...string) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(unicode.ToUpper(r)) + s[n:] -} - -// toDefault returns a copy of the string s if not empty, else -// returns a concatenation of the args without a separator. -func toDefault(s string, args ...string) string { - if len(s) == 0 && len(args) > 0 { - // don't use any separator - s = strings.Join(args, "") - } - return s -} - -// toSubstr returns a slice of the string s at the specified -// length and position. -func toSubstr(s string, args ...string) string { - if len(args) == 0 { - return s // should never happen - } - - pos, err := strconv.Atoi(args[0]) - if err != nil { - // bash returns the string if the position - // cannot be parsed. 
- return s - } - - if pos < 0 { - // if pos is negative (counts from the end) add it - // to length to get first character offset - pos = len(s) + pos - - // if negative offset exceeds the length of the string - // start from 0 - if pos < 0 { - pos = 0 - } - } - - if len(args) == 1 { - if pos < len(s) { - return s[pos:] - } - // if the position exceeds the length of the - // string an empty string is returned - return "" - } - - length, err := strconv.Atoi(args[1]) - if err != nil { - // bash returns the string if the length - // cannot be parsed. - return s - } - - if pos+length >= len(s) { - if pos < len(s) { - // if the position exceeds the length of the - // string just return the rest of it like bash - return s[pos:] - } - // if the position exceeds the length of the - // string an empty string is returned - return "" - } - - return s[pos : pos+length] -} - -// replaceAll returns a copy of the string s with all instances -// of the substring replaced with the replacement string. -func replaceAll(s string, args ...string) string { - switch len(args) { - case 0: - return s - case 1: - return strings.Replace(s, args[0], "", -1) - default: - return strings.Replace(s, args[0], args[1], -1) - } -} - -// replaceFirst returns a copy of the string s with the first -// instance of the substring replaced with the replacement string. -func replaceFirst(s string, args ...string) string { - switch len(args) { - case 0: - return s - case 1: - return strings.Replace(s, args[0], "", 1) - default: - return strings.Replace(s, args[0], args[1], 1) - } -} - -// replacePrefix returns a copy of the string s with the matching -// prefix replaced with the replacement string. -func replacePrefix(s string, args ...string) string { - if len(args) != 2 { - return s - } - if strings.HasPrefix(s, args[0]) { - return strings.Replace(s, args[0], args[1], 1) - } - return s -} - -// replaceSuffix returns a copy of the string s with the matching -// suffix replaced with the replacement string. 
-func replaceSuffix(s string, args ...string) string { - if len(args) != 2 { - return s - } - if strings.HasSuffix(s, args[0]) { - s = strings.TrimSuffix(s, args[0]) - s = s + args[1] - } - return s -} - -// TODO - -func trimShortestPrefix(s string, args ...string) string { - if len(args) != 0 { - s = trimShortest(s, args[0]) - } - return s -} - -func trimShortestSuffix(s string, args ...string) string { - if len(args) != 0 { - r := reverse(s) - rarg := reverse(args[0]) - s = reverse(trimShortest(r, rarg)) - } - return s -} - -func trimLongestPrefix(s string, args ...string) string { - if len(args) != 0 { - s = trimLongest(s, args[0]) - } - return s -} - -func trimLongestSuffix(s string, args ...string) string { - if len(args) != 0 { - r := reverse(s) - rarg := reverse(args[0]) - s = reverse(trimLongest(r, rarg)) - } - return s -} - -func trimShortest(s, arg string) string { - var shortestMatch string - for i := 0; i < len(s); i++ { - match, err := path.Match(arg, s[0:len(s)-i]) - - if err != nil { - return s - } - - if match { - shortestMatch = s[0 : len(s)-i] - } - } - - if shortestMatch != "" { - return strings.TrimPrefix(s, shortestMatch) - } - - return s -} - -func trimLongest(s, arg string) string { - for i := 0; i < len(s); i++ { - match, err := path.Match(arg, s[0:len(s)-i]) - - if err != nil { - return s - } - - if match { - return strings.TrimPrefix(s, s[0:len(s)-i]) - } - } - - return s -} - -func reverse(s string) string { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r) -} diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/node.go b/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/node.go deleted file mode 100644 index 09787eb92d..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/node.go +++ /dev/null @@ -1,86 +0,0 @@ -package parse - -// Node is an element in the parse tree. -type Node interface { - node() -} - -// empty string node -var empty = new(TextNode) - -// a template is represented by a tree consisting of one -// or more of the following nodes. -type ( - // TextNode represents a string of text. - TextNode struct { - Value string - } - - // FuncNode represents a string function. - FuncNode struct { - Param string - Name string - Args []Node - } - - // ListNode represents a list of nodes. - ListNode struct { - Nodes []Node - } - - // ParamNode struct{ - // Name string - // } - // - // CaseNode struct { - // Name string - // First bool - // } - // - // LowerNode struct { - // Name string - // First bool - // } - // - // SubstrNode struct { - // Name string - // Pos Node - // Len Node - // } - // - // ReplaceNode struct { - // Name string - // Substring Node - // Replacement Node - // } - // - // TrimNode struct{ - // - // } - // - // DefaultNode struct { - // Name string - // Default Node - // } -) - -// newTextNode returns a new TextNode. -func newTextNode(text string) *TextNode { - return &TextNode{Value: text} -} - -// newListNode returns a new ListNode. -func newListNode(nodes ...Node) *ListNode { - return &ListNode{Nodes: nodes} -} - -// newFuncNode returns a new FuncNode. 
-func newFuncNode(name string) *FuncNode { - return &FuncNode{Param: name} -} - -// node() defines the node in a parse tree - -func (*TextNode) node() {} -func (*ListNode) node() {} -func (*FuncNode) node() {} diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/parse.go b/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/parse.go deleted file mode 100644 index bc418fbfbc..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/parse.go +++ /dev/null @@ -1,402 +0,0 @@ -package parse - -import ( - "errors" -) - -var ( - // ErrBadSubstitution represents a substitution parsing error. - ErrBadSubstitution = errors.New("bad substitution") - - // ErrMissingClosingBrace represents a missing closing brace "}" error. - ErrMissingClosingBrace = errors.New("missing closing brace") - - // ErrParseVariableName represents the error when unable to parse a - // variable name within a substitution. - ErrParseVariableName = errors.New("unable to parse variable name") - - // ErrParseFuncSubstitution represents the error when unable to parse the - // substitution within a function parameter. - ErrParseFuncSubstitution = errors.New("unable to parse substitution within function") - - // ErrParseDefaultFunction represent the error when unable to parse a - // default function. - ErrParseDefaultFunction = errors.New("unable to parse default function") -) - -// Tree is the representation of a single parsed SQL statement. -type Tree struct { - Root Node - - // Parsing only; cleared after parse. - scanner *scanner -} - -// Parse parses the string and returns a Tree. -func Parse(buf string) (*Tree, error) { - t := new(Tree) - t.scanner = new(scanner) - return t.Parse(buf) -} - -// Parse parses the string buffer to construct an ast -// representation for expansion. 
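An aside, not part of the vendored file: the package below is being dropped from vendor, but a concrete picture of its parse tree may help review. A sketch walking the AST that `parse.Parse` builds; the nesting described in the comments is my reading of `parseAny` above, not documented behavior.

```go
package main

import (
	"fmt"

	"github.com/drone/envsubst/v2/parse"
)

func main() {
	// For "Hello ${NAME:-world}!", parseAny chains nodes into nested
	// ListNodes, roughly:
	//   ListNode[ TextNode("Hello "),
	//             ListNode[ FuncNode{Param: "NAME", Name: ":-",
	//                                Args: [TextNode("world")]},
	//                       TextNode("!") ] ]
	tree, err := parse.Parse("Hello ${NAME:-world}!")
	if err != nil {
		panic(err)
	}

	var walk func(n parse.Node, depth int)
	walk = func(n parse.Node, depth int) {
		switch v := n.(type) {
		case *parse.TextNode:
			fmt.Printf("%*sText %q\n", depth*2, "", v.Value)
		case *parse.FuncNode:
			fmt.Printf("%*sFunc param=%q op=%q\n", depth*2, "", v.Param, v.Name)
			for _, a := range v.Args {
				walk(a, depth+1)
			}
		case *parse.ListNode:
			for _, c := range v.Nodes {
				walk(c, depth)
			}
		}
	}
	walk(tree.Root, 0)
}
```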
-func (t *Tree) Parse(buf string) (tree *Tree, err error) { - t.scanner.init(buf) - t.Root, err = t.parseAny() - return t, err -} - -func (t *Tree) parseAny() (Node, error) { - t.scanner.accept = acceptRune - t.scanner.mode = scanIdent | scanLbrack | scanEscape - t.scanner.escapeChars = dollar - - switch t.scanner.scan() { - case tokenIdent: - left := newTextNode( - t.scanner.string(), - ) - right, err := t.parseAny() - switch { - case err != nil: - return nil, err - case right == empty: - return left, nil - } - return newListNode(left, right), nil - case tokenEOF: - return empty, nil - case tokenLbrack: - left, err := t.parseFunc() - if err != nil { - return nil, err - } - - right, err := t.parseAny() - switch { - case err != nil: - return nil, err - case right == empty: - return left, nil - } - return newListNode(left, right), nil - } - - return nil, ErrBadSubstitution -} - -func (t *Tree) parseFunc() (Node, error) { - // Turn on all escape characters - t.scanner.escapeChars = escapeAll - switch t.scanner.peek() { - case '#': - return t.parseLenFunc() - } - - var name string - t.scanner.accept = acceptIdent - t.scanner.mode = scanIdent - - switch t.scanner.scan() { - case tokenIdent: - name = t.scanner.string() - default: - return nil, ErrParseVariableName - } - - switch t.scanner.peek() { - case ':': - return t.parseDefaultOrSubstr(name) - case '=': - return t.parseDefaultFunc(name) - case ',', '^': - return t.parseCasingFunc(name) - case '/': - return t.parseReplaceFunc(name) - case '#': - return t.parseRemoveFunc(name, acceptHashFunc) - case '%': - return t.parseRemoveFunc(name, acceptPercentFunc) - } - - t.scanner.accept = acceptIdent - t.scanner.mode = scanRbrack - switch t.scanner.scan() { - case tokenRbrack: - return newFuncNode(name), nil - default: - return nil, ErrMissingClosingBrace - } -} - -// parse a substitution function parameter. -func (t *Tree) parseParam(accept acceptFunc, mode byte) (Node, error) { - t.scanner.accept = accept - t.scanner.mode = mode | scanLbrack - switch t.scanner.scan() { - case tokenLbrack: - return t.parseFunc() - case tokenIdent: - return newTextNode( - t.scanner.string(), - ), nil - case tokenRbrack: - return newTextNode( - t.scanner.string(), - ), nil - default: - return nil, ErrParseFuncSubstitution - } -} - -// parse either a default or substring substitution function. 
-func (t *Tree) parseDefaultOrSubstr(name string) (Node, error) { - t.scanner.read() - r := t.scanner.peek() - t.scanner.unread() - switch r { - case '=', '-', '?', '+': - return t.parseDefaultFunc(name) - default: - return t.parseSubstrFunc(name) - } -} - -// parses the ${param:offset} string function -// parses the ${param:offset:length} string function -func (t *Tree) parseSubstrFunc(name string) (Node, error) { - node := new(FuncNode) - node.Param = name - - t.scanner.accept = acceptOneColon - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - node.Name = t.scanner.string() - default: - return nil, ErrBadSubstitution - } - - // scan arg[1] - { - param, err := t.parseParam(rejectColonClose, scanIdent) - if err != nil { - return nil, err - } - - // param.Value = t.scanner.string() - node.Args = append(node.Args, param) - } - - // expect delimiter or close - t.scanner.accept = acceptColon - t.scanner.mode = scanIdent | scanRbrack - switch t.scanner.scan() { - case tokenRbrack: - return node, nil - case tokenIdent: - // no-op - default: - return nil, ErrBadSubstitution - } - - // scan arg[2] - { - param, err := t.parseParam(acceptNotClosing, scanIdent) - if err != nil { - return nil, err - } - node.Args = append(node.Args, param) - } - - return node, t.consumeRbrack() -} - -// parses the ${param%word} string function -// parses the ${param%%word} string function -// parses the ${param#word} string function -// parses the ${param##word} string function -func (t *Tree) parseRemoveFunc(name string, accept acceptFunc) (Node, error) { - node := new(FuncNode) - node.Param = name - - t.scanner.accept = accept - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - node.Name = t.scanner.string() - default: - return nil, ErrBadSubstitution - } - - // scan arg[1] - { - param, err := t.parseParam(acceptNotClosing, scanIdent) - if err != nil { - return nil, err - } - - // param.Value = t.scanner.string() - node.Args = append(node.Args, param) - } - - return node, t.consumeRbrack() -} - -// parses the ${param/pattern/string} string function -// parses the ${param//pattern/string} string function -// parses the ${param/#pattern/string} string function -// parses the ${param/%pattern/string} string function -func (t *Tree) parseReplaceFunc(name string) (Node, error) { - node := new(FuncNode) - node.Param = name - - t.scanner.accept = acceptReplaceFunc - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - node.Name = t.scanner.string() - default: - return nil, ErrBadSubstitution - } - - // scan arg[1] - { - param, err := t.parseParam(acceptNotSlash, scanIdent|scanEscape) - if err != nil { - return nil, err - } - node.Args = append(node.Args, param) - } - - // expect delimiter - t.scanner.accept = acceptSlash - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - // no-op - default: - return nil, ErrBadSubstitution - } - - // check for blank string - switch t.scanner.peek() { - case '}': - return node, t.consumeRbrack() - } - - // scan arg[2] - { - param, err := t.parseParam(acceptNotClosing, scanIdent|scanEscape) - if err != nil { - return nil, err - } - node.Args = append(node.Args, param) - } - - return node, t.consumeRbrack() -} - -// parses the ${parameter=word} string function -// parses the ${parameter:=word} string function -// parses the ${parameter:-word} string function -// parses the ${parameter:?word} string function -// parses the ${parameter:+word} string function -func (t *Tree) 
parseDefaultFunc(name string) (Node, error) { - node := new(FuncNode) - node.Param = name - - t.scanner.accept = acceptDefaultFunc - if t.scanner.peek() == '=' { - t.scanner.accept = acceptOneEqual - } - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - node.Name = t.scanner.string() - default: - return nil, ErrParseDefaultFunction - } - - // loop through all possible runes in default param - for { - // this acts as the break condition. Peek to see if we reached the end - switch t.scanner.peek() { - case '}': - return node, t.consumeRbrack() - } - param, err := t.parseParam(acceptNotClosing, scanIdent) - if err != nil { - return nil, err - } - - node.Args = append(node.Args, param) - } -} - -// parses the ${param,} string function -// parses the ${param,,} string function -// parses the ${param^} string function -// parses the ${param^^} string function -func (t *Tree) parseCasingFunc(name string) (Node, error) { - node := new(FuncNode) - node.Param = name - - t.scanner.accept = acceptCasingFunc - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - node.Name = t.scanner.string() - default: - return nil, ErrBadSubstitution - } - - return node, t.consumeRbrack() -} - -// parses the ${#param} string function -func (t *Tree) parseLenFunc() (Node, error) { - node := new(FuncNode) - - t.scanner.accept = acceptOneHash - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - node.Name = t.scanner.string() - default: - return nil, ErrBadSubstitution - } - - t.scanner.accept = acceptIdent - t.scanner.mode = scanIdent - switch t.scanner.scan() { - case tokenIdent: - node.Param = t.scanner.string() - default: - return nil, ErrBadSubstitution - } - - return node, t.consumeRbrack() -} - -// consumeRbrack consumes a right closing bracket. If a closing -// bracket token is not consumed an ErrBadSubstitution is returned. -func (t *Tree) consumeRbrack() error { - t.scanner.mode = scanRbrack - if t.scanner.scan() != tokenRbrack { - return ErrBadSubstitution - } - return nil -} - -// consumeDelimiter consumes a function argument delimiter. If a -// delimiter is not consumed an ErrBadSubstitution is returned. -// func (t *Tree) consumeDelimiter(accept acceptFunc, mode uint) error { -// t.scanner.accept = accept -// t.scanner.mode = mode -// if t.scanner.scan() != tokenRbrack { -// return ErrBadSubstitution -// } -// return nil -// } diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/scan.go b/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/scan.go deleted file mode 100644 index 2710879ac4..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/parse/scan.go +++ /dev/null @@ -1,294 +0,0 @@ -package parse - -import ( - "unicode" - "unicode/utf8" -) - -// eof rune sent when end of file is reached -var eof = rune(0) - -// token is a lexical token. -type token uint - -// list of lexical tokens. -const ( - // special tokens - tokenIllegal token = iota - tokenEOF - - // identifiers and literals - tokenIdent - - // operators and delimiters - tokenLbrack - tokenRbrack - tokenQuote -) - -// predefined mode bits to control recognition of tokens. -const ( - scanIdent byte = 1 << iota - scanLbrack - scanRbrack - scanEscape -) - -// predefined mode bits to control escape tokens. -const ( - dollar byte = 1 << iota - backslash - escapeAll = dollar | backslash -) - -// returns true if rune is accepted. 
-type acceptFunc func(r rune, i int) bool - -// scanner implements a lexical scanner that reads unicode -// characters and tokens from a string buffer. -type scanner struct { - buf string - pos int - start int - width int - mode byte - escapeChars byte - - accept acceptFunc -} - -// init initializes a scanner with a new buffer. -func (s *scanner) init(buf string) { - s.buf = buf - s.pos = 0 - s.start = 0 - s.width = 0 - s.accept = nil -} - -// read returns the next unicode character. It returns eof at -// the end of the string buffer. -func (s *scanner) read() rune { - if s.pos >= len(s.buf) { - s.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(s.buf[s.pos:]) - s.width = w - s.pos += s.width - return r -} - -func (s *scanner) unread() { - s.pos -= s.width -} - -// skip skips over the curring unicode character in the buffer -// by slicing and removing from the buffer. -func (s *scanner) skip() { - l := s.buf[:s.pos-1] - r := s.buf[s.pos:] - s.buf = l + r -} - -// peek returns the next unicode character in the buffer without -// advancing the scanner. It returns eof if the scanner's position -// is at the last character of the source. -func (s *scanner) peek() rune { - r := s.read() - s.unread() - return r -} - -// string returns the string corresponding to the most recently -// scanned token. Valid after calling scan(). -func (s *scanner) string() string { - return s.buf[s.start:s.pos] -} - -// tests if the bit exists for a given character bit -func (s *scanner) shouldEscape(character byte) bool { - return s.escapeChars&character != 0 -} - -// scan reads the next token or Unicode character from source and -// returns it. It returns EOF at the end of the source. -func (s *scanner) scan() token { - s.start = s.pos - r := s.read() - switch { - case r == eof: - return tokenEOF - case s.scanLbrack(r): - return tokenLbrack - case s.scanRbrack(r): - return tokenRbrack - case s.scanIdent(r): - return tokenIdent - } - return tokenIllegal -} - -// scanIdent reads the next token or Unicode character from source -// and returns true if the Ident character is accepted. -func (s *scanner) scanIdent(r rune) bool { - if s.mode&scanIdent == 0 { - return false - } - if s.scanEscaped(r) { - s.skip() - } else if !s.accept(r, s.pos-s.start) { - return false - } -loop: - for { - r := s.read() - switch { - case r == eof: - s.unread() - break loop - case s.scanLbrack(r): - s.unread() - s.unread() - break loop - } - if s.scanEscaped(r) { - s.skip() - continue - } - if !s.accept(r, s.pos-s.start) { - s.unread() - break loop - } - } - return true -} - -// scanLbrack reads the next token or Unicode character from source -// and returns true if the open bracket is encountered. -func (s *scanner) scanLbrack(r rune) bool { - if s.mode&scanLbrack == 0 { - return false - } - if r == '$' { - if s.read() == '{' { - return true - } - s.unread() - } - return false -} - -// scanRbrack reads the next token or Unicode character from source -// and returns true if the closing bracket is encountered. -func (s *scanner) scanRbrack(r rune) bool { - if s.mode&scanRbrack == 0 { - return false - } - return r == '}' -} - -// scanEscaped reads the next token or Unicode character from source -// and returns true if it being escaped and should be skipped. 
-func (s *scanner) scanEscaped(r rune) bool { - if s.mode&scanEscape == 0 { - return false - } - if r == '$' && s.shouldEscape(dollar) { - if s.peek() == '$' { - return true - } - } - if r == '\\' && s.shouldEscape(backslash) { - switch s.peek() { - case '/', '\\': - return true - default: - return false - } - } - - return false -} - -// -// scanner functions accept or reject runes. -// - -func acceptRune(r rune, i int) bool { - return true -} - -func acceptIdent(r rune, i int) bool { - return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' -} - -func acceptColon(r rune, i int) bool { - return r == ':' -} - -func acceptOneHash(r rune, i int) bool { - return r == '#' && i == 1 -} - -func acceptNone(r rune, i int) bool { - return false -} - -func acceptNotClosing(r rune, i int) bool { - return r != '}' -} - -func acceptHashFunc(r rune, i int) bool { - return r == '#' && i < 3 -} - -func acceptPercentFunc(r rune, i int) bool { - return r == '%' && i < 3 -} - -func acceptDefaultFunc(r rune, i int) bool { - switch { - case i == 1 && r == ':': - return true - case i == 2 && (r == '=' || r == '-' || r == '?' || r == '+'): - return true - default: - return false - } -} - -func acceptReplaceFunc(r rune, i int) bool { - switch { - case i == 1 && r == '/': - return true - case i == 2 && (r == '/' || r == '#' || r == '%'): - return true - default: - return false - } -} - -func acceptOneEqual(r rune, i int) bool { - return i == 1 && r == '=' -} - -func acceptOneColon(r rune, i int) bool { - return i == 1 && r == ':' -} - -func rejectColonClose(r rune, i int) bool { - return r != ':' && r != '}' -} - -func acceptSlash(r rune, i int) bool { - return r == '/' -} - -func acceptNotSlash(r rune, i int) bool { - return r != '/' -} - -func acceptCasingFunc(r rune, i int) bool { - return (r == ',' || r == '^') && i < 3 -} diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/path/match.go b/openshift/tools/vendor/github.com/drone/envsubst/v2/path/match.go deleted file mode 100644 index 9306b0c9d6..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/path/match.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import ( - "errors" - "unicode/utf8" -) - -// ErrBadPattern indicates a globbing pattern was malformed. -var ErrBadPattern = errors.New("syntax error in pattern") - -// Match reports whether name matches the shell file name pattern. -// The pattern syntax is: -// -// pattern: -// { term } -// term: -// '*' matches any sequence of non-/ characters -// '?' matches any single non-/ character -// '[' [ '^' ] { character-range } ']' -// character class (must be non-empty) -// c matches character c (c != '*', '?', '\\', '[') -// '\\' c matches character c -// -// character-range: -// c matches character c (c != '\\', '-', ']') -// '\\' c matches character c -// lo '-' hi matches character c for lo <= c <= hi -// -// Match requires pattern to match all of name, not just a substring. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -func Match(pattern, name string) (matched bool, err error) { -Pattern: - for len(pattern) > 0 { - var star bool - var chunk string - star, chunk, pattern = scanChunk(pattern) - if star && chunk == "" { - // Trailing * matches rest of string unless it has a /. 
- // return !strings.Contains(name, "/"), nil - - // Return rest of string - return true, nil - } - // Look for match at current position. - t, ok, err := matchChunk(chunk, name) - // if we're the last chunk, make sure we've exhausted the name - // otherwise we'll give a false result even if we could still match - // using the star - if ok && (len(t) == 0 || len(pattern) > 0) { - name = t - continue - } - if err != nil { - return false, err - } - if star { - // Look for match skipping i+1 bytes. - for i := 0; i < len(name); i++ { - t, ok, err := matchChunk(chunk, name[i+1:]) - if ok { - // if we're the last chunk, make sure we exhausted the name - if len(pattern) == 0 && len(t) > 0 { - continue - } - name = t - continue Pattern - } - if err != nil { - return false, err - } - } - } - return false, nil - } - return len(name) == 0, nil -} - -// scanChunk gets the next segment of pattern, which is a non-star string -// possibly preceded by a star. -func scanChunk(pattern string) (star bool, chunk, rest string) { - for len(pattern) > 0 && pattern[0] == '*' { - pattern = pattern[1:] - star = true - } - inrange := false - var i int -Scan: - for i = 0; i < len(pattern); i++ { - switch pattern[i] { - case '\\': - // error check handled in matchChunk: bad pattern. - if i+1 < len(pattern) { - i++ - } - case '[': - inrange = true - case ']': - inrange = false - case '*': - if !inrange { - break Scan - } - } - } - return star, pattern[0:i], pattern[i:] -} - -// matchChunk checks whether chunk matches the beginning of s. -// If so, it returns the remainder of s (after the match). -// Chunk is all single-character operators: literals, char classes, and ?. -func matchChunk(chunk, s string) (rest string, ok bool, err error) { - for len(chunk) > 0 { - if len(s) == 0 { - return - } - switch chunk[0] { - case '[': - // character class - r, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - // possibly negated - notNegated := true - if len(chunk) > 0 && chunk[0] == '^' { - notNegated = false - chunk = chunk[1:] - } - // parse all ranges - match := false - nrange := 0 - for { - if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { - chunk = chunk[1:] - break - } - var lo, hi rune - if lo, chunk, err = getEsc(chunk); err != nil { - return - } - hi = lo - if chunk[0] == '-' { - if hi, chunk, err = getEsc(chunk[1:]); err != nil { - return - } - } - if lo <= r && r <= hi { - match = true - } - nrange++ - } - if match != notNegated { - return - } - - case '?': - _, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - - case '\\': - chunk = chunk[1:] - if len(chunk) == 0 { - err = ErrBadPattern - return - } - fallthrough - - default: - if chunk[0] != s[0] { - return - } - s = s[1:] - chunk = chunk[1:] - } - } - return s, true, nil -} - -// getEsc gets a possibly-escaped character from chunk, for a character class. 
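Aside: `Match` above is a trimmed-down fork of the standard library's `path.Match`, used by the `#`/`%` trim functions earlier in this diff. A quick sketch of its behavior; the expected results in the comments are inferred from the code above rather than from running the removed package.

```go
package main

import (
	"fmt"

	"github.com/drone/envsubst/v2/path"
)

func main() {
	// Same pattern syntax as the standard library's path.Match, minus the
	// special-casing of '/': a trailing '*' here matches the rest of the
	// name, per the comment in Match above.
	for _, tc := range []struct{ pattern, name string }{
		{"*.txt", "notes.txt"},   // true
		{"report-?", "report-7"}, // true
		{"[a-c]*", "delta"},      // false
	} {
		ok, err := path.Match(tc.pattern, tc.name)
		fmt.Println(tc.pattern, tc.name, ok, err)
	}
}
```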
-func getEsc(chunk string) (r rune, nchunk string, err error) { - if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { - err = ErrBadPattern - return - } - if chunk[0] == '\\' { - chunk = chunk[1:] - if len(chunk) == 0 { - err = ErrBadPattern - return - } - } - r, n := utf8.DecodeRuneInString(chunk) - if r == utf8.RuneError && n == 1 { - err = ErrBadPattern - } - nchunk = chunk[n:] - if len(nchunk) == 0 { - err = ErrBadPattern - } - return -} diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/readme.md b/openshift/tools/vendor/github.com/drone/envsubst/v2/readme.md deleted file mode 100644 index ecc84828c1..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/readme.md +++ /dev/null @@ -1,44 +0,0 @@ -# envsubst - -`envsubst` is a Go package for expanding variables in a string using `${var}` syntax. -Includes support for bash string replacement functions. - -## Documentation - -[Documentation can be found on GoDoc][doc]. - -## Supported Functions - -| __Expression__ | __Meaning__ | -| ----------------- | -------------- | -| `${var}` | Value of `$var` -| `${#var}` | String length of `$var` -| `${var^}` | Uppercase first character of `$var` -| `${var^^}` | Uppercase all characters in `$var` -| `${var,}` | Lowercase first character of `$var` -| `${var,,}` | Lowercase all characters in `$var` -| `${var:n}` | Offset `$var` `n` characters from start -| `${var:n:len}` | Offset `$var` `n` characters with max length of `len` -| `${var#pattern}` | Strip shortest `pattern` match from start -| `${var##pattern}` | Strip longest `pattern` match from start -| `${var%pattern}` | Strip shortest `pattern` match from end -| `${var%%pattern}` | Strip longest `pattern` match from end -| `${var-default` | If `$var` is not set, evaluate expression as `$default` -| `${var:-default` | If `$var` is not set or is empty, evaluate expression as `$default` -| `${var=default` | If `$var` is not set, evaluate expression as `$default` -| `${var:=default` | If `$var` is not set or is empty, evaluate expression as `$default` -| `${var/pattern/replacement}` | Replace as few `pattern` matches as possible with `replacement` -| `${var//pattern/replacement}` | Replace as many `pattern` matches as possible with `replacement` -| `${var/#pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` start -| `${var/%pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` end - -For a deeper reference, see [bash-hackers](https://wiki.bash-hackers.org/syntax/pe#case_modification) or [gnu pattern matching](https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html). - -## Unsupported Functions - -* `${var-default}` -* `${var+default}` -* `${var:?default}` -* `${var:+default}` - -[doc]: http://godoc.org/github.com/drone/envsubst diff --git a/openshift/tools/vendor/github.com/drone/envsubst/v2/template.go b/openshift/tools/vendor/github.com/drone/envsubst/v2/template.go deleted file mode 100644 index 45d9abf8ac..0000000000 --- a/openshift/tools/vendor/github.com/drone/envsubst/v2/template.go +++ /dev/null @@ -1,157 +0,0 @@ -package envsubst - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/drone/envsubst/v2/parse" -) - -// state represents the state of template execution. It is not part of the -// template so that multiple executions can run in parallel. 
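Aside: the readme table above summarizes the public behavior this PR removes from vendor. A short sketch exercising a few of those expansions through `envsubst.Eval`; the expected outputs in the comments follow the documented bash semantics and are assumptions, not captured runs.

```go
package main

import (
	"fmt"

	"github.com/drone/envsubst/v2"
)

func main() {
	vars := map[string]string{
		"GREETING": "hello world",
		"FILE":     "archive.tar.gz",
	}
	mapping := func(name string) string { return vars[name] }

	for _, expr := range []string{
		"${GREETING^^}",       // uppercase all characters: HELLO WORLD
		"${#GREETING}",        // string length: 11
		"${FILE%%.*}",         // strip longest suffix match: archive
		"${MISSING:-default}", // fallback when unset or empty: default
	} {
		out, err := envsubst.Eval(expr, mapping)
		fmt.Println(expr, "=>", out, err)
	}
}
```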
-type state struct { - template *Template - writer io.Writer - node parse.Node // current node - - // maps variable names to values - mapper func(string) string -} - -// Template is the representation of a parsed shell format string. -type Template struct { - tree *parse.Tree -} - -// Parse creates a new shell format template and parses the template -// definition from string s. -func Parse(s string) (t *Template, err error) { - t = new(Template) - t.tree, err = parse.Parse(s) - if err != nil { - return nil, err - } - return t, nil -} - -// ParseFile creates a new shell format template and parses the template -// definition from the named file. -func ParseFile(path string) (*Template, error) { - b, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - return Parse(string(b)) -} - -// Execute applies a parsed template to the specified data mapping. -func (t *Template) Execute(mapping func(string) string) (str string, err error) { - b := new(bytes.Buffer) - s := new(state) - s.node = t.tree.Root - s.mapper = mapping - s.writer = b - err = t.eval(s) - if err != nil { - return - } - return b.String(), nil -} - -func (t *Template) eval(s *state) (err error) { - switch node := s.node.(type) { - case *parse.TextNode: - err = t.evalText(s, node) - case *parse.FuncNode: - err = t.evalFunc(s, node) - case *parse.ListNode: - err = t.evalList(s, node) - } - return err -} - -func (t *Template) evalText(s *state, node *parse.TextNode) error { - _, err := io.WriteString(s.writer, node.Value) - return err -} - -func (t *Template) evalList(s *state, node *parse.ListNode) (err error) { - for _, n := range node.Nodes { - s.node = n - err = t.eval(s) - if err != nil { - return err - } - } - return nil -} - -func (t *Template) evalFunc(s *state, node *parse.FuncNode) error { - var w = s.writer - var buf bytes.Buffer - var args []string - for _, n := range node.Args { - buf.Reset() - s.writer = &buf - s.node = n - err := t.eval(s) - if err != nil { - return err - } - args = append(args, buf.String()) - } - - // restore the origin writer - s.writer = w - s.node = node - - v := s.mapper(node.Param) - - fn := lookupFunc(node.Name, len(args)) - - _, err := io.WriteString(s.writer, fn(v, args...)) - return err -} - -// lookupFunc returns the parameters substitution function by name. If the -// named function does not exists, a default function is returned. 
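Aside: because execution state lives in the per-call `state` struct rather than in `Template` (per the comment above), a single parsed template can be reused, even concurrently, with different mappings. A minimal sketch, assuming the API shown above.

```go
package main

import (
	"fmt"

	"github.com/drone/envsubst/v2"
)

func main() {
	// Parse once, execute many times with different mappings.
	tmpl, err := envsubst.Parse("deploying to ${ENV^^} (replicas=${REPLICAS:-1})")
	if err != nil {
		panic(err)
	}

	for _, env := range []map[string]string{
		{"ENV": "staging"},
		{"ENV": "prod", "REPLICAS": "3"},
	} {
		out, err := tmpl.Execute(func(name string) string { return env[name] })
		fmt.Println(out, err)
		// deploying to STAGING (replicas=1)
		// deploying to PROD (replicas=3)
	}
}
```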
-func lookupFunc(name string, args int) substituteFunc { - switch name { - case ",": - return toLowerFirst - case ",,": - return toLower - case "^": - return toUpperFirst - case "^^": - return toUpper - case "#": - if args == 0 { - return toLen - } - return trimShortestPrefix - case "##": - return trimLongestPrefix - case "%": - return trimShortestSuffix - case "%%": - return trimLongestSuffix - case ":": - return toSubstr - case "/#": - return replacePrefix - case "/%": - return replaceSuffix - case "/": - return replaceFirst - case "//": - return replaceAll - case "=", ":=", ":-": - return toDefault - case ":?", ":+", "-", "+": - return toDefault - default: - return toDefault - } -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/openshift/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml deleted file mode 100644 index f4e7dbf37b..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ /dev/null @@ -1,14 +0,0 @@ -freebsd_task: - name: 'FreeBSD' - freebsd_instance: - image_family: freebsd-14-1 - install_script: - - pkg update -f - - pkg install -y go - test_script: - # run tests as user "cirrus" instead of root - - pw useradd cirrus -m - - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/.gitignore b/openshift/tools/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index daea9dd6d6..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# go test -c output -*.test -*.test.exe - -# Output of go build ./cmd/fsnotify -/fsnotify -/fsnotify.exe - -/test/kqueue -/test/a.out diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/.mailmap b/openshift/tools/vendor/github.com/fsnotify/fsnotify/.mailmap deleted file mode 100644 index a04f2907fe..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/.mailmap +++ /dev/null @@ -1,2 +0,0 @@ -Chris Howey -Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/openshift/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index fa854785d0..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,569 +0,0 @@ -# Changelog - -1.8.0 2023-10-31 ----------------- - -### Additions - -- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) - -### Changes and fixes - -- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) - -- kqueue: ignore events with Ident=0 ([#590]) - -- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) - -- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) - -- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) - -- inotify: fix panic when calling Remove() in a goroutine ([#650]) - -- fen: allow watching subdirectories of watched directories ([#621]) - -[#590]: https://github.com/fsnotify/fsnotify/pull/590 -[#610]: https://github.com/fsnotify/fsnotify/pull/610 -[#617]: https://github.com/fsnotify/fsnotify/pull/617 -[#619]: 
https://github.com/fsnotify/fsnotify/pull/619 -[#620]: https://github.com/fsnotify/fsnotify/pull/620 -[#621]: https://github.com/fsnotify/fsnotify/pull/621 -[#625]: https://github.com/fsnotify/fsnotify/pull/625 -[#650]: https://github.com/fsnotify/fsnotify/pull/650 - -1.7.0 - 2023-10-22 ------------------- -This version of fsnotify needs Go 1.17. - -### Additions - -- illumos: add FEN backend to support illumos and Solaris. ([#371]) - -- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful - in cases where you can't control the kernel buffer and receive a large number - of events in bursts. ([#550], [#572]) - -- all: add `AddWith()`, which is identical to `Add()` but allows passing - options. ([#521]) - -- windows: allow setting the ReadDirectoryChangesW() buffer size with - `fsnotify.WithBufferSize()`; the default of 64K is the highest value that - works on all platforms and is enough for most purposes, but in some cases a - highest buffer is needed. ([#521]) - -### Changes and fixes - -- inotify: remove watcher if a watched path is renamed ([#518]) - - After a rename the reported name wasn't updated, or even an empty string. - Inotify doesn't provide any good facilities to update it, so just remove the - watcher. This is already how it worked on kqueue and FEN. - - On Windows this does work, and remains working. - -- windows: don't listen for file attribute changes ([#520]) - - File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, - with no way to see if they're a file write or attribute change, so would show - up as a fsnotify.Write event. This is never useful, and could result in many - spurious Write events. - -- windows: return `ErrEventOverflow` if the buffer is full ([#525]) - - Before it would merely return "short read", making it hard to detect this - error. - -- kqueue: make sure events for all files are delivered properly when removing a - watched directory ([#526]) - - Previously they would get sent with `""` (empty string) or `"."` as the path - name. - -- kqueue: don't emit spurious Create events for symbolic links ([#524]) - - The link would get resolved but kqueue would "forget" it already saw the link - itself, resulting on a Create for every Write event for the directory. - -- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) - -- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in - `backend_other.go`, making it easier to use on unsupported platforms such as - WASM, AIX, etc. ([#528]) - -- other: use the `backend_other.go` no-op if the `appengine` build tag is set; - Google AppEngine forbids usage of the unsafe package so the inotify backend - won't compile there. - -[#371]: https://github.com/fsnotify/fsnotify/pull/371 -[#516]: https://github.com/fsnotify/fsnotify/pull/516 -[#518]: https://github.com/fsnotify/fsnotify/pull/518 -[#520]: https://github.com/fsnotify/fsnotify/pull/520 -[#521]: https://github.com/fsnotify/fsnotify/pull/521 -[#524]: https://github.com/fsnotify/fsnotify/pull/524 -[#525]: https://github.com/fsnotify/fsnotify/pull/525 -[#526]: https://github.com/fsnotify/fsnotify/pull/526 -[#528]: https://github.com/fsnotify/fsnotify/pull/528 -[#537]: https://github.com/fsnotify/fsnotify/pull/537 -[#550]: https://github.com/fsnotify/fsnotify/pull/550 -[#572]: https://github.com/fsnotify/fsnotify/pull/572 - -1.6.0 - 2022-10-13 ------------------- -This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, -but not documented). 
It also increases the minimum Linux version to 2.6.32. - -### Additions - -- all: add `Event.Has()` and `Op.Has()` ([#477]) - - This makes checking events a lot easier; for example: - - if event.Op&Write == Write && !(event.Op&Remove == Remove) { - } - - Becomes: - - if event.Has(Write) && !event.Has(Remove) { - } - -- all: add cmd/fsnotify ([#463]) - - A command-line utility for testing and some examples. - -### Changes and fixes - -- inotify: don't ignore events for files that don't exist ([#260], [#470]) - - Previously the inotify watcher would call `os.Lstat()` to check if a file - still exists before emitting events. - - This was inconsistent with other platforms and resulted in inconsistent event - reporting (e.g. when a file is quickly removed and re-created), and generally - a source of confusion. It was added in 2013 to fix a memory leak that no - longer exists. - -- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's - not watched ([#460]) - -- inotify: replace epoll() with non-blocking inotify ([#434]) - - Non-blocking inotify was not generally available at the time this library was - written in 2014, but now it is. As a result, the minimum Linux version is - bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. - -- kqueue: don't check for events every 100ms ([#480]) - - The watcher would wake up every 100ms, even when there was nothing to do. Now - it waits until there is something to do. - -- macos: retry opening files on EINTR ([#475]) - -- kqueue: skip unreadable files ([#479]) - - kqueue requires a file descriptor for every file in a directory; this would - fail if a file was unreadable by the current user. Now these files are simply - skipped. - -- windows: fix renaming a watched directory if the parent is also watched ([#370]) - -- windows: increase buffer size from 4K to 64K ([#485]) - -- windows: close file handle on Remove() ([#288]) - -- kqueue: put pathname in the error if watching a file fails ([#471]) - -- inotify, windows: calling Close() more than once could race ([#465]) - -- kqueue: improve Close() performance ([#233]) - -- all: various documentation additions and clarifications. - -[#233]: https://github.com/fsnotify/fsnotify/pull/233 -[#260]: https://github.com/fsnotify/fsnotify/pull/260 -[#288]: https://github.com/fsnotify/fsnotify/pull/288 -[#370]: https://github.com/fsnotify/fsnotify/pull/370 -[#434]: https://github.com/fsnotify/fsnotify/pull/434 -[#460]: https://github.com/fsnotify/fsnotify/pull/460 -[#463]: https://github.com/fsnotify/fsnotify/pull/463 -[#465]: https://github.com/fsnotify/fsnotify/pull/465 -[#470]: https://github.com/fsnotify/fsnotify/pull/470 -[#471]: https://github.com/fsnotify/fsnotify/pull/471 -[#475]: https://github.com/fsnotify/fsnotify/pull/475 -[#477]: https://github.com/fsnotify/fsnotify/pull/477 -[#479]: https://github.com/fsnotify/fsnotify/pull/479 -[#480]: https://github.com/fsnotify/fsnotify/pull/480 -[#485]: https://github.com/fsnotify/fsnotify/pull/485 - -## [1.5.4] - 2022-04-25 - -* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) -* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) -* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) - -## [1.5.3] - 2022-04-22 - -* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) - -## [1.5.2] - 2022-04-21 - -* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) -* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) -* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) -* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) -* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) - -## [1.5.1] - 2021-08-24 - -* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) - -## [1.5.0] - 2021-08-20 - -* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) -* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) -* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) -* CI: Use GitHub Actions for CI and cover go 1.12-1.17 - [#378](https://github.com/fsnotify/fsnotify/pull/378) - [#381](https://github.com/fsnotify/fsnotify/pull/381) - [#385](https://github.com/fsnotify/fsnotify/pull/385) -* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) - -## [1.4.9] - 2020-03-11 - -* Move example usage to the readme #329. This may resolve #328. - -## [1.4.8] - 2020-03-10 - -* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) -* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) -* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) -* CI: Less verbosity (@nathany #267) -* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) -* Tests: Check if channels are closed in the example (@alexeykazakov #244) -* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) -* CI: Add windows to travis matrix (@cpuguy83 #284) -* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) -* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) -* Linux: open files with close-on-exec (@linxiulei #273) -* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) -* Project: Add go.mod (@nathany #309) -* Project: Revise editor config (@nathany #309) -* Project: Update copyright for 2019 (@nathany #309) -* CI: Drop go1.8 from CI matrix (@nathany #309) -* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) - -## [1.4.7] - 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## [1.4.2] - 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec 
[#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## [1.4.1] - 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## [1.4.0] - 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## [1.3.1] - 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## [1.3.0] - 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## [1.2.10] - 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## [1.2.9] - 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## [1.2.8] - 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## [1.2.5] - 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## [1.2.1] - 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## [1.2.0] - 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## [1.1.1] - 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## [1.1.0] - 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [1.0.4] - 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. 
[#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## [1.0.3] - 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## [1.0.2] - 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## [1.0.0] - 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## [0.9.3] - 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [0.9.2] - 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## [0.9.1] - 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## [0.9.0] - 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
-
-## [0.8.12] - 2013-11-13
-
-* [API] Remove FD_SET and friends from Linux adapter
-
-## [0.8.11] - 2013-11-02
-
-* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
-
-## [0.8.10] - 2013-10-19
-
-* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
-* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
-* [Doc] specify OS-specific limits in README (thanks @debrando)
-
-## [0.8.9] - 2013-09-08
-
-* [Doc] Contributing (thanks @nathany)
-* [Doc] update package path in example code [#63][] (thanks @paulhammond)
-* [Doc] GoCI badge in README (Linux only) [#60][]
-* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
-
-## [0.8.8] - 2013-06-17
-
-* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-
-## [0.8.7] - 2013-06-03
-
-* [API] Make syscall flags internal
-* [Fix] inotify: ignore event changes
-* [Fix] race in symlink test [#45][] (reported by @srid)
-* [Fix] tests on Windows
-* lower case error messages
-
-## [0.8.6] - 2013-05-23
-
-* kqueue: Use EVT_ONLY flag on Darwin
-* [Doc] Update README with full example
-
-## [0.8.5] - 2013-05-09
-
-* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-
-## [0.8.4] - 2013-04-07
-
-* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-
-## [0.8.3] - 2013-03-13
-
-* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
-* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-
-## [0.8.2] - 2013-02-07
-
-* [Doc] add Authors
-* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-
-## [0.8.1] - 2013-01-09
-
-* [Fix] Windows path separators
-* [Doc] BSD License
-
-## [0.8.0] - 2012-11-09
-
-* kqueue: directory watching improvements (thanks @vmirage)
-* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
-* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-
-## [0.7.4] - 2012-10-09
-
-* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
-* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
-* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
-* [Fix] kqueue: modify after recreation of file
-
-## [0.7.3] - 2012-09-27
-
-* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
-* [Fix] kqueue: no longer get duplicate CREATE events
-
-## [0.7.2] - 2012-09-01
-
-* kqueue: events for created directories
-
-## [0.7.1] - 2012-07-14
-
-* [Fix] for renaming files
-
-## [0.7.0] - 2012-07-02
-
-* [Feature] FSNotify flags
-* [Fix] inotify: Added file name back to event path
-
-## [0.6.0] - 2012-06-06
-
-* kqueue: watch files after directory created (thanks @tmc)
-
-## [0.5.1] - 2012-05-22
-
-* [Fix] inotify: remove all watches before Close()
-
-## [0.5.0] - 2012-05-03
-
-* [API] kqueue: return errors during watch instead of sending over channel
-* kqueue: match symlink behavior on Linux
-* inotify: add `DELETE_SELF` (requested by @taralx)
-* [Fix] kqueue: handle EINTR (reported by @robfig)
-* [Doc] Godoc example [#1][] (thanks @davecheney)
-
-## [0.4.0] - 2012-03-30
-
-* Go 1 released: build with go tool
-* [Feature] Windows support using winfsnotify
-* Windows does not have attribute change notifications
-* Roll attribute notifications into IsModify
-
-## [0.3.0] - 2012-02-19
-
-* kqueue: add files when watching a directory
-
-## [0.2.0] - 2011-12-30
-
-* update to latest Go weekly code
-
-## [0.1.0] - 2011-10-19
-
-* kqueue: add watch on file creation to match inotify
-* kqueue: create file event
-* inotify: ignore `IN_IGNORED` events
-* event String()
-* linux: common FileEvent functions
-* initial commit
-
-[#79]: https://github.com/howeyc/fsnotify/pull/79
-[#77]: https://github.com/howeyc/fsnotify/pull/77
-[#72]: https://github.com/howeyc/fsnotify/issues/72
-[#71]: https://github.com/howeyc/fsnotify/issues/71
-[#70]: https://github.com/howeyc/fsnotify/issues/70
-[#63]: https://github.com/howeyc/fsnotify/issues/63
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#60]: https://github.com/howeyc/fsnotify/issues/60
-[#59]: https://github.com/howeyc/fsnotify/issues/59
-[#49]: https://github.com/howeyc/fsnotify/issues/49
-[#45]: https://github.com/howeyc/fsnotify/issues/45
-[#40]: https://github.com/howeyc/fsnotify/issues/40
-[#36]: https://github.com/howeyc/fsnotify/issues/36
-[#33]: https://github.com/howeyc/fsnotify/issues/33
-[#29]: https://github.com/howeyc/fsnotify/issues/29
-[#25]: https://github.com/howeyc/fsnotify/issues/25
-[#24]: https://github.com/howeyc/fsnotify/issues/24
-[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/openshift/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
deleted file mode 100644
index e4ac2a2fff..0000000000
--- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ /dev/null
@@ -1,144 +0,0 @@
-Thank you for your interest in contributing to fsnotify! We try to review and
-merge PRs in a reasonable timeframe, but please be aware that:
-
-- To avoid "wasted" work, please discuss changes on the issue tracker first. You
-  can just send PRs, but they may end up being rejected for one reason or the
-  other.
-
-- fsnotify is a cross-platform library, and changes must work reasonably well on
-  all supported platforms.
-
-- Changes will need to be compatible; old code should still compile, and the
-  runtime behaviour can't change in ways that are likely to lead to problems for
-  users.
-
-Testing
--------
-Just `go test ./...` runs all the tests; the CI runs this on all supported
-platforms. Testing different platforms locally can be done with something like
-[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
-
-Use the `-short` flag to make the "stress test" run faster.
-
-Writing new tests
------------------
-Scripts in the testdata directory allow creating test cases in a "shell-like"
-syntax. The basic format is:
-
-    script
-
-    Output:
-    desired output
-
-For example:
-
-    # Create a new file with some data.
-    watch /
-    echo data >/file
-
-    Output:
-    create /file
-    write /file
-
-Just create a new file to add a new test; select which tests to run with
-`-run TestScript/[path]`.
-
-script
-------
-The script is a "shell-like" script:
-
-    cmd arg arg
-
-Comments are supported with `#`:
-
-    # Comment
-    cmd arg arg # Comment
-
-All operations are done in a temp directory; a path like "/foo" is rewritten to
-"/tmp/TestFoo/foo".
-
-Arguments can be quoted with `"` or `'`; there are no escapes and they're
-functionally identical right now, but this may change in the future, so best to
-assume shell-like rules.
-
-    touch "/file with spaces"
-
-End-of-line escapes with `\` are not supported.
-
-### Supported commands
-
-    watch path [ops] # Watch the path, reporting events for it. Nothing is
-                     # watched by default. Optionally a list of ops can be
-                     # given, as with AddWith(path, WithOps(...)).
-    unwatch path     # Stop watching the path.
-    watchlist n      # Assert watchlist length.
-
-    stop             # Stop running the script; for debugging.
-    debug [yes/no]   # Enable/disable FSNOTIFY_DEBUG (tests are run in
-                       parallel by default, so -parallel=1 is probably a good
-                       idea).
-
-    touch path
-    mkdir [-p] dir
-    ln -s target link # Only ln -s supported.
-    mkfifo path
-    mknod dev path
-    mv src dst
-    rm [-r] path
-    chmod mode path  # Octal only
-    sleep time-in-ms
-
-    cat path         # Read path (does nothing with the data; just reads it).
-    echo str >>path  # Append "str" to "path".
-    echo str >path   # Truncate "path" and write "str".
-
-    require reason   # Skip the test if "reason" is true; "skip" and
-    skip reason      # "require" behave identically; it supports both for
-                     # readability. Possible reasons are:
-                     #
-                     #   always    Always skip this test.
-                     #   symlink   Symlinks are supported (requires admin
-                     #             permissions on Windows).
-                     #   mkfifo    Platform doesn't support FIFO named sockets.
-                     #   mknod     Platform doesn't support device nodes.
-
-
-output
-------
-After `Output:` the desired output is given; this is indented by convention, but
-that's not required.
-
-The format of that is:
-
-    # Comment
-    event path # Comment
-
-    system:
-    event path
-    system2:
-    event path
-
-Every event is one line, and any whitespace between the event and path are
-ignored. The path can optionally be surrounded in ". Anything after a "#" is
-ignored.
-
-Platform-specific tests can be added after GOOS; for example:
-
-    watch /
-    touch /file
-
-    Output:
-    # Tested if nothing else matches
-    create /file
-
-    # Windows-specific test.
-    windows:
-    write /file
-
-You can specify multiple platforms with a comma (e.g. "windows, linux:").
-"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
-
-
-[goon]: https://github.com/arp242/goon
-[Vagrant]: https://www.vagrantup.com/
-[integration_test.go]: /integration_test.go
diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/LICENSE b/openshift/tools/vendor/github.com/fsnotify/fsnotify/LICENSE
deleted file mode 100644
index fb03ade750..0000000000
--- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright © 2012 The Go Authors. All rights reserved.
-Copyright © fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-* Neither the name of Google Inc. nor the names of its contributors may be used
-  to endorse or promote products derived from this software without specific
-  prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/README.md b/openshift/tools/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index e480733d16..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,184 +0,0 @@ -fsnotify is a Go library to provide cross-platform filesystem notifications on -Windows, Linux, macOS, BSD, and illumos. - -Go 1.17 or newer is required; the full documentation is at -https://pkg.go.dev/github.com/fsnotify/fsnotify - ---- - -Platform support: - -| Backend | OS | Status | -| :-------------------- | :--------- | :------------------------------------------------------------------------ | -| inotify | Linux | Supported | -| kqueue | BSD, macOS | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FEN | illumos | Supported | -| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | -| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | -| USN Journals | Windows | [Needs support in x/sys/windows][usn] | -| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | - -Linux and illumos should include Android and Solaris, but these are currently -untested. - -[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 -[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 - -Usage ------ -A basic example: - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - // Create new watcher. - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - // Start listening for events. - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Has(fsnotify.Write) { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - // Add a path. - err = watcher.Add("/tmp") - if err != nil { - log.Fatal(err) - } - - // Block main goroutine forever. - <-make(chan struct{}) -} -``` - -Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be -run with: - - % go run ./cmd/fsnotify - -Further detailed documentation can be found in godoc: -https://pkg.go.dev/github.com/fsnotify/fsnotify - -FAQ ---- -### Will a file still be watched when it's moved to another directory? -No, not unless you are watching the location it was moved to. - -### Are subdirectories watched? -No, you must add watches for any directory you want to watch (a recursive -watcher is on the roadmap: [#18]). - -[#18]: https://github.com/fsnotify/fsnotify/issues/18 - -### Do I have to watch the Error and Event channels in a goroutine? -Yes. 
You can read both channels in the same goroutine using `select` (you don't
-need a separate goroutine for both channels; see the example).
-
-### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
-fsnotify requires support from the underlying OS to work. The current NFS and
-SMB protocols do not provide network level support for file notifications, and
-neither do the /proc and /sys virtual filesystems.
-
-This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
-
-[#9]: https://github.com/fsnotify/fsnotify/issues/9
-
-### Why do I get many Chmod events?
-Some programs may generate a lot of attribute changes; for example Spotlight on
-macOS, anti-virus programs, backup applications, and some others are known to do
-this. As a rule, it's typically best to ignore Chmod events. They're often not
-useful, and tend to cause problems.
-
-Spotlight indexing on macOS can result in multiple events (see [#15]). A
-temporary workaround is to add your folder(s) to the *Spotlight Privacy
-settings* until we have a native FSEvents implementation (see [#11]).
-
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#15]: https://github.com/fsnotify/fsnotify/issues/15
-
-### Watching a file doesn't work well
-Watching individual files (rather than directories) is generally not recommended
-as many programs (especially editors) update files atomically: they write to a
-temporary file which is then moved to the destination, overwriting the original
-(or some variant thereof). The watcher on the original file is now lost, as that
-no longer exists.
-
-The upshot of this approach is that a power failure or crash won't leave a
-half-written file.
-
-Watch the parent directory and use `Event.Name` to filter out files you're not
-interested in. There is an example of this in `cmd/fsnotify/file.go`.
-
-Platform-specific notes
------------------------
-### Linux
-When a file is removed a REMOVE event won't be emitted until all file
-descriptors are closed; it will emit a CHMOD instead:
-
-    fp := os.Open("file")
-    os.Remove("file")  // CHMOD
-    fp.Close()         // REMOVE
-
-This is the event that inotify sends, so not much can be changed about this.
-
-The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
-the number of watches per user, and `fs.inotify.max_user_instances` specifies
-the maximum number of inotify instances per user. Every Watcher you create is an
-"instance", and every path you add is a "watch".
-
-These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
-`/proc/sys/fs/inotify/max_user_instances`.
-
-To increase them you can use `sysctl` or write the value to the proc file:
-
-    # The default values on Linux 5.18
-    sysctl fs.inotify.max_user_watches=124983
-    sysctl fs.inotify.max_user_instances=128
-
-To make the changes persist on reboot, edit `/etc/sysctl.conf` or
-`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
-distro's documentation):
-
-    fs.inotify.max_user_watches=124983
-    fs.inotify.max_user_instances=128
-
-Reaching the limit will result in a "no space left on device" or "too many open
-files" error.
-
-### kqueue (macOS, all BSD systems)
-kqueue requires opening a file descriptor for every file that's being watched;
-so if you're watching a directory with five files then that's six file
-descriptors. You will run into your system's "max open files" limit faster on
-these platforms.
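-
-If a long-running program keeps hitting that limit, one option is to raise the
-soft `RLIMIT_NOFILE` to the hard limit at startup. A minimal sketch, assuming
-`golang.org/x/sys/unix` (note that Go 1.19 and later already raise the soft
-limit automatically, and macOS may additionally cap the effective value via
-`kern.maxfilesperproc`):
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-
-	"golang.org/x/sys/unix"
-)
-
-func main() {
-	var lim unix.Rlimit
-	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
-		log.Fatal(err)
-	}
-	fmt.Printf("open files: soft=%d hard=%d\n", lim.Cur, lim.Max)
-
-	// Raise the soft limit to the hard limit so kqueue can open more
-	// per-file descriptors before hitting "too many open files".
-	lim.Cur = lim.Max
-	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
-		log.Fatal(err)
-	}
-}
-```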
- -The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to -control the maximum number of open files. diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go deleted file mode 100644 index c349c326c7..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ /dev/null @@ -1,484 +0,0 @@ -//go:build solaris - -// FEN backend for illumos (supported) and Solaris (untested, but should work). -// -// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - "time" - - "github.com/fsnotify/fsnotify/internal" - "golang.org/x/sys/unix" -) - -type fen struct { - Events chan Event - Errors chan error - - mu sync.Mutex - port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]Op // Explicitly watched directories - watches map[string]Op // Explicitly watched non-directories -} - -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} - -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - w := &fen{ - Events: ev, - Errors: errs, - dirs: make(map[string]Op), - watches: make(map[string]Op), - done: make(chan struct{}), - } - - var err error - w.port, err = unix.NewEventPort() - if err != nil { - return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) - } - - go w.readEvents() - return w, nil -} - -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendEvent(name string, op Op) (sent bool) { - select { - case <-w.done: - return false - case w.Events <- Event{Name: name, Op: op}: - return true - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendError(err error) (sent bool) { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *fen) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -func (w *fen) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { - return nil - } - close(w.done) - return w.port.Close() -} - -func (w *fen) Add(name string) error { return w.AddWith(name) } - -func (w *fen) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", - time.Now().Format("15:04:05.000000000"), name) - } - - with := getOptions(opts...) - if !w.xSupports(with.op) { - return fmt.Errorf("%w: %s", xErrUnsupported, with.op) - } - - // Currently we resolve symlinks that were explicitly requested to be - // watched. Otherwise we would use LStat here. - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Associate all files in the directory. 
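-	// Note: event ports deliver per-file associations (port_associate(3C)),
-	// so watching a directory means walking it and associating each entry as
-	// well as the directory itself; handleDirectory below does that walk.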
- if stat.IsDir() { - err := w.handleDirectory(name, stat, true, w.associateFile) - if err != nil { - return err - } - - w.mu.Lock() - w.dirs[name] = with.op - w.mu.Unlock() - return nil - } - - err = w.associateFile(name, stat, true) - if err != nil { - return err - } - - w.mu.Lock() - w.watches[name] = with.op - w.mu.Unlock() - return nil -} - -func (w *fen) Remove(name string) error { - if w.isClosed() { - return nil - } - if !w.port.PathIsWatched(name) { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", - time.Now().Format("15:04:05.000000000"), name) - } - - // The user has expressed an intent. Immediately remove this name from - // whichever watch list it might be in. If it's not in there the delete - // doesn't cause harm. - w.mu.Lock() - delete(w.watches, name) - delete(w.dirs, name) - w.mu.Unlock() - - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Remove associations for every file in the directory. - if stat.IsDir() { - err := w.handleDirectory(name, stat, false, w.dissociateFile) - if err != nil { - return err - } - return nil - } - - err = w.port.DissociatePath(name) - if err != nil { - return err - } - - return nil -} - -// readEvents contains the main loop that runs in a goroutine watching for events. -func (w *fen) readEvents() { - // If this function returns, the watcher has been closed and we can close - // these channels - defer func() { - close(w.Errors) - close(w.Events) - }() - - pevents := make([]unix.PortEvent, 8) - for { - count, err := w.port.Get(pevents, 1, nil) - if err != nil && err != unix.ETIME { - // Interrupted system call (count should be 0) ignore and continue - if errors.Is(err, unix.EINTR) && count == 0 { - continue - } - // Get failed because we called w.Close() - if errors.Is(err, unix.EBADF) && w.isClosed() { - return - } - // There was an error not caused by calling w.Close() - if !w.sendError(err) { - return - } - } - - p := pevents[:count] - for _, pevent := range p { - if pevent.Source != unix.PORT_SOURCE_FILE { - // Event from unexpected source received; should never happen. - if !w.sendError(errors.New("Event from unexpected source received")) { - return - } - continue - } - - if debug { - internal.Debug(pevent.Path, pevent.Events) - } - - err = w.handleEvent(&pevent) - if !w.sendError(err) { - return - } - } - } -} - -func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { - files, err := os.ReadDir(path) - if err != nil { - return err - } - - // Handle all children of the directory. - for _, entry := range files { - finfo, err := entry.Info() - if err != nil { - return err - } - err = handler(filepath.Join(path, finfo.Name()), finfo, false) - if err != nil { - return err - } - } - - // And finally handle the directory itself. - return handler(path, stat, follow) -} - -// handleEvent might need to emit more than one fsnotify event if the events -// bitmap matches more than one event type (e.g. 
the file was both modified and
-// had the attributes changed between when the association was created and
-// when the event was returned).
-func (w *fen) handleEvent(event *unix.PortEvent) error {
-	var (
-		events     = event.Events
-		path       = event.Path
-		fmode      = event.Cookie.(os.FileMode)
-		reRegister = true
-	)
-
-	w.mu.Lock()
-	_, watchedDir := w.dirs[path]
-	_, watchedPath := w.watches[path]
-	w.mu.Unlock()
-	isWatched := watchedDir || watchedPath
-
-	if events&unix.FILE_DELETE != 0 {
-		if !w.sendEvent(path, Remove) {
-			return nil
-		}
-		reRegister = false
-	}
-	if events&unix.FILE_RENAME_FROM != 0 {
-		if !w.sendEvent(path, Rename) {
-			return nil
-		}
-		// Don't keep watching the new file name
-		reRegister = false
-	}
-	if events&unix.FILE_RENAME_TO != 0 {
-		// We don't report a Rename event for this case, because Rename events
-		// are interpreted as referring to the _old_ name of the file, and in
-		// this case the event would refer to the new name of the file. This
-		// type of rename event is not supported by fsnotify.
-
-		// inotify reports a Remove event in this case, so we simulate this
-		// here.
-		if !w.sendEvent(path, Remove) {
-			return nil
-		}
-		// Don't keep watching the file that was removed
-		reRegister = false
-	}
-
-	// The file is gone, nothing left to do.
-	if !reRegister {
-		if watchedDir {
-			w.mu.Lock()
-			delete(w.dirs, path)
-			w.mu.Unlock()
-		}
-		if watchedPath {
-			w.mu.Lock()
-			delete(w.watches, path)
-			w.mu.Unlock()
-		}
-		return nil
-	}
-
-	// If we didn't get a deletion the file still exists and we're going to
-	// have to watch it again. Let's Stat it now so that we can compare
-	// permissions and have what we need to continue watching the file.
-
-	stat, err := os.Lstat(path)
-	if err != nil {
-		// This is unexpected, but we should still emit an event. This happens
-		// most often on "rm -r" of a subdirectory inside a watched directory.
-		// We get a modify event of something happening inside, but by the
-		// time we get here, the subdirectory is already gone. Clearly we were
-		// watching this path but now it is gone. Let's tell the user that it
-		// was removed.
-		if !w.sendEvent(path, Remove) {
-			return nil
-		}
-		// Suppress extra write events on removed directories; they are not
-		// informative and can be confusing.
-		return nil
-	}
-
-	// Resolve symlinks that were explicitly watched, as we would have at
-	// Add() time. This helps suppress spurious Chmod events on watched
-	// symlinks.
-	if isWatched {
-		stat, err = os.Stat(path)
-		if err != nil {
-			// The symlink still exists, but the target is gone. Report the
-			// Remove similar to above.
-			if !w.sendEvent(path, Remove) {
-				return nil
-			}
-			// Don't return the error
-		}
-	}
-
-	if events&unix.FILE_MODIFIED != 0 {
-		if fmode.IsDir() && watchedDir {
-			if err := w.updateDirectory(path); err != nil {
-				return err
-			}
-		} else {
-			if !w.sendEvent(path, Write) {
-				return nil
-			}
-		}
-	}
-	if events&unix.FILE_ATTRIB != 0 && stat != nil {
-		// Only send Chmod if perms changed
-		if stat.Mode().Perm() != fmode.Perm() {
-			if !w.sendEvent(path, Chmod) {
-				return nil
-			}
-		}
-	}
-
-	if stat != nil {
-		// If we get here, it means we've hit an event above that requires us
-		// to continue watching the file or directory
-		return w.associateFile(path, stat, isWatched)
-	}
-	return nil
-}
-
-func (w *fen) updateDirectory(path string) error {
-	// The directory was modified, so we must find unwatched entities and
-	// watch them.
-	// If something was removed from the directory, nothing will happen, as
-	// everything else should still be watched.
-	files, err := os.ReadDir(path)
-	if err != nil {
-		return err
-	}
-
-	for _, entry := range files {
-		path := filepath.Join(path, entry.Name())
-		if w.port.PathIsWatched(path) {
-			continue
-		}
-
-		finfo, err := entry.Info()
-		if err != nil {
-			return err
-		}
-		err = w.associateFile(path, finfo, false)
-		if !w.sendError(err) {
-			return nil
-		}
-		if !w.sendEvent(path, Create) {
-			return nil
-		}
-	}
-	return nil
-}
-
-func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
-	if w.isClosed() {
-		return ErrClosed
-	}
-	// This is primarily protecting the call to AssociatePath but it is
-	// important and intentional that the call to PathIsWatched is also
-	// protected by this mutex. Without this mutex, AssociatePath has been seen
-	// to error out that the path is already associated.
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	if w.port.PathIsWatched(path) {
-		// Remove the old association in favor of this one. If we get ENOENT,
-		// then while the x/sys/unix wrapper still thought that this path was
-		// associated, the underlying event port did not. This call will have
-		// cleared up that discrepancy. The most likely cause is that the event
-		// has fired but we haven't processed it yet.
-		err := w.port.DissociatePath(path)
-		if err != nil && !errors.Is(err, unix.ENOENT) {
-			return err
-		}
-	}
-
-	var events int
-	if !follow {
-		// Watch symlinks themselves rather than their targets unless this entry
-		// is explicitly watched.
-		events |= unix.FILE_NOFOLLOW
-	}
-	if true { // TODO: implement withOps()
-		events |= unix.FILE_MODIFIED
-	}
-	if true {
-		events |= unix.FILE_ATTRIB
-	}
-	return w.port.AssociatePath(path, stat, events, stat.Mode())
-}
-
-func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
-	if !w.port.PathIsWatched(path) {
-		return nil
-	}
-	return w.port.DissociatePath(path)
-}
-
-func (w *fen) WatchList() []string {
-	if w.isClosed() {
-		return nil
-	}
-
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	entries := make([]string, 0, len(w.watches)+len(w.dirs))
-	for pathname := range w.dirs {
-		entries = append(entries, pathname)
-	}
-	for pathname := range w.watches {
-		entries = append(entries, pathname)
-	}
-
-	return entries
-}
-
-func (w *fen) xSupports(op Op) bool {
-	if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
-		op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
-		return false
-	}
-	return true
-}
diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
deleted file mode 100644
index 36c311694c..0000000000
--- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
+++ /dev/null
@@ -1,658 +0,0 @@
-//go:build linux && !appengine
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"io/fs"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"time"
-	"unsafe"
-
-	"github.com/fsnotify/fsnotify/internal"
-	"golang.org/x/sys/unix"
-)
-
-type inotify struct {
-	Events chan Event
-	Errors chan error
-
-	// Store fd here as os.File.Read() will no longer return on close after
-	// calling Fd().
See: https://github.com/golang/go/issues/26439 - fd int - inotifyFile *os.File - watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneMu sync.Mutex - doneResp chan struct{} // Channel to respond to Close - - // Store rename cookies in an array, with the index wrapping to 0. Almost - // all of the time what we get is a MOVED_FROM to set the cookie and the - // next event inotify sends will be MOVED_TO to read it. However, this is - // not guaranteed – as described in inotify(7) – and we may get other events - // between the two MOVED_* events (including other MOVED_* ones). - // - // A second issue is that moving a file outside the watched directory will - // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to - // read and delete it. So just storing it in a map would slowly leak memory. - // - // Doing it like this gives us a simple fast LRU-cache that won't allocate. - // Ten items should be more than enough for our purpose, and a loop over - // such a short array is faster than a map access anyway (not that it hugely - // matters since we're talking about hundreds of ns at the most, but still). - cookies [10]koekje - cookieIndex uint8 - cookiesMu sync.Mutex -} - -type ( - watches struct { - mu sync.RWMutex - wd map[uint32]*watch // wd → watch - path map[string]uint32 // pathname → wd - } - watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. - recurse bool // Recursion with ./...? - } - koekje struct { - cookie uint32 - path string - } -) - -func newWatches() *watches { - return &watches{ - wd: make(map[uint32]*watch), - path: make(map[string]uint32), - } -} - -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - watch := w.wd[wd] // Could have had Remove() called. See #616. - if watch == nil { - return - } - delete(w.path, watch.path) - delete(w.wd, wd) -} - -func (w *watches) removePath(path string) ([]uint32, error) { - w.mu.Lock() - defer w.mu.Unlock() - - path, recurse := recursivePath(path) - wd, ok := w.path[path] - if !ok { - return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) - } - - watch := w.wd[wd] - if recurse && !watch.recurse { - return nil, fmt.Errorf("can't use /... 
with non-recursive watch %q", path) - } - - delete(w.path, path) - delete(w.wd, wd) - if !watch.recurse { - return []uint32{wd}, nil - } - - wds := make([]uint32, 0, 8) - wds = append(wds, wd) - for p, rwd := range w.path { - if filepath.HasPrefix(p, path) { - delete(w.path, p) - delete(w.wd, rwd) - wds = append(wds, rwd) - } - } - return wds, nil -} - -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - -func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - - var existing *watch - wd, ok := w.path[path] - if ok { - existing = w.wd[wd] - } - - upd, err := f(existing) - if err != nil { - return err - } - if upd != nil { - w.wd[upd.wd] = upd - w.path[upd.path] = upd.wd - - if upd.wd != wd { - delete(w.wd, wd) - } - } - - return nil -} - -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} - -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - // Need to set nonblocking mode for SetDeadline to work, otherwise blocking - // I/O operations won't terminate on close. - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) - if fd == -1 { - return nil, errno - } - - w := &inotify{ - Events: ev, - Errors: errs, - fd: fd, - inotifyFile: os.NewFile(uintptr(fd), ""), - watches: newWatches(), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *inotify) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *inotify) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *inotify) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -func (w *inotify) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() - return nil - } - close(w.done) - w.doneMu.Unlock() - - // Causes any blocking reads to return with an error, provided the file - // still supports deadline operations. - err := w.inotifyFile.Close() - if err != nil { - return err - } - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -func (w *inotify) Add(name string) error { return w.AddWith(name) } - -func (w *inotify) AddWith(path string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", - time.Now().Format("15:04:05.000000000"), path) - } - - with := getOptions(opts...) - if !w.xSupports(with.op) { - return fmt.Errorf("%w: %s", xErrUnsupported, with.op) - } - - path, recurse := recursivePath(path) - if recurse { - return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if !d.IsDir() { - if root == path { - return fmt.Errorf("fsnotify: not a directory: %q", path) - } - return nil - } - - // Send a Create event when adding new directory from a recursive - // watch; this is for "mkdir -p one/two/three". 
Usually all those - // directories will be created before we can set up watchers on the - // subdirectories, so only "one" would be sent as a Create event and - // not "one/two" and "one/two/three" (inotifywait -r has the same - // problem). - if with.sendCreate && root != path { - w.sendEvent(Event{Name: root, Op: Create}) - } - - return w.add(root, with, true) - }) - } - - return w.add(path, with, false) -} - -func (w *inotify) add(path string, with withOpts, recurse bool) error { - var flags uint32 - if with.noFollow { - flags |= unix.IN_DONT_FOLLOW - } - if with.op.Has(Create) { - flags |= unix.IN_CREATE - } - if with.op.Has(Write) { - flags |= unix.IN_MODIFY - } - if with.op.Has(Remove) { - flags |= unix.IN_DELETE | unix.IN_DELETE_SELF - } - if with.op.Has(Rename) { - flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF - } - if with.op.Has(Chmod) { - flags |= unix.IN_ATTRIB - } - if with.op.Has(xUnportableOpen) { - flags |= unix.IN_OPEN - } - if with.op.Has(xUnportableRead) { - flags |= unix.IN_ACCESS - } - if with.op.Has(xUnportableCloseWrite) { - flags |= unix.IN_CLOSE_WRITE - } - if with.op.Has(xUnportableCloseRead) { - flags |= unix.IN_CLOSE_NOWRITE - } - return w.register(path, flags, recurse) -} - -func (w *inotify) register(path string, flags uint32, recurse bool) error { - return w.watches.updatePath(path, func(existing *watch) (*watch, error) { - if existing != nil { - flags |= existing.flags | unix.IN_MASK_ADD - } - - wd, err := unix.InotifyAddWatch(w.fd, path, flags) - if wd == -1 { - return nil, err - } - - if existing == nil { - return &watch{ - wd: uint32(wd), - path: path, - flags: flags, - recurse: recurse, - }, nil - } - - existing.wd = uint32(wd) - existing.flags = flags - return existing, nil - }) -} - -func (w *inotify) Remove(name string) error { - if w.isClosed() { - return nil - } - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", - time.Now().Format("15:04:05.000000000"), name) - } - return w.remove(filepath.Clean(name)) -} - -func (w *inotify) remove(name string) error { - wds, err := w.watches.removePath(name) - if err != nil { - return err - } - - for _, wd := range wds { - _, err := unix.InotifyRmWatch(w.fd, wd) - if err != nil { - // TODO: Perhaps it's not helpful to return an error here in every - // case; the only two possible errors are: - // - // EBADF, which happens when w.fd is not a valid file descriptor of - // any kind. - // - // EINVAL, which is when fd is not an inotify descriptor or wd is - // not a valid watch descriptor. Watch descriptors are invalidated - // when they are removed explicitly or implicitly; explicitly by - // inotify_rm_watch, implicitly when the file they are watching is - // deleted. - return err - } - } - return nil -} - -func (w *inotify) WatchList() []string { - if w.isClosed() { - return nil - } - - entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() - for pathname := range w.watches.path { - entries = append(entries, pathname) - } - w.watches.mu.RUnlock() - - return entries -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *inotify) readEvents() { - defer func() { - close(w.doneResp) - close(w.Errors) - close(w.Events) - }() - - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) - for { - // See if we have been closed. 
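-		// (Close() closes w.done first, so a watcher closed between reads is
-		// noticed here before attempting another read.)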
-		if w.isClosed() {
-			return
-		}
-
-		n, err := w.inotifyFile.Read(buf[:])
-		switch {
-		case errors.Unwrap(err) == os.ErrClosed:
-			return
-		case err != nil:
-			if !w.sendError(err) {
-				return
-			}
-			continue
-		}
-
-		if n < unix.SizeofInotifyEvent {
-			var err error
-			if n == 0 {
-				err = io.EOF // If EOF is received. This should really never happen.
-			} else if n < 0 {
-				err = errno // If an error occurred while reading.
-			} else {
-				err = errors.New("notify: short read in readEvents()") // Read was too short.
-			}
-			if !w.sendError(err) {
-				return
-			}
-			continue
-		}
-
-		// We don't know how many events we just read into the buffer.
-		// While the offset points to at least one whole event...
-		var offset uint32
-		for offset <= uint32(n-unix.SizeofInotifyEvent) {
-			var (
-				// Point "raw" to the event in the buffer
-				raw     = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
-				mask    = uint32(raw.Mask)
-				nameLen = uint32(raw.Len)
-				// Move to the next event in the buffer
-				next = func() { offset += unix.SizeofInotifyEvent + nameLen }
-			)
-
-			if mask&unix.IN_Q_OVERFLOW != 0 {
-				if !w.sendError(ErrEventOverflow) {
-					return
-				}
-			}
-
-			/// If the event happened to the watched directory or the watched
-			/// file, the kernel doesn't append the filename to the event, but
-			/// we would like to always fill the "Name" field with a valid
-			/// filename. We retrieve the path of the watch from the "paths"
-			/// map.
-			watch := w.watches.byWd(uint32(raw.Wd))
-			/// Can be nil if Remove() was called in another goroutine for this
-			/// path in between reading the events from the kernel and reading
-			/// the internal state. Not much we can do about it, so just skip.
-			/// See #616.
-			if watch == nil {
-				next()
-				continue
-			}
-
-			name := watch.path
-			if nameLen > 0 {
-				/// Point "bytes" at the first byte of the filename
-				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
-				/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
-				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
-			}
-
-			if debug {
-				internal.Debug(name, raw.Mask, raw.Cookie)
-			}
-
-			if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
-				next()
-				continue
-			}
-
-			// inotify will automatically remove the watch on deletes; just need
-			// to clean our state here.
-			if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
-				w.watches.remove(watch.wd)
-			}
-
-			// We can't really update the state when a watched path is moved;
-			// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
-			// the watch.
-			if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
-				if watch.recurse {
-					next() // Do nothing
-					continue
-				}
-
-				err := w.remove(watch.path)
-				if err != nil && !errors.Is(err, ErrNonExistentWatch) {
-					if !w.sendError(err) {
-						return
-					}
-				}
-			}
-
-			/// Skip if we're watching both this path and the parent; the parent
-			/// will already send a delete so no need to do it twice.
-			if mask&unix.IN_DELETE_SELF != 0 {
-				if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok {
-					next()
-					continue
-				}
-			}
-
-			ev := w.newEvent(name, mask, raw.Cookie)
-			// Need to update watch path for recurse.
-			if watch.recurse {
-				isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR
-				/// New directory created: set up watch on it.
-				if isDir && ev.Has(Create) {
-					err := w.register(ev.Name, watch.flags, true)
-					if !w.sendError(err) {
-						return
-					}
-
-					// This was a directory rename, so we need to update all
-					// the children.
- // - // TODO: this is of course pretty slow; we should use a - // better data structure for storing all of this, e.g. store - // children in the watch. I have some code for this in my - // kqueue refactor we can use in the future. For now I'm - // okay with this as it's not publicly available. - // Correctness first, performance second. - if ev.renamedFrom != "" { - w.watches.mu.Lock() - for k, ww := range w.watches.wd { - if k == watch.wd || ww.path == ev.Name { - continue - } - if strings.HasPrefix(ww.path, ev.renamedFrom) { - ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) - w.watches.wd[k] = ww - } - } - w.watches.mu.Unlock() - } - } - } - - /// Send the events that are not ignored on the events channel - if !w.sendEvent(ev) { - return - } - next() - } - } -} - -func (w *inotify) isRecursive(path string) bool { - ww := w.watches.byPath(path) - if ww == nil { // path could be a file, so also check the Dir. - ww = w.watches.byPath(filepath.Dir(path)) - } - return ww != nil && ww.recurse -} - -func (w *inotify) newEvent(name string, mask, cookie uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_OPEN == unix.IN_OPEN { - e.Op |= xUnportableOpen - } - if mask&unix.IN_ACCESS == unix.IN_ACCESS { - e.Op |= xUnportableRead - } - if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { - e.Op |= xUnportableCloseWrite - } - if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { - e.Op |= xUnportableCloseRead - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - - if cookie != 0 { - if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - w.cookiesMu.Lock() - w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} - w.cookieIndex++ - if w.cookieIndex > 9 { - w.cookieIndex = 0 - } - w.cookiesMu.Unlock() - } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - w.cookiesMu.Lock() - var prev string - for _, c := range w.cookies { - if c.cookie == cookie { - prev = c.path - break - } - } - w.cookiesMu.Unlock() - e.renamedFrom = prev - } - } - return e -} - -func (w *inotify) xSupports(op Op) bool { - return true // Supports everything. -} - -func (w *inotify) state() { - w.watches.mu.Lock() - defer w.watches.mu.Unlock() - for wd, ww := range w.watches.wd { - fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) - } -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go deleted file mode 100644 index d8de5ab76f..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ /dev/null @@ -1,733 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly || darwin - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "time" - - "github.com/fsnotify/fsnotify/internal" - "golang.org/x/sys/unix" -) - -type kqueue struct { - Events chan Event - Errors chan error - - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing kq. 
- watches *watches - done chan struct{} - doneMu sync.Mutex -} - -type ( - watches struct { - mu sync.RWMutex - wd map[int]watch // wd → watch - path map[string]int // pathname → wd - byDir map[string]map[int]struct{} // dirname(path) → wd - seen map[string]struct{} // Keep track of if we know this file exists. - byUser map[string]struct{} // Watches added with Watcher.Add() - } - watch struct { - wd int - name string - linkName string // In case of links; name is the target, and this is the link. - isDir bool - dirFlags uint32 - } -) - -func newWatches() *watches { - return &watches{ - wd: make(map[int]watch), - path: make(map[string]int), - byDir: make(map[string]map[int]struct{}), - seen: make(map[string]struct{}), - byUser: make(map[string]struct{}), - } -} - -func (w *watches) listPaths(userOnly bool) []string { - w.mu.RLock() - defer w.mu.RUnlock() - - if userOnly { - l := make([]string, 0, len(w.byUser)) - for p := range w.byUser { - l = append(l, p) - } - return l - } - - l := make([]string, 0, len(w.path)) - for p := range w.path { - l = append(l, p) - } - return l -} - -func (w *watches) watchesInDir(path string) []string { - w.mu.RLock() - defer w.mu.RUnlock() - - l := make([]string, 0, 4) - for fd := range w.byDir[path] { - info := w.wd[fd] - if _, ok := w.byUser[info.name]; !ok { - l = append(l, info.name) - } - } - return l -} - -// Mark path as added by the user. -func (w *watches) addUserWatch(path string) { - w.mu.Lock() - defer w.mu.Unlock() - w.byUser[path] = struct{}{} -} - -func (w *watches) addLink(path string, fd int) { - w.mu.Lock() - defer w.mu.Unlock() - - w.path[path] = fd - w.seen[path] = struct{}{} -} - -func (w *watches) add(path, linkPath string, fd int, isDir bool) { - w.mu.Lock() - defer w.mu.Unlock() - - w.path[path] = fd - w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} - - parent := filepath.Dir(path) - byDir, ok := w.byDir[parent] - if !ok { - byDir = make(map[int]struct{}, 1) - w.byDir[parent] = byDir - } - byDir[fd] = struct{}{} -} - -func (w *watches) byWd(fd int) (watch, bool) { - w.mu.RLock() - defer w.mu.RUnlock() - info, ok := w.wd[fd] - return info, ok -} - -func (w *watches) byPath(path string) (watch, bool) { - w.mu.RLock() - defer w.mu.RUnlock() - info, ok := w.wd[w.path[path]] - return info, ok -} - -func (w *watches) updateDirFlags(path string, flags uint32) { - w.mu.Lock() - defer w.mu.Unlock() - - fd := w.path[path] - info := w.wd[fd] - info.dirFlags = flags - w.wd[fd] = info -} - -func (w *watches) remove(fd int, path string) bool { - w.mu.Lock() - defer w.mu.Unlock() - - isDir := w.wd[fd].isDir - delete(w.path, path) - delete(w.byUser, path) - - parent := filepath.Dir(path) - delete(w.byDir[parent], fd) - - if len(w.byDir[parent]) == 0 { - delete(w.byDir, parent) - } - - delete(w.wd, fd) - delete(w.seen, path) - return isDir -} - -func (w *watches) markSeen(path string, exists bool) { - w.mu.Lock() - defer w.mu.Unlock() - if exists { - w.seen[path] = struct{}{} - } else { - delete(w.seen, path) - } -} - -func (w *watches) seenBefore(path string) bool { - w.mu.RLock() - defer w.mu.RUnlock() - _, ok := w.seen[path] - return ok -} - -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} - -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - kq, closepipe, err := newKqueue() - if err != nil { - return nil, err - } - - w := &kqueue{ - Events: ev, - Errors: errs, - kq: kq, - closepipe: closepipe, - done: make(chan struct{}), - 
watches: newWatches(), - } - - go w.readEvents() - return w, nil -} - -// newKqueue creates a new kernel event queue and returns a descriptor. -// -// This registers a new event on closepipe, which will trigger an event when -// it's closed. This way we can use kevent() without timeout/polling; without -// the closepipe, it would block forever and we wouldn't be able to stop it at -// all. -func newKqueue() (kq int, closepipe [2]int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, closepipe, err - } - - // Register the close pipe. - err = unix.Pipe(closepipe[:]) - if err != nil { - unix.Close(kq) - return kq, closepipe, err - } - unix.CloseOnExec(closepipe[0]) - unix.CloseOnExec(closepipe[1]) - - // Register changes to listen on the closepipe. - changes := make([]unix.Kevent_t, 1) - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, - unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) - - ok, err := unix.Kevent(kq, changes, nil, nil) - if ok == -1 { - unix.Close(kq) - unix.Close(closepipe[0]) - unix.Close(closepipe[1]) - return kq, closepipe, err - } - return kq, closepipe, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *kqueue) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *kqueue) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *kqueue) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -func (w *kqueue) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() - return nil - } - close(w.done) - w.doneMu.Unlock() - - pathsToRemove := w.watches.listPaths(false) - for _, name := range pathsToRemove { - w.Remove(name) - } - - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) - return nil -} - -func (w *kqueue) Add(name string) error { return w.AddWith(name) } - -func (w *kqueue) AddWith(name string, opts ...addOpt) error { - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", - time.Now().Format("15:04:05.000000000"), name) - } - - with := getOptions(opts...) - if !w.xSupports(with.op) { - return fmt.Errorf("%w: %s", xErrUnsupported, with.op) - } - - _, err := w.addWatch(name, noteAllEvents) - if err != nil { - return err - } - w.watches.addUserWatch(name) - return nil -} - -func (w *kqueue) Remove(name string) error { - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", - time.Now().Format("15:04:05.000000000"), name) - } - return w.remove(name, true) -} - -func (w *kqueue) remove(name string, unwatchFiles bool) error { - if w.isClosed() { - return nil - } - - name = filepath.Clean(name) - info, ok := w.watches.byPath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - err := w.register([]int{info.wd}, unix.EV_DELETE, 0) - if err != nil { - return err - } - - unix.Close(info.wd) - - isDir := w.watches.remove(info.wd, name) - - // Find all watched paths that are in this directory that are not external. 
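-	// ("External" here means added directly by the user via Add(); those
-	// watches are kept, while watches this backend created implicitly for
-	// the directory's contents are removed below.)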
- if unwatchFiles && isDir { - pathsToRemove := w.watches.watchesInDir(name) - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error to - // the user, as that will just confuse them with an error about a - // path they did not explicitly watch themselves. - w.Remove(name) - } - } - return nil -} - -func (w *kqueue) WatchList() []string { - if w.isClosed() { - return nil - } - return w.watches.listPaths(true) -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// addWatch adds name to the watched file set; the flags are interpreted as -// described in kevent(2). -// -// Returns the real path to the file which was added, with symlinks resolved. -func (w *kqueue) addWatch(name string, flags uint32) (string, error) { - if w.isClosed() { - return "", ErrClosed - } - - name = filepath.Clean(name) - - info, alreadyWatching := w.watches.byPath(name) - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets or named pipes. - if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { - return "", nil - } - - // Follow symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - link, err := os.Readlink(name) - if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? - return "", nil - } - - _, alreadyWatching = w.watches.byPath(link) - if alreadyWatching { - // Add to watches so we don't get spurious Create events later - // on when we diff the directories. - w.watches.addLink(name, 0) - return link, nil - } - - info.linkName = name - name = link - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - // Retry on EINTR; open() can return EINTR in practice on macOS. - // See #354, and Go issues 11180 and 39237. - for { - info.wd, err = unix.Open(name, openMode, 0) - if err == nil { - break - } - if errors.Is(err, unix.EINTR) { - continue - } - - return "", err - } - - info.isDir = fi.IsDir() - } - - err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) - if err != nil { - unix.Close(info.wd) - return "", err - } - - if !alreadyWatching { - w.watches.add(name, info.linkName, info.wd, info.isDir) - } - - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - if info.isDir { - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) - w.watches.updateDirFlags(name, flags) - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *kqueue) readEvents() { - defer func() { - close(w.Events) - close(w.Errors) - _ = unix.Close(w.kq) - unix.Close(w.closepipe[0]) - }() - - eventBuffer := make([]unix.Kevent_t, 10) - for { - kevents, err := w.read(eventBuffer) - // EINTR is okay, the syscall was interrupted before timeout expired. 
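-		// (No explicit retry is needed for EINTR: nothing is reported and the
-		// enclosing loop simply calls read again.)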
-		if err != nil && !errors.Is(err, unix.EINTR) {
-			if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
-				return
-			}
-		}
-
-		for _, kevent := range kevents {
-			var (
-				wd   = int(kevent.Ident)
-				mask = uint32(kevent.Fflags)
-			)
-
-			// Shut down the loop when the pipe is closed, but only after all
-			// other events have been processed.
-			if wd == w.closepipe[0] {
-				return
-			}
-
-			path, ok := w.watches.byWd(wd)
-			if debug {
-				internal.Debug(path.name, &kevent)
-			}
-
-			// On macOS it seems that sometimes an event with Ident=0 is
-			// delivered, and no other flags/information beyond that, even
-			// though we never saw such a file descriptor. For example in
-			// TestWatchSymlink/277 (usually at the end, but sometimes sooner):
-			//
-			// fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
-			// unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
-			// unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
-			//
-			// The first is a normal event, the second with Ident 0. No error
-			// flag, no data, no ... nothing.
-			//
-			// I read a bit through bsd/kern_event.c from the xnu source, but I
-			// don't really see an obvious location where this is triggered –
-			// this doesn't seem intentional, but idk...
-			//
-			// Technically fd 0 is a valid descriptor, so only skip it if
-			// there's no path, and if we're on macOS.
-			if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
-				continue
-			}
-
-			event := w.newEvent(path.name, path.linkName, mask)
-
-			if event.Has(Rename) || event.Has(Remove) {
-				w.remove(event.Name, false)
-				w.watches.markSeen(event.Name, false)
-			}
-
-			if path.isDir && event.Has(Write) && !event.Has(Remove) {
-				w.dirChange(event.Name)
-			} else if !w.sendEvent(event) {
-				return
-			}
-
-			if event.Has(Remove) {
-				// Look for a file that may have overwritten this; for example,
-				// mv f1 f2 will delete f2, then create f2.
-				if path.isDir {
-					fileDir := filepath.Clean(event.Name)
-					_, found := w.watches.byPath(fileDir)
-					if found {
-						// TODO: this branch is never triggered in any test.
-						// Added in d6220df (2012).
-						// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
-						//
-						// I don't really get how this can be triggered either.
-						// And it wasn't triggered in the patch that added it,
-						// either.
-						//
-						// Original also had a comment:
-						// make sure the directory exists before we watch for
-						// changes. When we do a recursive watch and perform
-						// rm -rf, the parent directory might have gone
-						// missing, ignore the missing directory and let the
-						// upcoming delete event remove the watch from the
-						// parent directory.
-						err := w.dirChange(fileDir)
-						if !w.sendError(err) {
-							return
-						}
-					}
-				} else {
-					path := filepath.Clean(event.Name)
-					if fi, err := os.Lstat(path); err == nil {
-						err := w.sendCreateIfNew(path, fi)
-						if !w.sendError(err) {
-							return
-						}
-					}
-				}
-			}
-		}
-	}
-}
-
-// newEvent returns a platform-independent Event based on kqueue Fflags.
-func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
-	e := Event{Name: name}
-	if linkName != "" {
-		// If the user watched "/path/link" then emit events as "/path/link"
-		// rather than "/path/target".
- e.Name = linkName - } - - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - // No point sending a write and delete event at the same time: if it's gone, - // then it's gone. - if e.Op.Has(Write) && e.Op.Has(Remove) { - e.Op &^= Write - } - return e -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *kqueue) watchDirectoryFiles(dirPath string) error { - files, err := os.ReadDir(dirPath) - if err != nil { - return err - } - - for _, f := range files { - path := filepath.Join(dirPath, f.Name()) - - fi, err := f.Info() - if err != nil { - return fmt.Errorf("%q: %w", path, err) - } - - cleanPath, err := w.internalWatch(path, fi) - if err != nil { - // No permission to read the file; that's not a problem: just skip. - // But do add it to w.fileExists to prevent it from being picked up - // as a "new" file later (it still shows up in the directory - // listing). - switch { - case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): - cleanPath = filepath.Clean(path) - default: - return fmt.Errorf("%q: %w", path, err) - } - } - - w.watches.markSeen(cleanPath, true) - } - - return nil -} - -// Search the directory for new files and send an event for them. -// -// This functionality is to have the BSD watcher match the inotify, which sends -// a create event for files created in a watched directory. -func (w *kqueue) dirChange(dir string) error { - files, err := os.ReadDir(dir) - if err != nil { - // Directory no longer exists: we can ignore this safely. kqueue will - // still give us the correct events. - if errors.Is(err, os.ErrNotExist) { - return nil - } - return fmt.Errorf("fsnotify.dirChange: %w", err) - } - - for _, f := range files { - fi, err := f.Info() - if err != nil { - return fmt.Errorf("fsnotify.dirChange: %w", err) - } - - err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) - if err != nil { - // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { - return nil - } - return fmt.Errorf("fsnotify.dirChange: %w", err) - } - } - return nil -} - -// Send a create event if the file isn't already being tracked, and start -// watching this file. -func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { - if !w.watches.seenBefore(path) { - if !w.sendEvent(Event{Name: path, Op: Create}) { - return nil - } - } - - // Like watchDirectoryFiles, but without doing another ReadDir. - path, err := w.internalWatch(path, fi) - if err != nil { - return err - } - w.watches.markSeen(path, true) - return nil -} - -func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { - if fi.IsDir() { - // mimic Linux providing delete events for subdirectories, but preserve - // the flags used if currently watching subdirectory - info, _ := w.watches.byPath(name) - return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// Register events with the queue. -func (w *kqueue) register(fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - for i, fd := range fds { - // SetKevent converts int to the platform-specific types. 
- unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // Register the events. - success, err := unix.Kevent(w.kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(w.kq, nil, events, nil) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -func (w *kqueue) xSupports(op Op) bool { - if runtime.GOOS == "freebsd" { - //return true // Supports everything. - } - if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || - op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { - return false - } - return true -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go deleted file mode 100644 index 5eb5dbc66f..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) - -package fsnotify - -import "errors" - -type other struct { - Events chan Event - Errors chan error -} - -func newBackend(ev chan Event, errs chan error) (backend, error) { - return nil, errors.New("fsnotify not supported on the current platform") -} -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - return newBackend(ev, errs) -} -func (w *other) Close() error { return nil } -func (w *other) WatchList() []string { return nil } -func (w *other) Add(name string) error { return nil } -func (w *other) AddWith(name string, opts ...addOpt) error { return nil } -func (w *other) Remove(name string) error { return nil } -func (w *other) xSupports(op Op) bool { return false } diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go deleted file mode 100644 index c54a630838..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ /dev/null @@ -1,682 +0,0 @@ -//go:build windows - -// Windows backend based on ReadDirectoryChangesW() -// -// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "sync" - "time" - "unsafe" - - "github.com/fsnotify/fsnotify/internal" - "golang.org/x/sys/windows" -) - -type readDirChangesW struct { - Events chan Event - Errors chan error - - port windows.Handle // Handle to completion port - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error - - mu sync.Mutex // Protects access to watches, closed - watches watchMap // Map of watches (key: i-number) - closed bool // Set to true when Close() is first called -} - -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(50, ev, errs) -} - -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) - if err != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", err) - } - w := &readDirChangesW{ - Events: ev, - Errors: errs, - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - quit: make(chan chan<- error, 1), - } - 
go w.readEvents() - return w, nil -} - -func (w *readDirChangesW) isClosed() bool { - w.mu.Lock() - defer w.mu.Unlock() - return w.closed -} - -func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { - if mask == 0 { - return false - } - - event := w.newEvent(name, uint32(mask)) - event.renamedFrom = renamedFrom - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *readDirChangesW) sendError(err error) bool { - if err == nil { - return true - } - select { - case w.Errors <- err: - return true - case <-w.quit: - return false - } -} - -func (w *readDirChangesW) Close() error { - if w.isClosed() { - return nil - } - - w.mu.Lock() - w.closed = true - w.mu.Unlock() - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } - -func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", - time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) - } - - with := getOptions(opts...) - if !w.xSupports(with.op) { - return fmt.Errorf("%w: %s", xErrUnsupported, with.op) - } - if with.bufsize < 4096 { - return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") - } - - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - bufsize: with.bufsize, - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -func (w *readDirChangesW) Remove(name string) error { - if w.isClosed() { - return nil - } - if debug { - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", - time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) - } - - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -func (w *readDirChangesW) WatchList() []string { - if w.isClosed() { - return nil - } - - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - for name := range watchEntry.names { - entries = append(entries, filepath.Join(watchEntry.path, name)) - } - // the directory itself is being watched - if watchEntry.mask != 0 { - entries = append(entries, watchEntry.path) - } - } - } - - return entries -} - -// These options are from the old golang.org/x/exp/winfsnotify, where you could -// add various options to the watch. This has long since been removed. -// -// The "sys" in the name is misleading as they're not part of any "system". 
-// -// This should all be removed at some point, and just use windows.FILE_NOTIFY_* -const ( - sysFSALLEVENTS = 0xfff - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - sysFSIGNORED = 0x8000 -) - -func (w *readDirChangesW) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - bufsize int - reply chan error -} - -type inode struct { - handle windows.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov windows.Overlapped - ino *inode // i-number - recurse bool // Recursive watch? - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf []byte // buffer, allocated later -} - -type ( - indexMap map[uint64]*watch - watchMap map[uint32]indexMap -) - -func (w *readDirChangesW) wakeupReader() error { - err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if err != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", err) - } - return nil -} - -func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { - attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) - if err != nil { - return "", os.NewSyscallError("GetFileAttributes", err) - } - if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { - h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), - windows.FILE_LIST_DIRECTORY, - windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, - nil, windows.OPEN_EXISTING, - windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) - if err != nil { - return nil, os.NewSyscallError("CreateFile", err) - } - - var fi windows.ByHandleFileInformation - err = windows.GetFileInformationByHandle(h, &fi) - if err != nil { - windows.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", err) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. 
-func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
-	pathname, recurse := recursivePath(pathname)
-
-	dir, err := w.getDir(pathname)
-	if err != nil {
-		return err
-	}
-
-	ino, err := w.getIno(dir)
-	if err != nil {
-		return err
-	}
-	w.mu.Lock()
-	watchEntry := w.watches.get(ino)
-	w.mu.Unlock()
-	if watchEntry == nil {
-		_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
-		if err != nil {
-			windows.CloseHandle(ino.handle)
-			return os.NewSyscallError("CreateIoCompletionPort", err)
-		}
-		watchEntry = &watch{
-			ino:     ino,
-			path:    dir,
-			names:   make(map[string]uint64),
-			recurse: recurse,
-			buf:     make([]byte, bufsize),
-		}
-		w.mu.Lock()
-		w.watches.set(ino, watchEntry)
-		w.mu.Unlock()
-		flags |= provisional
-	} else {
-		windows.CloseHandle(ino.handle)
-	}
-	if pathname == dir {
-		watchEntry.mask |= flags
-	} else {
-		watchEntry.names[filepath.Base(pathname)] |= flags
-	}
-
-	err = w.startRead(watchEntry)
-	if err != nil {
-		return err
-	}
-
-	if pathname == dir {
-		watchEntry.mask &= ^provisional
-	} else {
-		watchEntry.names[filepath.Base(pathname)] &= ^provisional
-	}
-	return nil
-}
-
-// Must run within the I/O thread.
-func (w *readDirChangesW) remWatch(pathname string) error {
-	pathname, recurse := recursivePath(pathname)
-
-	dir, err := w.getDir(pathname)
-	if err != nil {
-		return err
-	}
-	ino, err := w.getIno(dir)
-	if err != nil {
-		return err
-	}
-
-	w.mu.Lock()
-	watch := w.watches.get(ino)
-	w.mu.Unlock()
-
-	// Check for a nil watch before dereferencing it; the lookup returns nil
-	// when this path was never watched.
-	if watch == nil {
-		windows.CloseHandle(ino.handle)
-		return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
-	}
-	if recurse && !watch.recurse {
-		return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
-	}
-
-	err = windows.CloseHandle(ino.handle)
-	if err != nil {
-		w.sendError(os.NewSyscallError("CloseHandle", err))
-	}
-	if pathname == dir {
-		w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
-		watch.mask = 0
-	} else {
-		name := filepath.Base(pathname)
-		w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
-		delete(watch.names, name)
-	}
-
-	return w.startRead(watch)
-}
-
-// Must run within the I/O thread.
-func (w *readDirChangesW) deleteWatch(watch *watch) {
-	for name, mask := range watch.names {
-		if mask&provisional == 0 {
-			w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
-		}
-		delete(watch.names, name)
-	}
-	if watch.mask != 0 {
-		if watch.mask&provisional == 0 {
-			w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
-		}
-		watch.mask = 0
-	}
-}
-
-// Must run within the I/O thread.
-func (w *readDirChangesW) startRead(watch *watch) error {
-	err := windows.CancelIo(watch.ino.handle)
-	if err != nil {
-		w.sendError(os.NewSyscallError("CancelIo", err))
-		w.deleteWatch(watch)
-	}
-	mask := w.toWindowsFlags(watch.mask)
-	for _, m := range watch.names {
-		mask |= w.toWindowsFlags(m)
-	}
-	if mask == 0 {
-		err := windows.CloseHandle(watch.ino.handle)
-		if err != nil {
-			w.sendError(os.NewSyscallError("CloseHandle", err))
-		}
-		w.mu.Lock()
-		delete(w.watches[watch.ino.volume], watch.ino.index)
-		w.mu.Unlock()
-		return nil
-	}
-
-	// We need to pass the array, rather than the slice.
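-	// (ReadDirectoryChanges wants a stable pointer into the buffer's backing
-	// array; reflect.SliceHeader is used below to pull out that pointer and
-	// the length, since this code predates unsafe.Slice.)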
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) - rdErr := windows.ReadDirectoryChanges(watch.ino.handle, - (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), - watch.recurse, mask, nil, &watch.ov, 0) - if rdErr != nil { - err := os.NewSyscallError("ReadDirectoryChanges", rdErr) - if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *readDirChangesW) readEvents() { - var ( - n uint32 - key uintptr - ov *windows.Overlapped - ) - runtime.LockOSThread() - - for { - // This error is handled after the watch == nil check below. - qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) - - watch := (*watch)(unsafe.Pointer(ov)) - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - - err := windows.CloseHandle(w.port) - if err != nil { - err = os.NewSyscallError("CloseHandle", err) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch qErr { - case nil: - // No error - case windows.ERROR_MORE_DATA: - if watch == nil { - w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. - n = uint32(unsafe.Sizeof(watch.buf)) - } - case windows.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case windows.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) - continue - } - - var offset uint32 - for { - if n == 0 { - w.sendError(ErrEventOverflow) - break - } - - // Point "raw" to the event in the buffer - raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - - // Create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := windows.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - if debug { - internal.Debug(fullname, raw.Action) - } - - var mask uint64 - switch raw.Action { - case windows.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case windows.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case windows.FILE_ACTION_RENAMED_NEW_NAME: - // Update saved path of all sub-watches. 
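-				// (Renaming a directory invalidates the stored paths of every
-				// watch beneath it, so each sub-watch path is rewritten to the
-				// new prefix below.)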
- old := filepath.Join(watch.path, watch.rename) - w.mu.Lock() - for _, watchMap := range w.watches { - for _, ww := range watchMap { - if strings.HasPrefix(ww.path, old) { - ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) - } - } - } - w.mu.Unlock() - - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - w.sendEvent(fullname, "", watch.names[name]&mask) - } - if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) - } else { - w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) - } - - if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! - if offset >= n { - //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) - break - } - } - - if err := w.startRead(watch); err != nil { - w.sendError(err) - } - } -} - -func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSMODIFY != 0 { - m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { - switch action { - case windows.FILE_ACTION_ADDED: - return sysFSCREATE - case windows.FILE_ACTION_REMOVED: - return sysFSDELETE - case windows.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case windows.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} - -func (w *readDirChangesW) xSupports(op Op) bool { - if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || - op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { - return false - } - return true -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 0760efe916..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,494 +0,0 @@ -// Package fsnotify provides a cross-platform interface for file system -// notifications. -// -// Currently supported systems: -// -// - Linux via inotify -// - BSD, macOS via kqueue -// - Windows via ReadDirectoryChangesW -// - illumos via FEN -// -// # FSNOTIFY_DEBUG -// -// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to -// stderr. This can be useful to track down some problems, especially in cases -// where fsnotify is used as an indirect dependency. -// -// Every event will be printed as soon as there's something useful to print, -// with as little processing from fsnotify. 
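-//
-// For example, to enable it for a single run (the program name here is just a
-// placeholder):
-//
-//	FSNOTIFY_DEBUG=1 ./yourprogram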
-// -// Example output: -// -// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" -// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" -// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\\path\\to\\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all files, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - b backend - - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. 
A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - Errors chan error -} - -// Event represents a file system notification. -type Event struct { - // Path to the file or directory. - // - // Paths are relative to the input; for example with Add("dir") the Name - // will be set to "dir/file" if you create that file, but if you use - // Add("/path/to/dir") it will be "/path/to/dir/file". - Name string - - // File operation that triggered the event. - // - // This is a bitmask and some systems may send multiple operations at once. - // Use the Event.Has() method instead of comparing with ==. - Op Op - - // Create events will have this set to the old path if it's a rename. This - // only works when both the source and destination are watched. It's not - // reliable when watching individual files, only directories. - // - // For example "mv /tmp/file /tmp/rename" will emit: - // - // Event{Op: Rename, Name: "/tmp/file"} - // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} - renamedFrom string -} - -// Op describes a set of file operations. -type Op uint32 - -// The operations fsnotify can trigger; see the documentation on [Watcher] for a -// full description, and check them with [Event.Has]. -const ( - // A new pathname was created. - Create Op = 1 << iota - - // The pathname was written to; this does *not* mean the write has finished, - // and a write can be followed by more writes. - Write - - // The path was removed; any watches on it will be removed. Some "remove" - // operations may trigger a Rename if the file is actually moved (for - // example "remove to trash" is often a rename). - Remove - - // The path was renamed to something else; any watches on it will be - // removed. - Rename - - // File attributes were changed. - // - // It's generally not recommended to take action on this event, as it may - // get triggered very frequently by some software. For example, Spotlight - // indexing on macOS, anti-virus software, backup software, etc. - Chmod - - // File descriptor was opened. - // - // Only works on Linux and FreeBSD. - xUnportableOpen - - // File was read from. - // - // Only works on Linux and FreeBSD. - xUnportableRead - - // File opened for writing was closed. - // - // Only works on Linux and FreeBSD. 
- // - // The advantage of using this over Write is that it's more reliable than - // waiting for Write events to stop. It's also faster (if you're not - // listening to Write events): copying a file of a few GB can easily - // generate tens of thousands of Write events in a short span of time. - xUnportableCloseWrite - - // File opened for reading was closed. - // - // Only works on Linux and FreeBSD. - xUnportableCloseRead -) - -var ( - // ErrNonExistentWatch is used when Remove() is called on a path that's not - // added. - ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - - // ErrClosed is used when trying to operate on a closed Watcher. - ErrClosed = errors.New("fsnotify: watcher already closed") - - // ErrEventOverflow is reported from the Errors channel when there are too - // many events: - // - // - inotify: inotify returns IN_Q_OVERFLOW – because there are too - // many queued events (the fs.inotify.max_queued_events - // sysctl can be used to increase this). - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - - // ErrUnsupported is returned by AddWith() when WithOps() specified an - // Unportable event that's not supported on this platform. - xErrUnsupported = errors.New("fsnotify: not supported with this backend") -) - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) - b, err := newBackend(ev, errs) - if err != nil { - return nil, err - } - return &Watcher{b: b, Events: ev, Errors: errs}, nil -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) - b, err := newBufferedBackend(sz, ev, errs) - if err != nil { - return nil, err - } - return &Watcher{b: b, Events: ev, Errors: errs}, nil -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
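-//
-// For example, a minimal sketch of watching one directory ("/some/dir" is a
-// placeholder, and error/Errors handling is elided):
-//
-//	w, err := fsnotify.NewWatcher()
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	defer w.Close()
-//
-//	if err := w.Add("/some/dir"); err != nil {
-//		log.Fatal(err)
-//	}
-//	for event := range w.Events {
-//		if event.Has(fsnotify.Create) {
-//			log.Println("created:", event.Name)
-//		}
-//	}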
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(path string) error { return w.b.Add(path) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return w.b.Close() } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return w.b.WatchList() } - -// Supports reports if all the listed operations are supported by this platform. -// -// Create, Write, Remove, Rename, and Chmod are always supported. It can only -// return false for an Op starting with Unportable. -func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } - -func (o Op) String() string { - var b strings.Builder - if o.Has(Create) { - b.WriteString("|CREATE") - } - if o.Has(Remove) { - b.WriteString("|REMOVE") - } - if o.Has(Write) { - b.WriteString("|WRITE") - } - if o.Has(xUnportableOpen) { - b.WriteString("|OPEN") - } - if o.Has(xUnportableRead) { - b.WriteString("|READ") - } - if o.Has(xUnportableCloseWrite) { - b.WriteString("|CLOSE_WRITE") - } - if o.Has(xUnportableCloseRead) { - b.WriteString("|CLOSE_READ") - } - if o.Has(Rename) { - b.WriteString("|RENAME") - } - if o.Has(Chmod) { - b.WriteString("|CHMOD") - } - if b.Len() == 0 { - return "[no events]" - } - return b.String()[1:] -} - -// Has reports if this operation has the given operation. -func (o Op) Has(h Op) bool { return o&h != 0 } - -// Has reports if this event has the given operation. -func (e Event) Has(op Op) bool { return e.Op.Has(op) } - -// String returns a string representation of the event with their path. 
-func (e Event) String() string { - if e.renamedFrom != "" { - return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) - } - return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) -} - -type ( - backend interface { - Add(string) error - AddWith(string, ...addOpt) error - Remove(string) error - WatchList() []string - Close() error - xSupports(Op) bool - } - addOpt func(opt *withOpts) - withOpts struct { - bufsize int - op Op - noFollow bool - sendCreate bool - } -) - -var debug = func() bool { - // Check for exactly "1" (rather than mere existence) so we can add - // options/flags in the future. I don't know if we ever want that, but it's - // nice to leave the option open. - return os.Getenv("FSNOTIFY_DEBUG") == "1" -}() - -var defaultOpts = withOpts{ - bufsize: 65536, // 64K - op: Create | Write | Remove | Rename | Chmod, -} - -func getOptions(opts ...addOpt) withOpts { - with := defaultOpts - for _, o := range opts { - if o != nil { - o(&with) - } - } - return with -} - -// WithBufferSize sets the [ReadDirectoryChangesW] buffer size. -// -// This only has effect on Windows systems, and is a no-op for other backends. -// -// The default value is 64K (65536 bytes) which is the highest value that works -// on all filesystems and should be enough for most applications, but if you -// have a large burst of events it may not be enough. You can increase it if -// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). -// -// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -func WithBufferSize(bytes int) addOpt { - return func(opt *withOpts) { opt.bufsize = bytes } -} - -// WithOps sets which operations to listen for. The default is [Create], -// [Write], [Remove], [Rename], and [Chmod]. -// -// Excluding operations you're not interested in can save quite a bit of CPU -// time; in some use cases there may be hundreds of thousands of useless Write -// or Chmod operations per second. -// -// This can also be used to add unportable operations not supported by all -// platforms; unportable operations all start with "Unportable": -// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and -// [UnportableCloseRead]. -// -// AddWith returns an error when using an unportable operation that's not -// supported. Use [Watcher.Support] to check for support. -func withOps(op Op) addOpt { - return func(opt *withOpts) { opt.op = op } -} - -// WithNoFollow disables following symlinks, so the symlinks themselves are -// watched. -func withNoFollow() addOpt { - return func(opt *withOpts) { opt.noFollow = true } -} - -// "Internal" option for recursive watches on inotify. -func withCreate() addOpt { - return func(opt *withOpts) { opt.sendCreate = true } -} - -var enableRecurse = false - -// Check if this path is recursive (ends with "/..." or "\..."), and return the -// path with the /... stripped. -func recursivePath(path string) (string, bool) { - path = filepath.Clean(path) - if !enableRecurse { // Only enabled in tests for now. - return path, false - } - if filepath.Base(path) == "..." 
{ - return filepath.Dir(path), true - } - return path, false -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go deleted file mode 100644 index b0eab10090..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build darwin - -package internal - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES -) - -var maxfiles uint64 - -// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ -func SetRlimit() { - var l syscall.Rlimit - err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) - if err == nil && l.Cur != l.Max { - l.Cur = l.Max - syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) - } - maxfiles = l.Cur - - if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { - maxfiles = uint64(n) - } - - if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { - maxfiles = uint64(n) - } -} - -func Maxfiles() uint64 { return maxfiles } -func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } -func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go deleted file mode 100644 index 928319fb09..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go +++ /dev/null @@ -1,57 +0,0 @@ -package internal - -import "golang.org/x/sys/unix" - -var names = []struct { - n string - m uint32 -}{ - {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, - {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, - {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, - {"NOTE_CHILD", unix.NOTE_CHILD}, - {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, - {"NOTE_DELETE", unix.NOTE_DELETE}, - {"NOTE_EXEC", unix.NOTE_EXEC}, - {"NOTE_EXIT", unix.NOTE_EXIT}, - {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, - {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, - {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, - {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, - {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, - {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, - {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, - {"NOTE_EXTEND", unix.NOTE_EXTEND}, - {"NOTE_FFAND", unix.NOTE_FFAND}, - {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, - {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, - {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, - {"NOTE_FFNOP", unix.NOTE_FFNOP}, - {"NOTE_FFOR", unix.NOTE_FFOR}, - {"NOTE_FORK", unix.NOTE_FORK}, - {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, - {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, - {"NOTE_LINK", unix.NOTE_LINK}, - {"NOTE_LOWAT", unix.NOTE_LOWAT}, - {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, - {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, - {"NOTE_NONE", unix.NOTE_NONE}, - {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, - {"NOTE_OOB", unix.NOTE_OOB}, - //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
- {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, - {"NOTE_REAP", unix.NOTE_REAP}, - {"NOTE_RENAME", unix.NOTE_RENAME}, - {"NOTE_REVOKE", unix.NOTE_REVOKE}, - {"NOTE_SECONDS", unix.NOTE_SECONDS}, - {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, - {"NOTE_TRACK", unix.NOTE_TRACK}, - {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, - {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, - {"NOTE_USECONDS", unix.NOTE_USECONDS}, - {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, - {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, - {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, - {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, - {"NOTE_WRITE", unix.NOTE_WRITE}, -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go deleted file mode 100644 index 3186b0c349..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go +++ /dev/null @@ -1,33 +0,0 @@ -package internal - -import "golang.org/x/sys/unix" - -var names = []struct { - n string - m uint32 -}{ - {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, - {"NOTE_CHILD", unix.NOTE_CHILD}, - {"NOTE_DELETE", unix.NOTE_DELETE}, - {"NOTE_EXEC", unix.NOTE_EXEC}, - {"NOTE_EXIT", unix.NOTE_EXIT}, - {"NOTE_EXTEND", unix.NOTE_EXTEND}, - {"NOTE_FFAND", unix.NOTE_FFAND}, - {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, - {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, - {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, - {"NOTE_FFNOP", unix.NOTE_FFNOP}, - {"NOTE_FFOR", unix.NOTE_FFOR}, - {"NOTE_FORK", unix.NOTE_FORK}, - {"NOTE_LINK", unix.NOTE_LINK}, - {"NOTE_LOWAT", unix.NOTE_LOWAT}, - {"NOTE_OOB", unix.NOTE_OOB}, - {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, - {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, - {"NOTE_RENAME", unix.NOTE_RENAME}, - {"NOTE_REVOKE", unix.NOTE_REVOKE}, - {"NOTE_TRACK", unix.NOTE_TRACK}, - {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, - {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, - {"NOTE_WRITE", unix.NOTE_WRITE}, -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go deleted file mode 100644 index f69fdb930f..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go +++ /dev/null @@ -1,42 +0,0 @@ -package internal - -import "golang.org/x/sys/unix" - -var names = []struct { - n string - m uint32 -}{ - {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, - {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, - {"NOTE_CHILD", unix.NOTE_CHILD}, - {"NOTE_CLOSE", unix.NOTE_CLOSE}, - {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, - {"NOTE_DELETE", unix.NOTE_DELETE}, - {"NOTE_EXEC", unix.NOTE_EXEC}, - {"NOTE_EXIT", unix.NOTE_EXIT}, - {"NOTE_EXTEND", unix.NOTE_EXTEND}, - {"NOTE_FFAND", unix.NOTE_FFAND}, - {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, - {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, - {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, - {"NOTE_FFNOP", unix.NOTE_FFNOP}, - {"NOTE_FFOR", unix.NOTE_FFOR}, - {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, - {"NOTE_FORK", unix.NOTE_FORK}, - {"NOTE_LINK", unix.NOTE_LINK}, - {"NOTE_LOWAT", unix.NOTE_LOWAT}, - {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, - {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, - {"NOTE_OPEN", unix.NOTE_OPEN}, - {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, - {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, - {"NOTE_READ", unix.NOTE_READ}, - {"NOTE_RENAME", unix.NOTE_RENAME}, - {"NOTE_REVOKE", unix.NOTE_REVOKE}, - {"NOTE_SECONDS", unix.NOTE_SECONDS}, - {"NOTE_TRACK", unix.NOTE_TRACK}, - {"NOTE_TRACKERR", 
unix.NOTE_TRACKERR}, - {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, - {"NOTE_USECONDS", unix.NOTE_USECONDS}, - {"NOTE_WRITE", unix.NOTE_WRITE}, -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go deleted file mode 100644 index 607e683bd7..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly || darwin - -package internal - -import ( - "fmt" - "os" - "strings" - "time" - - "golang.org/x/sys/unix" -) - -func Debug(name string, kevent *unix.Kevent_t) { - mask := uint32(kevent.Fflags) - - var ( - l []string - unknown = mask - ) - for _, n := range names { - if mask&n.m == n.m { - l = append(l, n.n) - unknown ^= n.m - } - } - if unknown > 0 { - l = append(l, fmt.Sprintf("0x%x", unknown)) - } - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", - time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go deleted file mode 100644 index 35c734be43..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go +++ /dev/null @@ -1,56 +0,0 @@ -package internal - -import ( - "fmt" - "os" - "strings" - "time" - - "golang.org/x/sys/unix" -) - -func Debug(name string, mask, cookie uint32) { - names := []struct { - n string - m uint32 - }{ - {"IN_ACCESS", unix.IN_ACCESS}, - {"IN_ATTRIB", unix.IN_ATTRIB}, - {"IN_CLOSE", unix.IN_CLOSE}, - {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, - {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, - {"IN_CREATE", unix.IN_CREATE}, - {"IN_DELETE", unix.IN_DELETE}, - {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, - {"IN_IGNORED", unix.IN_IGNORED}, - {"IN_ISDIR", unix.IN_ISDIR}, - {"IN_MODIFY", unix.IN_MODIFY}, - {"IN_MOVE", unix.IN_MOVE}, - {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, - {"IN_MOVED_TO", unix.IN_MOVED_TO}, - {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, - {"IN_OPEN", unix.IN_OPEN}, - {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, - {"IN_UNMOUNT", unix.IN_UNMOUNT}, - } - - var ( - l []string - unknown = mask - ) - for _, n := range names { - if mask&n.m == n.m { - l = append(l, n.n) - unknown ^= n.m - } - } - if unknown > 0 { - l = append(l, fmt.Sprintf("0x%x", unknown)) - } - var c string - if cookie > 0 { - c = fmt.Sprintf("(cookie: %d) ", cookie) - } - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", - time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go deleted file mode 100644 index e5b3b6f694..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go +++ /dev/null @@ -1,25 +0,0 @@ -package internal - -import "golang.org/x/sys/unix" - -var names = []struct { - n string - m uint32 -}{ - {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, - {"NOTE_CHILD", unix.NOTE_CHILD}, - {"NOTE_DELETE", unix.NOTE_DELETE}, - {"NOTE_EXEC", unix.NOTE_EXEC}, - {"NOTE_EXIT", unix.NOTE_EXIT}, - {"NOTE_EXTEND", unix.NOTE_EXTEND}, - {"NOTE_FORK", unix.NOTE_FORK}, - {"NOTE_LINK", unix.NOTE_LINK}, - {"NOTE_LOWAT", unix.NOTE_LOWAT}, - {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, - {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, - {"NOTE_RENAME", 
unix.NOTE_RENAME}, - {"NOTE_REVOKE", unix.NOTE_REVOKE}, - {"NOTE_TRACK", unix.NOTE_TRACK}, - {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, - {"NOTE_WRITE", unix.NOTE_WRITE}, -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go deleted file mode 100644 index 1dd455bc5a..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go +++ /dev/null @@ -1,28 +0,0 @@ -package internal - -import "golang.org/x/sys/unix" - -var names = []struct { - n string - m uint32 -}{ - {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, - // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? - {"NOTE_CHILD", unix.NOTE_CHILD}, - {"NOTE_DELETE", unix.NOTE_DELETE}, - {"NOTE_EOF", unix.NOTE_EOF}, - {"NOTE_EXEC", unix.NOTE_EXEC}, - {"NOTE_EXIT", unix.NOTE_EXIT}, - {"NOTE_EXTEND", unix.NOTE_EXTEND}, - {"NOTE_FORK", unix.NOTE_FORK}, - {"NOTE_LINK", unix.NOTE_LINK}, - {"NOTE_LOWAT", unix.NOTE_LOWAT}, - {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, - {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, - {"NOTE_RENAME", unix.NOTE_RENAME}, - {"NOTE_REVOKE", unix.NOTE_REVOKE}, - {"NOTE_TRACK", unix.NOTE_TRACK}, - {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, - {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, - {"NOTE_WRITE", unix.NOTE_WRITE}, -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go deleted file mode 100644 index f1b2e73bd5..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go +++ /dev/null @@ -1,45 +0,0 @@ -package internal - -import ( - "fmt" - "os" - "strings" - "time" - - "golang.org/x/sys/unix" -) - -func Debug(name string, mask int32) { - names := []struct { - n string - m int32 - }{ - {"FILE_ACCESS", unix.FILE_ACCESS}, - {"FILE_MODIFIED", unix.FILE_MODIFIED}, - {"FILE_ATTRIB", unix.FILE_ATTRIB}, - {"FILE_TRUNC", unix.FILE_TRUNC}, - {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, - {"FILE_DELETE", unix.FILE_DELETE}, - {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, - {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, - {"UNMOUNTED", unix.UNMOUNTED}, - {"MOUNTEDOVER", unix.MOUNTEDOVER}, - {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, - } - - var ( - l []string - unknown = mask - ) - for _, n := range names { - if mask&n.m == n.m { - l = append(l, n.n) - unknown ^= n.m - } - } - if unknown > 0 { - l = append(l, fmt.Sprintf("0x%x", unknown)) - } - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", - time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go deleted file mode 100644 index 52bf4ce53b..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package internal - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "golang.org/x/sys/windows" -) - -func Debug(name string, mask uint32) { - names := []struct { - n string - m uint32 - }{ - {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, - {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, - {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, - {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, - {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, - } - - var ( - l []string - unknown = mask - 
) - for _, n := range names { - if mask&n.m == n.m { - l = append(l, n.n) - unknown ^= n.m - } - } - if unknown > 0 { - l = append(l, fmt.Sprintf("0x%x", unknown)) - } - fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", - time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go deleted file mode 100644 index 547df1df84..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build freebsd - -package internal - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES -) - -var maxfiles uint64 - -func SetRlimit() { - // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ - var l syscall.Rlimit - err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) - if err == nil && l.Cur != l.Max { - l.Cur = l.Max - syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) - } - maxfiles = uint64(l.Cur) -} - -func Maxfiles() uint64 { return maxfiles } -func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } -func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/internal.go deleted file mode 100644 index 7daa45e19e..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/internal.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package internal contains some helpers. -package internal diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go deleted file mode 100644 index 30976ce973..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !windows && !darwin && !freebsd - -package internal - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES -) - -var maxfiles uint64 - -func SetRlimit() { - // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ - var l syscall.Rlimit - err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) - if err == nil && l.Cur != l.Max { - l.Cur = l.Max - syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) - } - maxfiles = uint64(l.Cur) -} - -func Maxfiles() uint64 { return maxfiles } -func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } -func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/unix2.go deleted file mode 100644 index 37dfeddc28..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/unix2.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows - -package internal - -func HasPrivilegesForSymlink() bool { - return true -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go deleted file mode 100644 index a72c649549..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go +++ 
/dev/null @@ -1,41 +0,0 @@ -//go:build windows - -package internal - -import ( - "errors" - - "golang.org/x/sys/windows" -) - -// Just a dummy. -var ( - SyscallEACCES = errors.New("dummy") - UnixEACCES = errors.New("dummy") -) - -func SetRlimit() {} -func Maxfiles() uint64 { return 1<<64 - 1 } -func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } -func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } - -func HasPrivilegesForSymlink() bool { - var sid *windows.SID - err := windows.AllocateAndInitializeSid( - &windows.SECURITY_NT_AUTHORITY, - 2, - windows.SECURITY_BUILTIN_DOMAIN_RID, - windows.DOMAIN_ALIAS_RID_ADMINS, - 0, 0, 0, 0, 0, 0, - &sid) - if err != nil { - return false - } - defer windows.FreeSid(sid) - token := windows.Token(0) - member, err := token.IsMember(sid) - if err != nil { - return false - } - return member || token.IsElevated() -} diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go deleted file mode 100644 index f65e8fe3ed..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/openshift/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/openshift/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go deleted file mode 100644 index a29fc7aab6..0000000000 --- a/openshift/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/openshift/tools/vendor/github.com/fxamacker/cbor/v2/README.md b/openshift/tools/vendor/github.com/fxamacker/cbor/v2/README.md index af0a79507e..d072b81c73 100644 --- a/openshift/tools/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/openshift/tools/vendor/github.com/fxamacker/cbor/v2/README.md @@ -1,30 +1,31 @@ -# CBOR Codec in Go - - +

<!-- image: "CBOR Codec Go logo" -->
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. -`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). +`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. ## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. 
+
 Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
 
-<details><summary>Highlights</summary><p>
+<details><summary>🔎&nbsp; Highlights</summary><p>
 
__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -

<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p>
- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -


-</p></details>
-
-`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
-decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
-| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
-
-<details><summary>Benchmark details</summary><p>
-
-Latest comparison used:
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
-- go test -bench=. -benchmem -count=20
-
-#### Prior comparisons
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
-| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
-| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
-| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
-
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.6, linux/amd64, i5-13600K (DDR4)
-- go test -bench=. -benchmem -count=20
-
-</p></details>
-
-### Smaller Encodings with Struct Tags
-
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
-
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p>
-
- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -


-</p></details>
-
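Both the removed section above and the new "struct tag options" material later in this diff list `toarray` and `keyasint` without a side-by-side sample. A minimal hedged sketch follows; the `Point` and `Record` types, values, and expected hex are illustrative assumptions, not taken from this diff:

```Go
// Sketch of the toarray and keyasint struct tag options.
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// toarray: encoded as a fixed-position CBOR array, so no field names
// appear on the wire.
type Point struct {
	_ struct{} `cbor:",toarray"`
	X int
	Y int
}

// keyasint: encoded as a CBOR map whose keys are the integers 1 and 2
// instead of the field-name strings "ID" and "Name".
type Record struct {
	ID   uint64 `cbor:"1,keyasint"`
	Name string `cbor:"2,keyasint"`
}

func main() {
	p, _ := cbor.Marshal(Point{X: 1, Y: 2})
	fmt.Println("toarray: ", hex.EncodeToString(p)) // expected: 820102 (3 bytes)

	r, _ := cbor.Marshal(Record{ID: 7, Name: "a"})
	fmt.Println("keyasint:", hex.EncodeToString(r)) // expected: a20107026161 (6 bytes)
}
```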
-
-Example using different struct tags together:
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]
+> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+>
+> <details><summary>🔎&nbsp; Benchmark details</summary><p>
+>
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+>
+> #### Prior comparisons
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+>
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+>

+> </p></details>
+
+In contrast, some codecs can crash or use excessive resources while decoding bad data.
+
+> [!WARNING]
+> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+>
+> <details><summary>🔎&nbsp; gob fatal error (out of memory) 💥 decoding 181 bytes</summary><p>
+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>

+> </p></details>
+
+### Smaller Encodings with Struct Tag Options
+
+Struct tags automatically reduce encoded size of structs and improve speed.
+
+We can write less code by using struct tag options:
+- `toarray`: encode without field names (decode back to original struct)
+- `keyasint`: encode field names as integers (decode back to original struct)
+- `omitempty`: omit empty field when encoding
+- `omitzero`: omit zero-value field when encoding
+
+As a special case, struct field tag "-" omits the field.
+
+NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent the position of encoded array elements from changing. This allows the decoder to match encoded elements to their Go struct fields.
 
 ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
 
-API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
+> [!NOTE]
+> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
+> - `encoding/json`: 18 bytes of JSON
+> - `fxamacker/cbor`: 1 byte of CBOR
+>
+> <details><summary>🔎&nbsp; Encoding 3-level nested Go struct with omitempty</summary><p>
+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>

+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
+> <details><summary>🔎&nbsp; More about tinygo feature branch</summary>
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
+>
+> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
+> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). Users can specify a higher limit, but 24+ crashes tests when compiled with tinygo v0.33.
+> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing a needed feature.
+> - the encoding error message can be different when encoding a function type.
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. + +As a special case, struct field tag "-" omits the field. + +
<details><summary>🔎&nbsp; Example encoding with struct field tag "-"</summary><p>
+ +https://go.dev/play/p/aWEIFxd7InX + +```Go +// https://github.com/fxamacker/cbor/issues/652 +package main + +import ( + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// The `cbor:"-"` tag omits the Type field when encoding to CBOR. +type Entity struct { + _ struct{} `cbor:",toarray"` + ID uint64 `json:"id"` + Type string `cbor:"-" json:"typeOf"` + Name string `json:"name"` +} + +func main() { + entity := Entity{ + ID: 1, + Type: "int64", + Name: "Identifier", + } + + c, _ := cbor.Marshal(entity) + diag, _ := cbor.Diagnose(c) + fmt.Printf("CBOR in hex: %x\n", c) + fmt.Printf("CBOR in edn: %s\n", diag) + + j, _ := json.Marshal(entity) + fmt.Printf("JSON: %s\n", string(j)) + + fmt.Printf("JSON encoding is %d bytes\n", len(j)) + fmt.Printf("CBOR encoding is %d bytes\n", len(c)) + + // Output: + // CBOR in hex: 82016a4964656e746966696572 + // CBOR in edn: [1, "Identifier"] + // JSON: {"id":1,"typeOf":"int64","name":"Identifier"} + // JSON encoding is 45 bytes + // CBOR encoding is 13 bytes +} +``` + +

-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p>
+<details><summary>🔎&nbsp; Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p>
 
https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}

-<details><summary>Example using several struct tags</summary><p>
+<details><summary>🔎&nbsp; Example using struct tag options</summary><p>
 
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
+Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
 
 ### CBOR Tags
 
@@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
 
 `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
 
-<details><summary>Example using TagSet and TagOptions</summary><p>
+<details><summary>🔎&nbsp; Example using TagSet and TagOptions</summary><p>
 
```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ```

+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing the `cbor.Marshaler` and `cbor.Unmarshaler` interfaces.
+
+Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps, and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc.
+
+The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2).
+
🔎  Example using Embedded JSON Tag for CBOR (tag 262) + +```go +// https://github.com/fxamacker/cbor/issues/657 + +package cbor_test + +// NOTE: RFC 8949 does not mention tag number 262. IANA assigned +// CBOR tag number 262 as "Embedded JSON Object" specified by the +// document Embedded JSON Tag for CBOR: +// +// "Tag 262 can be applied to a byte string (major type 2) to indicate +// that the byte string is a JSON Object. The length of the byte string +// indicates the content." +// +// For more info, see Embedded JSON Tag for CBOR at: +// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// cborTagNumForEmbeddedJSON is the CBOR tag number 262. +const cborTagNumForEmbeddedJSON = 262 + +// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item +// with tag number 262 and the tag content is a JSON object "embedded" as a +// CBOR byte string (major type 2). +type EmbeddedJSON struct { + any +} + +func NewEmbeddedJSON(val any) EmbeddedJSON { + return EmbeddedJSON{val} +} + +// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the +// tag number 262 and the tag content is a JSON object that is +// "embedded" as a CBOR byte string. +func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) { + // Encode v to JSON object. + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // Create cbor.Tag representing a tagged CBOR data item. + tag := cbor.Tag{ + Number: cborTagNumForEmbeddedJSON, + Content: data, + } + + // Marshal to a tagged CBOR data item. + return cbor.Marshal(tag) +} + +// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON. +// The byte slice provided to this function must contain a single +// tagged CBOR data item with the tag number 262 and tag content +// must be a JSON object "embedded" as a CBOR byte string. +func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error { + // Unmarshal tagged CBOR data item. + var tag cbor.Tag + if err := cbor.Unmarshal(b, &tag); err != nil { + return err + } + + // Check tag number. + if tag.Number != cborTagNumForEmbeddedJSON { + return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON) + } + + // Check tag content. + jsonData, isByteString := tag.Content.([]byte) + if !isByteString { + return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content) + } + + // Unmarshal JSON object. + return json.Unmarshal(jsonData, v) +} + +// MarshalJSON encodes EmbeddedJSON to a JSON object. +func (v EmbeddedJSON) MarshalJSON() ([]byte, error) { + return json.Marshal(v.any) +} + +// UnmarshalJSON decodes a JSON object. +func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error { + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + return dec.Decode(&v.any) +} + +func Example_embeddedJSONTagForCBOR() { + value := NewEmbeddedJSON(map[string]any{ + "name": "gopher", + "id": json.Number("42"), + }) + + data, err := cbor.Marshal(value) + if err != nil { + panic(err) + } + + fmt.Printf("cbor: %x\n", data) + + var v EmbeddedJSON + err = cbor.Unmarshal(data, &v) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", v.any) + for k, v := range v.any.(map[string]any) { + fmt.Printf(" %s: %v (%T)\n", k, v, v) + } +} +``` + +
+</details>
+
 ### Functions and Interfaces
 
-<details><summary>Functions and interfaces at a glance</summary><p>
+<details><summary>🔎&nbsp; Functions and interfaces at a glance</summary><p>
 
Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed. Other useful functions: - `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. - `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes. -- `Wellformed` returns true if the the CBOR data item is well-formed. +- `Wellformed` returns true if the CBOR data item is well-formed. Interfaces identical or comparable to Go `encoding` packages include: `Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. @@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. +- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. +- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. +- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. + +v2.9.0 passed fuzz tests and is production quality. + +The minimum version of Go required to build: +- v2.8.0 and newer releases require go 1.20+. +- v2.7.1 and older releases require go 1.17+. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. -v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). +[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. 
It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). __IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. @@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. bob_dylan -// Nice to see you! --> nice_to_see_you -// widgetID --> widget_id -func Underscore(s string) string { - return New(s).Underscore().String() -} - -// Underscore a string -// bob dylan --> bob_dylan -// Nice to see you! --> nice_to_see_you -// widgetID --> widget_id -func (i Ident) Underscore() Ident { - out := make([]string, 0, len(i.Parts)) - for _, part := range i.Parts { - var x strings.Builder - x.Grow(len(part)) - for _, c := range part { - if unicode.IsLetter(c) || unicode.IsDigit(c) { - x.WriteRune(c) - } - } - if x.Len() > 0 { - out = append(out, x.String()) - } - } - return New(strings.ToLower(strings.Join(out, "_"))) -} diff --git a/openshift/tools/vendor/github.com/gobuffalo/flect/version.go b/openshift/tools/vendor/github.com/gobuffalo/flect/version.go deleted file mode 100644 index 79486ed2c2..0000000000 --- a/openshift/tools/vendor/github.com/gobuffalo/flect/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package flect - -//Version holds Flect version number -const Version = "v1.0.0" diff --git a/openshift/tools/vendor/github.com/google/btree/README.md b/openshift/tools/vendor/github.com/google/btree/README.md deleted file mode 100644 index eab5dbf7ba..0000000000 --- a/openshift/tools/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# BTree implementation for Go - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. diff --git a/openshift/tools/vendor/github.com/google/btree/btree.go b/openshift/tools/vendor/github.com/google/btree/btree.go deleted file mode 100644 index 6f5184fef7..0000000000 --- a/openshift/tools/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,893 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.18 -// +build !go1.18 - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. 
-// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. -package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. -type FreeList struct { - mu sync.Mutex - freelist []*node -} - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeList(size int) *FreeList { - return &FreeList{freelist: make([]*node, 0, size)} -} - -func (f *FreeList) newNode() (n *node) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. 
-type ItemIterator func(i Item) bool - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. -func NewWithFreeList(degree int, f *FreeList) *BTree { - if degree <= 1 { - panic("bad degree") - } - return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, - } -} - -// items stores items in a node. -type items []Item - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *items) insertAt(index int, item Item) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items) removeAt(index int) Item { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. 
-// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node struct { - items items - children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node) split(i int) (Item, *node) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. -func (n *node) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. -func (n *node) insert(item Item, maxItems int) Item { - i, found := n.items.find(item) - if found { - out := n.items[i] - n.items[i] = item - return out - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return nil - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case item.Less(inTree): - // no change, we want first split node - case inTree.Less(item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. -func (n *node) get(key Item) Item { - i, found := n.items.find(key) - if found { - return n.items[i] - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return nil -} - -// min returns the first item in the subtree. -func min(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return nil - } - return n.items[0] -} - -// max returns the last item in the subtree. -func max(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return nil - } - return n.items[len(n.items)-1] -} - -// toRemove details what item to remove in a node.remove call. 
-type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. -// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. 
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. 
-func (n *node) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { - freelist *FreeList -} - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTree) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTree) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). 
-func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. 
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range [last, pivot), until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. -func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. 
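A sketch of the freelist interaction behind Clear, per the complexity notes above; NewWithFreeList and DefaultFreeListSize are the package's own exports:

```go
package main

import "github.com/google/btree"

func main() {
	fl := btree.NewFreeList(btree.DefaultFreeListSize)
	t := btree.NewWithFreeList(2, fl)
	for i := 0; i < 1000; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}

	// O(freelist size): owned nodes are recycled until the freelist fills.
	// Clear(false) would instead drop the root in O(1) and leave the
	// subtree to the garbage collector.
	t.Clear(true)

	// The next round of inserts reuses the recycled nodes.
	for i := 0; i < 1000; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}
}
```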
-func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/openshift/tools/vendor/github.com/google/btree/btree_generic.go b/openshift/tools/vendor/github.com/google/btree/btree_generic.go deleted file mode 100644 index e44a0f4880..0000000000 --- a/openshift/tools/vendor/github.com/google/btree/btree_generic.go +++ /dev/null @@ -1,1083 +0,0 @@ -// Copyright 2014-2022 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.18 -// +build go1.18 - -// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific -// instantiation of that generic for the Item interface, with a backwards- -// compatible API. Before go1.18, generics are not supported, -// and BTree is just an implementation based around the Item interface. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. 
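The interface-overhead discussion in the package comment above is what the generic variant avoids: a BTreeG[int] stores ints directly in node slices, with no per-item interface header. A minimal sketch of the ordered-type constructor:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// NewOrderedG uses the built-in '<' for any Ordered type parameter.
	tg := btree.NewOrderedG[int](32)
	for i := 0; i < 10; i++ {
		tg.ReplaceOrInsert(i)
	}
	if v, ok := tg.Get(7); ok {
		fmt.Println(v) // 7, as a plain int -- no type assertion needed
	}
}
```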
-// -// There are two implementations; those suffixed with 'G' are generics, usable -// for any type, and require a passed-in "less" function to define their ordering. -// Those without this prefix are specific to the 'Item' interface, and use -// its 'Less' function for ordering. -package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -// FreeListG represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList, in particular when they're created with Clone. -// Two Btrees using the same freelist are safe for concurrent write access. -type FreeListG[T any] struct { - mu sync.Mutex - freelist []*node[T] -} - -// NewFreeListG creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeListG[T any](size int) *FreeListG[T] { - return &FreeListG[T]{freelist: make([]*node[T], 0, size)} -} - -func (f *FreeListG[T]) newNode() (n *node[T]) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node[T]) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIteratorG[T any] func(item T) bool - -// Ordered represents the set of types for which the '<' operator work. -type Ordered interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string -} - -// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. -func Less[T Ordered]() LessFunc[T] { - return func(a, b T) bool { return a < b } -} - -// NewOrderedG creates a new B-Tree for ordered types. -func NewOrderedG[T Ordered](degree int) *BTreeG[T] { - return NewG[T](degree, Less[T]()) -} - -// NewG creates a new B-Tree with the given degree. -// -// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -// -// The passed-in LessFunc determines how objects of type T are ordered. -func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { - return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) -} - -// NewWithFreeListG creates a new B-Tree that uses the given node free list. -func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { - if degree <= 1 { - panic("bad degree") - } - return &BTreeG[T]{ - degree: degree, - cow: ©OnWriteContext[T]{freelist: f, less: less}, - } -} - -// items stores items in a node. -type items[T any] []T - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. 
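A sketch of free-list sharing as the FreeListG comment above permits: the shared list itself is safe for concurrent writers, though each individual tree still is not:

```go
package main

import "github.com/google/btree"

func main() {
	fl := btree.NewFreeListG[string](btree.DefaultFreeListSize)

	// Both trees draw nodes from (and return nodes to) the same pool.
	t1 := btree.NewWithFreeListG(2, btree.Less[string](), fl)
	t2 := btree.NewWithFreeListG(2, btree.Less[string](), fl)

	t1.ReplaceOrInsert("alpha")
	t2.ReplaceOrInsert("beta")
	t1.Clear(true) // recycled nodes become available to t2 as well
}
```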
-func (s *items[T]) insertAt(index int, item T) { - var zero T - *s = append(*s, zero) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items[T]) removeAt(index int) T { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - var zero T - (*s)[len(*s)-1] = zero - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items[T]) pop() (out T) { - index := len(*s) - 1 - out = (*s)[index] - var zero T - (*s)[index] = zero - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items[T]) truncate(index int) { - var toClear items[T] - *s, toClear = (*s)[:index], (*s)[index:] - var zero T - for i := 0; i < len(toClear); i++ { - toClear[i] = zero - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return less(item, s[i]) - }) - if i > 0 && !less(s[i-1], item) { - return i - 1, true - } - return i, false -} - -// node is an internal node in a tree. -// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node[T any] struct { - items items[T] - children items[*node[T]] - cow *copyOnWriteContext[T] -} - -func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items[T], len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(items[*node[T]], len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node[T]) mutableChild(i int) *node[T] { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node[T]) split(i int) (T, *node[T]) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. -func (n *node[T]) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. 
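The find logic above derives equality from the LessFunc alone (!less(a, b) && !less(b, a)), which is why ordering only by key makes same-key values replace each other. A sketch with a hypothetical entry type:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

type entry struct {
	key string
	val int
}

func main() {
	// Order by key only: entries with equal keys compare as "the same item".
	byKey := func(a, b entry) bool { return a.key < b.key }

	t := btree.NewG[entry](2, byKey)
	t.ReplaceOrInsert(entry{"x", 1})
	old, replaced := t.ReplaceOrInsert(entry{"x", 2}) // replaces, not duplicates
	fmt.Println(old, replaced, t.Len())               // {x 1} true 1
}
```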
-func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { - i, found := n.items.find(item, n.cow.less) - if found { - out := n.items[i] - n.items[i] = item - return out, true - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case n.cow.less(item, inTree): - // no change, we want first split node - case n.cow.less(inTree, item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out, true - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. -func (n *node[T]) get(key T) (_ T, _ bool) { - i, found := n.items.find(key, n.cow.less) - if found { - return n.items[i], true - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return -} - -// min returns the first item in the subtree. -func min[T any](n *node[T]) (_ T, found bool) { - if n == nil { - return - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return - } - return n.items[0], true -} - -// max returns the last item in the subtree. -func max[T any](n *node[T]) (_ T, found bool) { - if n == nil { - return - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return - } - return n.items[len(n.items)-1], true -} - -// toRemove details what item to remove in a node.remove call. -type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop(), true - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0), true - } - i = 0 - case removeItem: - i, found = n.items.find(item, n.cow.less) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i), true - } - return - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - var zero T - n.items[i], _ = child.remove(zero, minItems, removeMax) - return out, true - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. 
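The removal paths above surface to callers as a (value, ok) pair; a quick sketch of the three delete entry points:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tg := btree.NewOrderedG[int](2)
	tg.ReplaceOrInsert(10)

	if v, ok := tg.Delete(10); ok {
		fmt.Println("removed", v)
	}
	if _, ok := tg.Delete(10); !ok {
		fmt.Println("absent: (zero value, false)")
	}
	if _, ok := tg.DeleteMin(); !ok {
		fmt.Println("empty tree: DeleteMin/DeleteMax also report false")
	}
}
```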
-// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. -func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -type optionalItem[T any] struct { - item T - valid bool -} - -func optional[T any](item T) optionalItem[T] { - return optionalItem[T]{item: item, valid: true} -} -func empty[T any]() optionalItem[T] { - return optionalItem[T]{} -} - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. 
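The includeStart flag described above is what separates the GreaterOrEqual and GreaterThan flavors of the public iterators; for example:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tg := btree.NewOrderedG[int](2)
	for _, v := range []int{1, 2, 3, 4, 5} {
		tg.ReplaceOrInsert(v)
	}

	// includeStart=true under the hood: the pivot itself is visited.
	tg.AscendGreaterOrEqual(3, func(v int) bool {
		fmt.Print(v, " ") // 3 4 5
		return true
	})
	fmt.Println()

	// The strictly-greater variant skips the pivot, descending.
	tg.DescendGreaterThan(3, func(v int) bool {
		fmt.Print(v, " ") // 5 4
		return true
	})
}
```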
-func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start.valid { - index, _ = n.items.find(start.item, n.cow.less) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { - hit = true - continue - } - hit = true - if stop.valid && !n.cow.less(n.items[i], stop.item) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start.valid { - index, found = n.items.find(start.item, n.cow.less) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start.valid && !n.cow.less(n.items[i], start.item) { - if !includeStart || hit || n.cow.less(start.item, n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop.valid && !n.cow.less(stop.item, n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// print is used for testing/debugging purposes. -func (n *node[T]) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTreeG is a generic implementation of a B-Tree. -// -// BTreeG stores items of type T in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTreeG[T any] struct { - degree int - length int - root *node[T] - cow *copyOnWriteContext[T] -} - -// LessFunc[T] determines how to order a type 'T'. It should implement a strict -// ordering, and should return true if within that ordering, 'a' < 'b'. -type LessFunc[T any] func(a, b T) bool - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext[T any] struct { - freelist *FreeListG[T] - less LessFunc[T] -} - -// Clone clones the btree, lazily. 
Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTreeG[T]) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTreeG[T]) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext[T]) newNode() (n *node[T]) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). -func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned, -// and the second return value is true. Otherwise, (zeroValue, false) -// -// nil cannot be added to the tree (will panic). -func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) { - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out, outb := t.root.insert(item, t.maxItems()) - if !outb { - t.length++ - } - return out, outb -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns (zeroValue, false). -func (t *BTreeG[T]) Delete(item T) (T, bool) { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns (zeroValue, false). -func (t *BTreeG[T]) DeleteMin() (T, bool) { - var zero T - return t.deleteItem(zero, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. 
-// If no such item exists, returns (zeroValue, false). -func (t *BTreeG[T]) DeleteMax() (T, bool) { - var zero T - return t.deleteItem(zero, removeMax) -} - -func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { - if t.root == nil || len(t.root.items) == 0 { - return - } - t.root = t.root.mutableFor(t.cow) - out, outb := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if outb { - t.length-- - } - return out, outb -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. -func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range [last, pivot), until iterator returns false. -func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { - if t.root == nil { - return - } - t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns -// (zeroValue, false) if unable to find that item. 
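Because the iterator's boolean return threads all the way through iterate, range queries can stop as soon as they have enough results; a small helper sketch:

```go
package main

import "github.com/google/btree"

// firstK collects at most k items >= pivot; returning false from the
// callback aborts the walk as soon as the quota is met.
func firstK(t *btree.BTreeG[int], pivot, k int) []int {
	out := make([]int, 0, k)
	t.AscendGreaterOrEqual(pivot, func(v int) bool {
		out = append(out, v)
		return len(out) < k
	})
	return out
}

func main() {
	tg := btree.NewOrderedG[int](2)
	for i := 0; i < 100; i++ {
		tg.ReplaceOrInsert(i)
	}
	_ = firstK(tg, 40, 5) // [40 41 42 43 44]
}
```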
-func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { - if t.root == nil { - return - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. -func (t *BTreeG[T]) Min() (_ T, _ bool) { - return min(t.root) -} - -// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. -func (t *BTreeG[T]) Max() (_ T, _ bool) { - return max(t.root) -} - -// Has returns true if the given key is in the tree. -func (t *BTreeG[T]) Has(key T) bool { - _, ok := t.Get(key) - return ok -} - -// Len returns the number of items currently in the tree. -func (t *BTreeG[T]) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. -func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree BTreeG[Item] - -var itemLess LessFunc[Item] = func(a, b Item) bool { - return a.Less(b) -} - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return (*BTree)(NewG[Item](degree, itemLess)) -} - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. -type FreeList FreeListG[Item] - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. 
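The wrappers that follow translate the generic (value, ok) results back into the old nil-based contract; side by side:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// Generic API: absence is an explicit bool.
	tg := btree.NewOrderedG[int](2)
	if _, ok := tg.Min(); !ok {
		fmt.Println("empty generic tree")
	}

	// Classic API: the same call maps absence back to a nil Item.
	tc := btree.New(2)
	if tc.Min() == nil {
		fmt.Println("empty classic tree")
	}
}
```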
-func NewFreeList(size int) *FreeList { - return (*FreeList)(NewFreeListG[Item](size)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. -func NewWithFreeList(degree int, f *FreeList) *BTree { - return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f))) -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIterator ItemIteratorG[Item] - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - return (*BTree)((*BTreeG[Item])(t).Clone()) -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - i, _ := (*BTreeG[Item])(t).Delete(item) - return i -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMax() Item { - i, _ := (*BTreeG[Item])(t).DeleteMax() - return i -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - i, _ := (*BTreeG[Item])(t).DeleteMin() - return i -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - i, _ := (*BTreeG[Item])(t).Get(key) - return i -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - i, _ := (*BTreeG[Item])(t).Max() - return i -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - i, _ := (*BTreeG[Item])(t).Min() - return i -} - -// Has returns true if the given key is in the tree. -func (t *BTree) Has(key Item) bool { - return (*BTreeG[Item])(t).Has(key) -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item) - return i -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator)) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. 
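For the classic interface-based API wrapped below, callers supply ordering by implementing Item; a sketch with a hypothetical event type:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

// event orders by timestamp; the type assertion is the cost of the
// pre-generics Item interface.
type event struct {
	ts   int64
	name string
}

func (e event) Less(than btree.Item) bool {
	return e.ts < than.(event).ts
}

func main() {
	t := btree.New(2)
	t.ReplaceOrInsert(event{ts: 2, name: "b"})
	t.ReplaceOrInsert(event{ts: 1, name: "a"})
	t.Ascend(func(i btree.Item) bool {
		fmt.Println(i.(event).name) // a, then b
		return true
	})
}
```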
-func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. -func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range [last, pivot), until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return (*BTreeG[Item])(t).Len() -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. 
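The O(tree size) case in the Clear comment above is reachable via Clone: after cloning, the pre-existing nodes belong to neither tree's fresh write context, so neither can reclaim them. A sketch:

```go
package main

import "github.com/google/btree"

func main() {
	t := btree.New(2)
	for i := 0; i < 100; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}

	t2 := t.Clone() // both trees now hold new copy-on-write contexts

	// Walks every shared node looking for ones it owns; since the old
	// context owns them all, nothing lands in the freelist.
	t2.Clear(true)
	_ = t
}
```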
-func (t *BTree) Clear(addNodesToFreelist bool) { - (*BTreeG[Item])(t).Clear(addNodesToFreelist) -} diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/context.go b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/context.go index 1bfe961219..26b31e51e3 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/context.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/context.go @@ -15,7 +15,7 @@ package compiler import ( - yaml "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) // Context contains state of the compiler as it traverses a document. diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/extensions.go b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/extensions.go index 16ae66faa3..efa07f2a90 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/extensions.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -20,9 +20,9 @@ import ( "os/exec" "strings" + yaml "go.yaml.in/yaml/v3" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - yaml "gopkg.in/yaml.v3" extensions "github.com/google/gnostic-models/extensions" ) diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/helpers.go b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/helpers.go index 975d65e8f8..a83261eb6c 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/helpers.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/helpers.go @@ -20,7 +20,7 @@ import ( "sort" "strconv" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/jsonschema" ) diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/reader.go b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/reader.go index be0e8b40c8..da409d6b36 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/compiler/reader.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/compiler/reader.go @@ -24,7 +24,7 @@ import ( "strings" "sync" - yaml "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) var verboseReader = false diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/models.go b/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/models.go index 4781bdc5f5..a42b8e0035 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/models.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/models.go @@ -16,7 +16,7 @@ // of JSON Schemas. package jsonschema -import "gopkg.in/yaml.v3" +import "go.yaml.in/yaml/v3" // The Schema struct models a JSON Schema and, because schemas are // defined hierarchically, contains many references to itself. diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/reader.go b/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/reader.go index b8583d4660..4f1fe0c08c 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/reader.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/reader.go @@ -21,7 +21,7 @@ import ( "io/ioutil" "strconv" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) // This is a global map of all known Schemas. 
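The gnostic-models hunks here and below are a mechanical migration of the YAML dependency from the gopkg.in/yaml.v3 import path to its go.yaml.in/yaml/v3 successor; the call sites are untouched, so each file needs only its import line swapped. A representative sketch (the package name and parse helper are hypothetical):

```go
package compilerexample

import (
	// previously: yaml "gopkg.in/yaml.v3"
	yaml "go.yaml.in/yaml/v3"
)

// parse shows that call sites compile unchanged after the path swap.
func parse(b []byte) (map[string]any, error) {
	var out map[string]any
	if err := yaml.Unmarshal(b, &out); err != nil {
		return nil, err
	}
	return out, nil
}
```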
diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/writer.go b/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/writer.go index 340dc5f933..19f5ddeae2 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/writer.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/jsonschema/writer.go @@ -17,7 +17,7 @@ package jsonschema import ( "fmt" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) const indentation = " " diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go index d71fe6d545..de337d80c8 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) @@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + message := "contains an invalid AdditionalPropertiesItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -2543,7 +2543,7 @@ func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyPara // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid NonBodyParameter") + message := "contains an invalid NonBodyParameter" err := compiler.NewError(context, message) errors = []error{err} } @@ -3271,7 +3271,7 @@ func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid Parameter") + message := "contains an invalid Parameter" err := compiler.NewError(context, message) errors = []error{err} } @@ -3345,7 +3345,7 @@ func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersIte // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ParametersItem") + message := "contains an invalid ParametersItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -4561,7 +4561,7 @@ func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ResponseValue") + message := "contains an invalid ResponseValue" err := compiler.NewError(context, message) errors = []error{err} } @@ -5030,7 +5030,7 @@ func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SchemaItem") + message := "contains an invalid SchemaItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -5160,7 +5160,7 @@ func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*Secu // since the oneof matched one of its 
possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem") + message := "contains an invalid SecurityDefinitionsItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -6930,7 +6930,7 @@ func (m *BodyParameter) ToRawInfo() *yaml.Node { // always include this required field. info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7149,7 +7149,7 @@ func (m *FileSchema) ToRawInfo() *yaml.Node { // always include this required field. info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - if m.ReadOnly != false { + if m.ReadOnly { info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } @@ -7176,7 +7176,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7192,7 +7192,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } - if m.AllowEmptyValue != false { + if m.AllowEmptyValue { info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } @@ -7220,7 +7220,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7228,7 +7228,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7252,7 +7252,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7306,7 +7306,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, 
compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7314,7 +7314,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7338,7 +7338,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7373,7 +7373,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7413,7 +7413,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7421,7 +7421,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7445,7 +7445,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7940,7 +7940,7 @@ func (m *Operation) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) } - if m.Deprecated != false { + if m.Deprecated { info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) } @@ -8110,7 +8110,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { 
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8118,7 +8118,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8142,7 +8142,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8218,7 +8218,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8226,7 +8226,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8250,7 +8250,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8296,7 +8296,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -8312,7 +8312,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } - if m.AllowEmptyValue != false { + if m.AllowEmptyValue { info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } @@ -8340,7 +8340,7 @@ func 
(m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8348,7 +8348,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8372,7 +8372,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8514,7 +8514,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8522,7 +8522,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8546,7 +8546,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8610,7 +8610,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) } - if m.ReadOnly != false { + if m.ReadOnly { info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } @@ -8796,11 +8796,11 @@ func (m *Xml) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) } - if m.Attribute != false { + if m.Attribute { info.Content = append(info.Content, 
compiler.NewScalarNodeForString("attribute")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) } - if m.Wrapped != false { + if m.Wrapped { info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) } diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/document.go b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/document.go index e96ac0d6da..89469a13ed 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/document.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv2/document.go @@ -15,7 +15,7 @@ package openapi_v2 import ( - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go index 4b1131ce1c..662772dd95 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) @@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + message := "contains an invalid AdditionalPropertiesItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -113,7 +113,7 @@ func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpress // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AnyOrExpression") + message := "contains an invalid AnyOrExpression" err := compiler.NewError(context, message) errors = []error{err} } @@ -227,7 +227,7 @@ func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*Callback // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid CallbackOrReference") + message := "contains an invalid CallbackOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -979,7 +979,7 @@ func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOr // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ExampleOrReference") + message := "contains an invalid ExampleOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -1320,7 +1320,7 @@ func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrRe // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid HeaderOrReference") + message := "contains an invalid HeaderOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -1713,7 +1713,7 @@ func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrRefere // since the oneof matched one of its possibilities, 
discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid LinkOrReference") + message := "contains an invalid LinkOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3090,7 +3090,7 @@ func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*Paramet // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ParameterOrReference") + message := "contains an invalid ParameterOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3606,7 +3606,7 @@ func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*Reque // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid RequestBodyOrReference") + message := "contains an invalid RequestBodyOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3743,7 +3743,7 @@ func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*Response // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ResponseOrReference") + message := "contains an invalid ResponseOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -4310,7 +4310,7 @@ func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrRe // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SchemaOrReference") + message := "contains an invalid SchemaOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -4543,7 +4543,7 @@ func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*Se // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference") + message := "contains an invalid SecuritySchemeOrReference" err := compiler.NewError(context, message) errors = []error{err} } diff --git a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/document.go b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/document.go index 1cee467735..499ff883c5 100644 --- a/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/document.go +++ b/openshift/tools/vendor/github.com/google/gnostic-models/openapiv3/document.go @@ -15,7 +15,7 @@ package openapi_v3 import ( - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) diff --git a/openshift/tools/vendor/github.com/google/go-cmp/LICENSE b/openshift/tools/vendor/github.com/google/go-cmp/LICENSE deleted file mode 100644 index 32017f8fa1..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2017 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/compare.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/compare.go deleted file mode 100644 index 0f5b8a48c6..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/compare.go +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cmp determines equality of values. -// -// This package is intended to be a more powerful and safer alternative to -// [reflect.DeepEqual] for comparing whether two values are semantically equal. -// It is intended to only be used in tests, as performance is not a goal and -// it may panic if it cannot compare the values. Its propensity towards -// panicking means that its unsuitable for production environments where a -// spurious panic may be fatal. -// -// The primary features of cmp are: -// -// - When the default behavior of equality does not suit the test's needs, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as -// they are within some tolerance of each other. -// -// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method -// to determine equality. This allows package authors to determine -// the equality operation for the types that they define. -// -// - If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on -// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], -// unexported fields are not compared by default; they result in panics -// unless suppressed by using an [Ignore] option -// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) -// or explicitly compared using the [Exporter] option. -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/function" - "github.com/google/go-cmp/cmp/internal/value" -) - -// TODO(≥go1.18): Use any instead of interface{}. 
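For orientation, the file whose deletion begins here (compare.go) is go-cmp's public entry point. A minimal usage sketch of its Equal and Diff functions, with illustrative values but real cmp and cmpopts APIs:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// config is a hypothetical struct used only for this sketch.
type config struct {
	Name    string
	Weights []float64
}

func main() {
	x := config{Name: "capi", Weights: []float64{1, 2}}
	y := config{Name: "capi", Weights: []float64{1, 2 + 1e-9}}

	// Strict comparison notices the float difference.
	fmt.Println(cmp.Equal(x, y)) // false

	// EquateApprox tolerates tiny float deviations; EquateEmpty would also
	// equate nil with empty slices or maps.
	opts := cmp.Options{cmpopts.EquateApprox(0, 1e-6), cmpopts.EquateEmpty()}
	fmt.Println(cmp.Equal(x, y, opts))      // true
	fmt.Println(cmp.Diff(x, y, opts) == "") // true: Diff is empty iff Equal
}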
- -// Equal reports whether x and y are equal by recursively applying the -// following rules in the given order to x and y and all of their sub-values: -// -// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that -// remain after applying all path filters, value filters, and type filters. -// If at least one [Ignore] exists in S, then the comparison is ignored. -// If the number of [Transformer] and [Comparer] options in S is non-zero, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single [Transformer], then use that to transform -// the current values and recursively call Equal on the output values. -// If S contains a single [Comparer], then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. -// -// - If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. -// -// - Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, -// and channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. -// -// Structs are equal if recursively calling Equal on all fields report equal. -// If a struct contains unexported fields, Equal panics unless an [Ignore] option -// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field -// or the [Exporter] option explicitly permits comparing the unexported field. -// -// Slices are equal if they are both nil or both non-nil, where recursively -// calling Equal on all non-ignored slice or array elements report equal. -// Empty non-nil slices and nil slices are not equal; to equate empty slices, -// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. -// -// Maps are equal if they are both nil or both non-nil, where recursively -// calling Equal on all non-ignored map entries report equal. -// Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using -// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. -// Empty non-nil maps and nil maps are not equal; to equate empty maps, -// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. -// -// Pointers and interfaces are equal if they are both nil or both non-nil, -// where they have the same underlying concrete type and recursively -// calling Equal on the underlying values reports equal. -// -// Before recursing into a pointer, slice element, or map, the current path -// is checked to detect whether the address has already been visited. -// If there is a cycle, then the pointed at values are considered equal -// only if both addresses were previously visited in the same path step. -func Equal(x, y interface{}, opts ...Option) bool { - s := newState(opts) - s.compareAny(rootStep(x, y)) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values: -// y - x. It returns an empty string if and only if Equal returns true for the -// same input values and options. -// -// The output is displayed as a literal in pseudo-Go syntax. 
-// At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added from y, and the lack of a prefix -// indicates an element common to both x and y. If possible, the output -// uses fmt.Stringer.String or error.Error methods to produce more humanly -// readable outputs. In such cases, the string is prefixed with either an -// 's' or 'e' character, respectively, to indicate that the method was called. -// -// Do not depend on this output being stable. If you need the ability to -// programmatically interpret the difference, consider using a custom Reporter. -func Diff(x, y interface{}, opts ...Option) string { - s := newState(opts) - - // Optimization: If there are no other reporters, we can optimize for the - // common case where the result is equal (and thus no reported difference). - // This avoids the expensive construction of a difference tree. - if len(s.reporters) == 0 { - s.compareAny(rootStep(x, y)) - if s.result.Equal() { - return "" - } - s.result = diff.Result{} // Reset results - } - - r := new(defaultReporter) - s.reporters = append(s.reporters, reporter{r}) - s.compareAny(rootStep(x, y)) - d := r.String() - if (d == "") != s.result.Equal() { - panic("inconsistent difference and equality results") - } - return d -} - -// rootStep constructs the first path step. If x and y have differing types, -// then they are stored within an empty interface type. -func rootStep(x, y interface{}) PathStep { - vx := reflect.ValueOf(x) - vy := reflect.ValueOf(y) - - // If the inputs are different types, auto-wrap them in an empty interface - // so that they have the same parent type. - var t reflect.Type - if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = anyType - if vx.IsValid() { - vvx := reflect.New(t).Elem() - vvx.Set(vx) - vx = vvx - } - if vy.IsValid() { - vvy := reflect.New(t).Elem() - vvy.Set(vy) - vy = vvy - } - } else { - t = vx.Type() - } - - return &pathStep{t, vx, vy} -} - -type state struct { - // These fields represent the "comparison state". - // Calling statelessCompare must not result in observable changes to these. - result diff.Result // The current result of comparison - curPath Path // The current path in the value tree - curPtrs pointerPath // The current set of visited pointers - reporters []reporter // Optional reporters - - // recChecker checks for infinite cycles applying the same set of - // transformers upon the output of itself. - recChecker recChecker - - // dynChecker triggers pseudo-random checks for option correctness. - // It is safe for statelessCompare to mutate this value. - dynChecker dynChecker - - // These fields, once set by processOption, will not change. - exporters []exporter // List of exporters for structs with unexported fields - opts Options // List of all fundamental and filter options -} - -func newState(opts []Option) *state { - // Always ensure a validator option exists to validate the inputs. 
- s := &state{opts: Options{validator{}}} - s.curPtrs.Init() - s.processOption(Options(opts)) - return s -} - -func (s *state) processOption(opt Option) { - switch opt := opt.(type) { - case nil: - case Options: - for _, o := range opt { - s.processOption(o) - } - case coreOption: - type filtered interface { - isFiltered() bool - } - if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { - panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) - } - s.opts = append(s.opts, opt) - case exporter: - s.exporters = append(s.exporters, opt) - case reporter: - s.reporters = append(s.reporters, opt) - default: - panic(fmt.Sprintf("unknown option %T", opt)) - } -} - -// statelessCompare compares two values and returns the result. -// This function is stateless in that it does not alter the current result, -// or output to any registered reporters. -func (s *state) statelessCompare(step PathStep) diff.Result { - // We do not save and restore curPath and curPtrs because all of the - // compareX methods should properly push and pop from them. - // It is an implementation bug if the contents of the paths differ from - // when calling this function to when returning from it. - - oldResult, oldReporters := s.result, s.reporters - s.result = diff.Result{} // Reset result - s.reporters = nil // Remove reporters to avoid spurious printouts - s.compareAny(step) - res := s.result - s.result, s.reporters = oldResult, oldReporters - return res -} - -func (s *state) compareAny(step PathStep) { - // Update the path stack. - s.curPath.push(step) - defer s.curPath.pop() - for _, r := range s.reporters { - r.PushStep(step) - defer r.PopStep() - } - s.recChecker.Check(s.curPath) - - // Cycle-detection for slice elements (see NOTE in compareSlice). - t := step.Type() - vx, vy := step.Values() - if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { - px, py := vx.Addr(), vy.Addr() - if eq, visited := s.curPtrs.Push(px, py); visited { - s.report(eq, reportByCycle) - return - } - defer s.curPtrs.Pop(px, py) - } - - // Rule 1: Check whether an option applies on this node in the value tree. - if s.tryOptions(t, vx, vy) { - return - } - - // Rule 2: Check whether the type has a valid Equal method. - if s.tryMethod(t, vx, vy) { - return - } - - // Rule 3: Compare based on the underlying kind. - switch t.Kind() { - case reflect.Bool: - s.report(vx.Bool() == vy.Bool(), 0) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s.report(vx.Int() == vy.Int(), 0) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s.report(vx.Uint() == vy.Uint(), 0) - case reflect.Float32, reflect.Float64: - s.report(vx.Float() == vy.Float(), 0) - case reflect.Complex64, reflect.Complex128: - s.report(vx.Complex() == vy.Complex(), 0) - case reflect.String: - s.report(vx.String() == vy.String(), 0) - case reflect.Chan, reflect.UnsafePointer: - s.report(vx.Pointer() == vy.Pointer(), 0) - case reflect.Func: - s.report(vx.IsNil() && vy.IsNil(), 0) - case reflect.Struct: - s.compareStruct(t, vx, vy) - case reflect.Slice, reflect.Array: - s.compareSlice(t, vx, vy) - case reflect.Map: - s.compareMap(t, vx, vy) - case reflect.Ptr: - s.comparePtr(t, vx, vy) - case reflect.Interface: - s.compareInterface(t, vx, vy) - default: - panic(fmt.Sprintf("%v kind not handled", t.Kind())) - } -} - -func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { - // Evaluate all filters and apply the remaining options. 
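// Per the Equal rules documented above: an Ignore in the filtered set wins
// outright, exactly one Transformer or Comparer is applied, and more than
// one of those is rejected as ambiguous.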
- if opt := s.opts.filter(s, t, vx, vy); opt != nil { - opt.apply(s, vx, vy) - return true - } - return false -} - -func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { - // Check if this type even has an Equal method. - m, ok := t.MethodByName("Equal") - if !ok || !function.IsType(m.Type, function.EqualAssignable) { - return false - } - - eq := s.callTTBFunc(m.Func, vx, vy) - s.report(eq, reportByMethod) - return true -} - -func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{v})[0] - } - - // Run the function twice and ensure that we get the same results back. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, v) - got := <-c - want := f.Call([]reflect.Value{v})[0] - if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { - // To avoid false-positives with non-reflexive equality operations, - // we sanity check whether a value is equal to itself. - if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { - return want - } - panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) - } - return want -} - -func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{x, y})[0].Bool() - } - - // Swapping the input arguments is sufficient to check that - // f is symmetric and deterministic. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, y, x) - got := <-c - want := f.Call([]reflect.Value{x, y})[0].Bool() - if !got.IsValid() || got.Bool() != want { - panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) - } - return want -} - -func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { - var ret reflect.Value - defer func() { - recover() // Ignore panics, let the other call to f panic instead - c <- ret - }() - ret = f.Call(vs)[0] -} - -func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { - var addr bool - var vax, vay reflect.Value // Addressable versions of vx and vy - - var mayForce, mayForceInit bool - step := StructField{&structField{}} - for i := 0; i < t.NumField(); i++ { - step.typ = t.Field(i).Type - step.vx = vx.Field(i) - step.vy = vy.Field(i) - step.name = t.Field(i).Name - step.idx = i - step.unexported = !isExported(step.name) - if step.unexported { - if step.name == "_" { - continue - } - // Defer checking of unexported fields until later to give an - // Ignore a chance to ignore the field. - if !vax.IsValid() || !vay.IsValid() { - // For retrieveUnexportedField to work, the parent struct must - // be addressable. Create a new copy of the values if - // necessary to make them addressable. 
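// (makeAddressable, defined at the bottom of this file, copies the value
// into a fresh reflect.New allocation whenever it cannot already be
// addressed.)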
- addr = vx.CanAddr() || vy.CanAddr() - vax = makeAddressable(vx) - vay = makeAddressable(vy) - } - if !mayForceInit { - for _, xf := range s.exporters { - mayForce = mayForce || xf(t) - } - mayForceInit = true - } - step.mayForce = mayForce - step.paddr = addr - step.pvx = vax - step.pvy = vay - step.field = t.Field(i) - } - s.compareAny(step) - } -} - -func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { - isSlice := t.Kind() == reflect.Slice - if isSlice && (vx.IsNil() || vy.IsNil()) { - s.report(vx.IsNil() && vy.IsNil(), 0) - return - } - - // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer - // since slices represents a list of pointers, rather than a single pointer. - // The pointer checking logic must be handled on a per-element basis - // in compareAny. - // - // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting - // pointer P, a length N, and a capacity C. Supposing each slice element has - // a memory size of M, then the slice is equivalent to the list of pointers: - // [P+i*M for i in range(N)] - // - // For example, v[:0] and v[:1] are slices with the same starting pointer, - // but they are clearly different values. Using the slice pointer alone - // violates the assumption that equal pointers implies equal values. - - step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} - withIndexes := func(ix, iy int) SliceIndex { - if ix >= 0 { - step.vx, step.xkey = vx.Index(ix), ix - } else { - step.vx, step.xkey = reflect.Value{}, -1 - } - if iy >= 0 { - step.vy, step.ykey = vy.Index(iy), iy - } else { - step.vy, step.ykey = reflect.Value{}, -1 - } - return step - } - - // Ignore options are able to ignore missing elements in a slice. - // However, detecting these reliably requires an optimal differencing - // algorithm, for which diff.Difference is not. - // - // Instead, we first iterate through both slices to detect which elements - // would be ignored if standing alone. The index of non-discarded elements - // are stored in a separate slice, which diffing is then performed on. - var indexesX, indexesY []int - var ignoredX, ignoredY []bool - for ix := 0; ix < vx.Len(); ix++ { - ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 - if !ignored { - indexesX = append(indexesX, ix) - } - ignoredX = append(ignoredX, ignored) - } - for iy := 0; iy < vy.Len(); iy++ { - ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 - if !ignored { - indexesY = append(indexesY, iy) - } - ignoredY = append(ignoredY, ignored) - } - - // Compute an edit-script for slices vx and vy (excluding ignored elements). - edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { - return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) - }) - - // Replay the ignore-scripts and the edit-script. - var ix, iy int - for ix < vx.Len() || iy < vy.Len() { - var e diff.EditType - switch { - case ix < len(ignoredX) && ignoredX[ix]: - e = diff.UniqueX - case iy < len(ignoredY) && ignoredY[iy]: - e = diff.UniqueY - default: - e, edits = edits[0], edits[1:] - } - switch e { - case diff.UniqueX: - s.compareAny(withIndexes(ix, -1)) - ix++ - case diff.UniqueY: - s.compareAny(withIndexes(-1, iy)) - iy++ - default: - s.compareAny(withIndexes(ix, iy)) - ix++ - iy++ - } - } -} - -func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), 0) - return - } - - // Cycle-detection for maps. 
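// Unlike slices, a map value is a single reference, so the map itself is a
// usable key for the visited-pointer set; the per-element workaround
// described in compareSlice is unnecessary here.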
- if eq, visited := s.curPtrs.Push(vx, vy); visited { - s.report(eq, reportByCycle) - return - } - defer s.curPtrs.Pop(vx, vy) - - // We combine and sort the two map keys so that we can perform the - // comparisons in a deterministic order. - step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} - for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { - step.vx = vx.MapIndex(k) - step.vy = vy.MapIndex(k) - step.key = k - if !step.vx.IsValid() && !step.vy.IsValid() { - // It is possible for both vx and vy to be invalid if the - // key contained a NaN value in it. - // - // Even with the ability to retrieve NaN keys in Go 1.12, - // there still isn't a sensible way to compare the values since - // a NaN key may map to multiple unordered values. - // The most reasonable way to compare NaNs would be to compare the - // set of values. However, this is impossible to do efficiently - // since set equality is provably an O(n^2) operation given only - // an Equal function. If we had a Less function or Hash function, - // this could be done in O(n*log(n)) or O(n), respectively. - // - // Rather than adding complex logic to deal with NaNs, make it - // the user's responsibility to compare such obscure maps. - const help = "consider providing a Comparer to compare the map" - panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) - } - s.compareAny(step) - } -} - -func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), 0) - return - } - - // Cycle-detection for pointers. - if eq, visited := s.curPtrs.Push(vx, vy); visited { - s.report(eq, reportByCycle) - return - } - defer s.curPtrs.Pop(vx, vy) - - vx, vy = vx.Elem(), vy.Elem() - s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) -} - -func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), 0) - return - } - vx, vy = vx.Elem(), vy.Elem() - if vx.Type() != vy.Type() { - s.report(false, 0) - return - } - s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) -} - -func (s *state) report(eq bool, rf resultFlags) { - if rf&reportByIgnore == 0 { - if eq { - s.result.NumSame++ - rf |= reportEqual - } else { - s.result.NumDiff++ - rf |= reportUnequal - } - } - for _, r := range s.reporters { - r.Report(Result{flags: rf}) - } -} - -// recChecker tracks the state needed to periodically perform checks that -// user provided transformers are not stuck in an infinitely recursive cycle. -type recChecker struct{ next int } - -// Check scans the Path for any recursive transformers and panics when any -// recursive transformers are detected. Note that the presence of a -// recursive Transformer does not necessarily imply an infinite cycle. -// As such, this check only activates after some minimal number of path steps. -func (rc *recChecker) Check(p Path) { - const minLen = 1 << 16 - if rc.next == 0 { - rc.next = minLen - } - if len(p) < rc.next { - return - } - rc.next <<= 1 - - // Check whether the same transformer has appeared at least twice. 
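// A transformer already seen once on this path is likely being re-applied
// to its own output; per the doc comment above this is only a heuristic,
// hence the minimum-path-length gate before reaching this point.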
- var ss []string - m := map[Option]int{} - for _, ps := range p { - if t, ok := ps.(Transform); ok { - t := t.Option() - if m[t] == 1 { // Transformer was used exactly once before - tf := t.(*transformer).fnc.Type() - ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) - } - m[t]++ - } - } - if len(ss) > 0 { - const warning = "recursive set of Transformers detected" - const help = "consider using cmpopts.AcyclicTransformer" - set := strings.Join(ss, "\n\t") - panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) - } -} - -// dynChecker tracks the state needed to periodically perform checks that -// user provided functions are symmetric and deterministic. -// The zero value is safe for immediate use. -type dynChecker struct{ curr, next int } - -// Next increments the state and reports whether a check should be performed. -// -// Checks occur every Nth function call, where N is a triangular number: -// -// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... -// -// See https://en.wikipedia.org/wiki/Triangular_number -// -// This sequence ensures that the cost of checks drops significantly as -// the number of functions calls grows larger. -func (dc *dynChecker) Next() bool { - ok := dc.curr == dc.next - if ok { - dc.curr = 0 - dc.next++ - } - dc.curr++ - return ok -} - -// makeAddressable returns a value that is always addressable. -// It returns the input verbatim if it is already addressable, -// otherwise it creates a new value and returns an addressable copy. -func makeAddressable(v reflect.Value) reflect.Value { - if v.CanAddr() { - return v - } - vc := reflect.New(v.Type()).Elem() - vc.Set(v) - return vc -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/export.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/export.go deleted file mode 100644 index 29f82fe6b2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/export.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "reflect" - "unsafe" -) - -// retrieveUnexportedField uses unsafe to forcibly retrieve any field from -// a struct such that the value has read-write permissions. -// -// The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. If addr is false, -// then the returned value will be shallowed copied to be non-addressable. -func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { - ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() - if !addr { - // A field is addressable if and only if the struct is addressable. - // If the original parent value was not addressable, shallow copy the - // value to make it non-addressable to avoid leaking an implementation - // detail of how forcibly exporting a field works. 
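// The nil interface is a special case: Interface() would return an untyped
// nil, making reflect.ValueOf yield an invalid Value, so return a typed
// zero value instead.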
- if ve.Kind() == reflect.Interface && ve.IsNil() { - return reflect.Zero(f.Type) - } - return reflect.ValueOf(ve.Interface()).Convert(f.Type) - } - return ve -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go deleted file mode 100644 index 36062a604c..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !cmp_debug -// +build !cmp_debug - -package diff - -var debug debugger - -type debugger struct{} - -func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { - return f -} -func (debugger) Update() {} -func (debugger) Finish() {} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go deleted file mode 100644 index a3b97a1ad5..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build cmp_debug -// +build cmp_debug - -package diff - -import ( - "fmt" - "strings" - "sync" - "time" -) - -// The algorithm can be seen running in real-time by enabling debugging: -// go test -tags=cmp_debug -v -// -// Example output: -// === RUN TestDifference/#34 -// ┌───────────────────────────────┐ -// │ \ · · · · · · · · · · · · · · │ -// │ · # · · · · · · · · · · · · · │ -// │ · \ · · · · · · · · · · · · · │ -// │ · · \ · · · · · · · · · · · · │ -// │ · · · X # · · · · · · · · · · │ -// │ · · · # \ · · · · · · · · · · │ -// │ · · · · · # # · · · · · · · · │ -// │ · · · · · # \ · · · · · · · · │ -// │ · · · · · · · \ · · · · · · · │ -// │ · · · · · · · · \ · · · · · · │ -// │ · · · · · · · · · \ · · · · · │ -// │ · · · · · · · · · · \ · · # · │ -// │ · · · · · · · · · · · \ # # · │ -// │ · · · · · · · · · · · # # # · │ -// │ · · · · · · · · · · # # # # · │ -// │ · · · · · · · · · # # # # # · │ -// │ · · · · · · · · · · · · · · \ │ -// └───────────────────────────────┘ -// [.Y..M.XY......YXYXY.|] -// -// The grid represents the edit-graph where the horizontal axis represents -// list X and the vertical axis represents list Y. The start of the two lists -// is the top-left, while the ends are the bottom-right. The '·' represents -// an unexplored node in the graph. The '\' indicates that the two symbols -// from list X and Y are equal. The 'X' indicates that two symbols are similar -// (but not exactly equal) to each other. The '#' indicates that the two symbols -// are different (and not similar). The algorithm traverses this graph trying to -// make the paths starting in the top-left and the bottom-right connect. -// -// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents -// the currently established path from the forward and reverse searches, -// separated by a '|' character. 
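// This file only compiles under the cmp_debug build tag; its sibling
// debug_disable.go supplies a no-op debugger otherwise, so ordinary builds
// pay nothing for the instrumentation.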
- -const ( - updateDelay = 100 * time.Millisecond - finishDelay = 500 * time.Millisecond - ansiTerminal = true // ANSI escape codes used to move terminal cursor -) - -var debug debugger - -type debugger struct { - sync.Mutex - p1, p2 EditScript - fwdPath, revPath *EditScript - grid []byte - lines int -} - -func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { - dbg.Lock() - dbg.fwdPath, dbg.revPath = p1, p2 - top := "┌─" + strings.Repeat("──", nx) + "┐\n" - row := "│ " + strings.Repeat("· ", nx) + "│\n" - btm := "└─" + strings.Repeat("──", nx) + "┘\n" - dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) - dbg.lines = strings.Count(dbg.String(), "\n") - fmt.Print(dbg) - - // Wrap the EqualFunc so that we can intercept each result. - return func(ix, iy int) (r Result) { - cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] - for i := range cell { - cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot - } - switch r = f(ix, iy); { - case r.Equal(): - cell[0] = '\\' - case r.Similar(): - cell[0] = 'X' - default: - cell[0] = '#' - } - return - } -} - -func (dbg *debugger) Update() { - dbg.print(updateDelay) -} - -func (dbg *debugger) Finish() { - dbg.print(finishDelay) - dbg.Unlock() -} - -func (dbg *debugger) String() string { - dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] - for i := len(*dbg.revPath) - 1; i >= 0; i-- { - dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) - } - return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) -} - -func (dbg *debugger) print(d time.Duration) { - if ansiTerminal { - fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor - } - fmt.Print(dbg) - time.Sleep(d) -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go deleted file mode 100644 index a248e5436d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package diff implements an algorithm for producing edit-scripts. -// The edit-script is a sequence of operations needed to transform one list -// of symbols into another (or vice-versa). The edits allowed are insertions, -// deletions, and modifications. The summation of all edits is called the -// Levenshtein distance as this problem is well-known in computer science. -// -// This package prioritizes performance over accuracy. That is, the run time -// is more important than obtaining a minimal Levenshtein distance. -package diff - -import ( - "math/rand" - "time" - - "github.com/google/go-cmp/cmp/internal/flags" -) - -// EditType represents a single operation within an edit-script. -type EditType uint8 - -const ( - // Identity indicates that a symbol pair is identical in both list X and Y. - Identity EditType = iota - // UniqueX indicates that a symbol only exists in X and not Y. - UniqueX - // UniqueY indicates that a symbol only exists in Y and not X. - UniqueY - // Modified indicates that a symbol pair is a modification of each other. - Modified -) - -// EditScript represents the series of differences between two lists. 
-type EditScript []EditType - -// String returns a human-readable string representing the edit-script where -// Identity, UniqueX, UniqueY, and Modified are represented by the -// '.', 'X', 'Y', and 'M' characters, respectively. -func (es EditScript) String() string { - b := make([]byte, len(es)) - for i, e := range es { - switch e { - case Identity: - b[i] = '.' - case UniqueX: - b[i] = 'X' - case UniqueY: - b[i] = 'Y' - case Modified: - b[i] = 'M' - default: - panic("invalid edit-type") - } - } - return string(b) -} - -// stats returns a histogram of the number of each type of edit operation. -func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { - for _, e := range es { - switch e { - case Identity: - s.NI++ - case UniqueX: - s.NX++ - case UniqueY: - s.NY++ - case Modified: - s.NM++ - default: - panic("invalid edit-type") - } - } - return -} - -// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if -// lists X and Y are equal. -func (es EditScript) Dist() int { return len(es) - es.stats().NI } - -// LenX is the length of the X list. -func (es EditScript) LenX() int { return len(es) - es.stats().NY } - -// LenY is the length of the Y list. -func (es EditScript) LenY() int { return len(es) - es.stats().NX } - -// EqualFunc reports whether the symbols at indexes ix and iy are equal. -// When called by Difference, the index is guaranteed to be within nx and ny. -type EqualFunc func(ix int, iy int) Result - -// Result is the result of comparison. -// NumSame is the number of sub-elements that are equal. -// NumDiff is the number of sub-elements that are not equal. -type Result struct{ NumSame, NumDiff int } - -// BoolResult returns a Result that is either Equal or not Equal. -func BoolResult(b bool) Result { - if b { - return Result{NumSame: 1} // Equal, Similar - } else { - return Result{NumDiff: 2} // Not Equal, not Similar - } -} - -// Equal indicates whether the symbols are equal. Two symbols are equal -// if and only if NumDiff == 0. If Equal, then they are also Similar. -func (r Result) Equal() bool { return r.NumDiff == 0 } - -// Similar indicates whether two symbols are similar and may be represented -// by using the Modified type. As a special case, we consider binary comparisons -// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. -// -// The exact ratio of NumSame to NumDiff to determine similarity may change. -func (r Result) Similar() bool { - // Use NumSame+1 to offset NumSame so that binary comparisons are similar. - return r.NumSame+1 >= r.NumDiff -} - -var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 - -// Difference reports whether two lists of lengths nx and ny are equal -// given the definition of equality provided as f. -// -// This function returns an edit-script, which is a sequence of operations -// needed to convert one list into the other. The following invariants for -// the edit-script are maintained: -// - eq == (es.Dist()==0) -// - nx == es.LenX() -// - ny == es.LenY() -// -// This algorithm is not guaranteed to be an optimal solution (i.e., one that -// produces an edit-script with a minimal Levenshtein distance). This algorithm -// favors performance over optimality. The exact output is not guaranteed to -// be stable and may change over time. -func Difference(nx, ny int, f EqualFunc) (es EditScript) { - // This algorithm is based on traversing what is known as an "edit-graph". - // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" - // by Eugene W. Myers. 
Since D can be as large as N itself, this is - // effectively O(N^2). Unlike the algorithm from that paper, we are not - // interested in the optimal path, but at least some "decent" path. - // - // For example, let X and Y be lists of symbols: - // X = [A B C A B B A] - // Y = [C B A B A C] - // - // The edit-graph can be drawn as the following: - // A B C A B B A - // ┌─────────────┐ - // C │_|_|\|_|_|_|_│ 0 - // B │_|\|_|_|\|\|_│ 1 - // A │\|_|_|\|_|_|\│ 2 - // B │_|\|_|_|\|\|_│ 3 - // A │\|_|_|\|_|_|\│ 4 - // C │ | |\| | | | │ 5 - // └─────────────┘ 6 - // 0 1 2 3 4 5 6 7 - // - // List X is written along the horizontal axis, while list Y is written - // along the vertical axis. At any point on this grid, if the symbol in - // list X matches the corresponding symbol in list Y, then a '\' is drawn. - // The goal of any minimal edit-script algorithm is to find a path from the - // top-left corner to the bottom-right corner, while traveling through the - // fewest horizontal or vertical edges. - // A horizontal edge is equivalent to inserting a symbol from list X. - // A vertical edge is equivalent to inserting a symbol from list Y. - // A diagonal edge is equivalent to a matching symbol between both X and Y. - - // Invariants: - // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny - // - // In general: - // - fwdFrontier.X < revFrontier.X - // - fwdFrontier.Y < revFrontier.Y - // - // Unless, it is time for the algorithm to terminate. - fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} - revPath := path{-1, point{nx, ny}, make(EditScript, 0)} - fwdFrontier := fwdPath.point // Forward search frontier - revFrontier := revPath.point // Reverse search frontier - - // Search budget bounds the cost of searching for better paths. - // The longest sequence of non-matching symbols that can be tolerated is - // approximately the square-root of the search budget. - searchBudget := 4 * (nx + ny) // O(n) - - // Running the tests with the "cmp_debug" build tag prints a visualization - // of the algorithm running in real-time. This is educational for - // understanding how the algorithm works. See debug_enable.go. - f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) - - // The algorithm below is a greedy, meet-in-the-middle algorithm for - // computing sub-optimal edit-scripts between two lists. - // - // The algorithm is approximately as follows: - // - Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). - // The goal of the search is connect with the search - // from the opposite corner. - // - As we search, we build a path in a greedy manner, - // where the first match seen is added to the path (this is sub-optimal, - // but provides a decent result in practice). When matches are found, - // we try the next pair of symbols in the lists and follow all matches - // as far as possible. - // - When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, - // we advance the frontier towards the opposite corner. - // - This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. - - // This algorithm is correct even if searching only in the forward direction - // or in the reverse direction. 
We do both because it is commonly observed - // that two lists commonly differ because elements were added to the front - // or end of the other list. - // - // Non-deterministically start with either the forward or reverse direction - // to introduce some deliberate instability so that we have the flexibility - // to change this algorithm in the future. - if flags.Deterministic || randBool { - goto forwardSearch - } else { - goto reverseSearch - } - -forwardSearch: - { - // Forward search from the beginning. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - goto finishSearch - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{fwdFrontier.X + z, fwdFrontier.Y - z} - switch { - case p.X >= revPath.X || p.Y < fwdPath.Y: - stop1 = true // Hit top-right corner - case p.Y >= revPath.Y || p.X < fwdPath.X: - stop2 = true // Hit bottom-left corner - case f(p.X, p.Y).Equal(): - // Match found, so connect the path to this point. - fwdPath.connect(p, f) - fwdPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(fwdPath.X, fwdPath.Y).Equal() { - break - } - fwdPath.append(Identity) - } - fwdFrontier = fwdPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards reverse point. - if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { - fwdFrontier.X++ - } else { - fwdFrontier.Y++ - } - goto reverseSearch - } - -reverseSearch: - { - // Reverse search from the end. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - goto finishSearch - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{revFrontier.X - z, revFrontier.Y + z} - switch { - case fwdPath.X >= p.X || revPath.Y < p.Y: - stop1 = true // Hit bottom-left corner - case fwdPath.Y >= p.Y || revPath.X < p.X: - stop2 = true // Hit top-right corner - case f(p.X-1, p.Y-1).Equal(): - // Match found, so connect the path to this point. - revPath.connect(p, f) - revPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(revPath.X-1, revPath.Y-1).Equal() { - break - } - revPath.append(Identity) - } - revFrontier = revPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards forward point. - if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { - revFrontier.X-- - } else { - revFrontier.Y-- - } - goto forwardSearch - } - -finishSearch: - // Join the forward and reverse paths and then append the reverse path. - fwdPath.connect(revPath.point, f) - for i := len(revPath.es) - 1; i >= 0; i-- { - t := revPath.es[i] - revPath.es = revPath.es[:i] - fwdPath.append(t) - } - debug.Finish() - return fwdPath.es -} - -type path struct { - dir int // +1 if forward, -1 if reverse - point // Leading point of the EditScript path - es EditScript -} - -// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types -// to the edit-script to connect p.point to dst. -func (p *path) connect(dst point, f EqualFunc) { - if p.dir > 0 { - // Connect in forward direction. 
- for dst.X > p.X && dst.Y > p.Y { - switch r := f(p.X, p.Y); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case dst.X-p.X >= dst.Y-p.Y: - p.append(UniqueX) - default: - p.append(UniqueY) - } - } - for dst.X > p.X { - p.append(UniqueX) - } - for dst.Y > p.Y { - p.append(UniqueY) - } - } else { - // Connect in reverse direction. - for p.X > dst.X && p.Y > dst.Y { - switch r := f(p.X-1, p.Y-1); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case p.Y-dst.Y >= p.X-dst.X: - p.append(UniqueY) - default: - p.append(UniqueX) - } - } - for p.X > dst.X { - p.append(UniqueX) - } - for p.Y > dst.Y { - p.append(UniqueY) - } - } -} - -func (p *path) append(t EditType) { - p.es = append(p.es, t) - switch t { - case Identity, Modified: - p.add(p.dir, p.dir) - case UniqueX: - p.add(p.dir, 0) - case UniqueY: - p.add(0, p.dir) - } - debug.Update() -} - -type point struct{ X, Y int } - -func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } - -// zigzag maps a consecutive sequence of integers to a zig-zag sequence. -// -// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] -func zigzag(x int) int { - if x&1 != 0 { - x = ^x - } - return x >> 1 -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go deleted file mode 100644 index d8e459c9b9..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -// Deterministic controls whether the output of Diff should be deterministic. -// This is only used for testing. -var Deterministic bool diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/function/func.go deleted file mode 100644 index def01a6be3..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package function provides functionality for identifying function types. -package function - -import ( - "reflect" - "regexp" - "runtime" - "strings" -) - -type funcType int - -const ( - _ funcType = iota - - tbFunc // func(T) bool - ttbFunc // func(T, T) bool - ttiFunc // func(T, T) int - trbFunc // func(T, R) bool - tibFunc // func(T, I) bool - trFunc // func(T) R - - Equal = ttbFunc // func(T, T) bool - EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool - Transformer = trFunc // func(T) R - ValueFilter = ttbFunc // func(T, T) bool - Less = ttbFunc // func(T, T) bool - Compare = ttiFunc // func(T, T) int - ValuePredicate = tbFunc // func(T) bool - KeyValuePredicate = trbFunc // func(T, R) bool -) - -var boolType = reflect.TypeOf(true) -var intType = reflect.TypeOf(0) - -// IsType reports whether the reflect.Type is of the specified function type. 
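// For example, func(time.Time, time.Time) bool satisfies Equal (ttbFunc),
// whereas func(int, string) bool matches only KeyValuePredicate (trbFunc),
// because ttbFunc additionally requires both inputs to share one type.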
-func IsType(t reflect.Type, ft funcType) bool { - if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { - return false - } - ni, no := t.NumIn(), t.NumOut() - switch ft { - case tbFunc: // func(T) bool - if ni == 1 && no == 1 && t.Out(0) == boolType { - return true - } - case ttbFunc: // func(T, T) bool - if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { - return true - } - case ttiFunc: // func(T, T) int - if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { - return true - } - case trbFunc: // func(T, R) bool - if ni == 2 && no == 1 && t.Out(0) == boolType { - return true - } - case tibFunc: // func(T, I) bool - if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { - return true - } - case trFunc: // func(T) R - if ni == 1 && no == 1 { - return true - } - } - return false -} - -var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) - -// NameOf returns the name of the function value. -func NameOf(v reflect.Value) string { - fnc := runtime.FuncForPC(v.Pointer()) - if fnc == nil { - return "" - } - fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" - - // Method closures have a "-fm" suffix. - fullName = strings.TrimSuffix(fullName, "-fm") - - var name string - for len(fullName) > 0 { - inParen := strings.HasSuffix(fullName, ")") - fullName = strings.TrimSuffix(fullName, ")") - - s := lastIdentRx.FindString(fullName) - if s == "" { - break - } - name = s + "." + name - fullName = strings.TrimSuffix(fullName, s) - - if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { - fullName = fullName[:i] - } - fullName = strings.TrimSuffix(fullName, ".") - } - return strings.TrimSuffix(name, ".") -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/name.go deleted file mode 100644 index 7b498bb2cb..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2020, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "reflect" - "strconv" -) - -var anyType = reflect.TypeOf((*interface{})(nil)).Elem() - -// TypeString is nearly identical to reflect.Type.String, -// but has an additional option to specify that full type names be used. -func TypeString(t reflect.Type, qualified bool) string { - return string(appendTypeName(nil, t, qualified, false)) -} - -func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { - // BUG: Go reflection provides no way to disambiguate two named types - // of the same name and within the same package, - // but declared within the namespace of different functions. - - // Use the "any" alias instead of "interface{}" for better readability. - if t == anyType { - return append(b, "any"...) - } - - // Named type. - if t.Name() != "" { - if qualified && t.PkgPath() != "" { - b = append(b, '"') - b = append(b, t.PkgPath()...) - b = append(b, '"') - b = append(b, '.') - b = append(b, t.Name()...) - } else { - b = append(b, t.String()...) - } - return b - } - - // Unnamed type. 
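IsType centralizes the shape checks that Comparer, Transformer, and the filter constructors perform on user-supplied functions. A minimal sketch of just the ttbFunc ("func(T, T) bool") case, under the hypothetical name isEqualFunc:

package main

import (
	"fmt"
	"reflect"
)

var boolType = reflect.TypeOf(true)

// isEqualFunc reports whether t has the shape func(T, T) bool for a
// single type T, mirroring the ttbFunc branch deleted above.
func isEqualFunc(t reflect.Type) bool {
	return t != nil && t.Kind() == reflect.Func && !t.IsVariadic() &&
		t.NumIn() == 2 && t.NumOut() == 1 &&
		t.In(0) == t.In(1) && t.Out(0) == boolType
}

func main() {
	eq := func(x, y string) bool { return x == y }
	cmpInts := func(x, y int) int { return x - y }
	fmt.Println(isEqualFunc(reflect.TypeOf(eq)))      // true
	fmt.Println(isEqualFunc(reflect.TypeOf(cmpInts))) // false: returns int
}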
- switch k := t.Kind(); k { - case reflect.Bool, reflect.String, reflect.UnsafePointer, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - b = append(b, k.String()...) - case reflect.Chan: - if t.ChanDir() == reflect.RecvDir { - b = append(b, "<-"...) - } - b = append(b, "chan"...) - if t.ChanDir() == reflect.SendDir { - b = append(b, "<-"...) - } - b = append(b, ' ') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Func: - if !elideFunc { - b = append(b, "func"...) - } - b = append(b, '(') - for i := 0; i < t.NumIn(); i++ { - if i > 0 { - b = append(b, ", "...) - } - if i == t.NumIn()-1 && t.IsVariadic() { - b = append(b, "..."...) - b = appendTypeName(b, t.In(i).Elem(), qualified, false) - } else { - b = appendTypeName(b, t.In(i), qualified, false) - } - } - b = append(b, ')') - switch t.NumOut() { - case 0: - // Do nothing - case 1: - b = append(b, ' ') - b = appendTypeName(b, t.Out(0), qualified, false) - default: - b = append(b, " ("...) - for i := 0; i < t.NumOut(); i++ { - if i > 0 { - b = append(b, ", "...) - } - b = appendTypeName(b, t.Out(i), qualified, false) - } - b = append(b, ')') - } - case reflect.Struct: - b = append(b, "struct{ "...) - for i := 0; i < t.NumField(); i++ { - if i > 0 { - b = append(b, "; "...) - } - sf := t.Field(i) - if !sf.Anonymous { - if qualified && sf.PkgPath != "" { - b = append(b, '"') - b = append(b, sf.PkgPath...) - b = append(b, '"') - b = append(b, '.') - } - b = append(b, sf.Name...) - b = append(b, ' ') - } - b = appendTypeName(b, sf.Type, qualified, false) - if sf.Tag != "" { - b = append(b, ' ') - b = strconv.AppendQuote(b, string(sf.Tag)) - } - } - if b[len(b)-1] == ' ' { - b = b[:len(b)-1] - } else { - b = append(b, ' ') - } - b = append(b, '}') - case reflect.Slice, reflect.Array: - b = append(b, '[') - if k == reflect.Array { - b = strconv.AppendUint(b, uint64(t.Len()), 10) - } - b = append(b, ']') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Map: - b = append(b, "map["...) - b = appendTypeName(b, t.Key(), qualified, false) - b = append(b, ']') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Ptr: - b = append(b, '*') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Interface: - b = append(b, "interface{ "...) - for i := 0; i < t.NumMethod(); i++ { - if i > 0 { - b = append(b, "; "...) - } - m := t.Method(i) - if qualified && m.PkgPath != "" { - b = append(b, '"') - b = append(b, m.PkgPath...) - b = append(b, '"') - b = append(b, '.') - } - b = append(b, m.Name...) - b = appendTypeName(b, m.Type, qualified, true) - } - if b[len(b)-1] == ' ' { - b = b[:len(b)-1] - } else { - b = append(b, ' ') - } - b = append(b, '}') - default: - panic("invalid kind: " + k.String()) - } - return b -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go deleted file mode 100644 index e5dfff69af..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
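The qualified mode exists because reflect.Type.String shortens package paths to their last element, so identically named types from different packages can collide in output. A sketch of just the named-type branch, under the hypothetical name qualifiedName:

package main

import (
	"fmt"
	"reflect"
)

// qualifiedName mirrors the named-type branch of appendTypeName:
// qualified output wraps the full import path in quotes so that two
// packages sharing a base name remain distinguishable.
func qualifiedName(t reflect.Type, qualified bool) string {
	if qualified && t.PkgPath() != "" {
		return fmt.Sprintf("%q.%s", t.PkgPath(), t.Name())
	}
	return t.String()
}

type Widget struct{}

func main() {
	t := reflect.TypeOf(Widget{})
	fmt.Println(qualifiedName(t, false)) // main.Widget
	fmt.Println(qualifiedName(t, true))  // "main".Widget
}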
- -package value - -import ( - "reflect" - "unsafe" -) - -// Pointer is an opaque typed pointer and is guaranteed to be comparable. -type Pointer struct { - p unsafe.Pointer - t reflect.Type -} - -// PointerOf returns a Pointer from v, which must be a -// reflect.Ptr, reflect.Slice, or reflect.Map. -func PointerOf(v reflect.Value) Pointer { - // The proper representation of a pointer is unsafe.Pointer, - // which is necessary if the GC ever uses a moving collector. - return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} -} - -// IsNil reports whether the pointer is nil. -func (p Pointer) IsNil() bool { - return p.p == nil -} - -// Uintptr returns the pointer as a uintptr. -func (p Pointer) Uintptr() uintptr { - return uintptr(p.p) -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go deleted file mode 100644 index 98533b036c..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "fmt" - "math" - "reflect" - "sort" -) - -// SortKeys sorts a list of map keys, deduplicating keys if necessary. -// The type of each value must be comparable. -func SortKeys(vs []reflect.Value) []reflect.Value { - if len(vs) == 0 { - return vs - } - - // Sort the map keys. - sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) - - // Deduplicate keys (fails for NaNs). - vs2 := vs[:1] - for _, v := range vs[1:] { - if isLess(vs2[len(vs2)-1], v) { - vs2 = append(vs2, v) - } - } - return vs2 -} - -// isLess is a generic function for sorting arbitrary map keys. -// The inputs must be of the same type and must be comparable. -func isLess(x, y reflect.Value) bool { - switch x.Type().Kind() { - case reflect.Bool: - return !x.Bool() && y.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return x.Int() < y.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return x.Uint() < y.Uint() - case reflect.Float32, reflect.Float64: - // NOTE: This does not sort -0 as less than +0 - // since Go maps treat -0 and +0 as equal keys. 
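Pairing the raw address with the dynamic type is what makes Pointer usable as a comparable map key, which is exactly how the cycle-detection maps in path.go consume it. A sketch under the hypothetical names pointerKey and keyOf:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// pointerKey mirrors the deleted value.Pointer: address plus dynamic
// type, which together form a comparable key.
type pointerKey struct {
	p unsafe.Pointer
	t reflect.Type
}

func keyOf(v reflect.Value) pointerKey {
	return pointerKey{unsafe.Pointer(v.Pointer()), v.Type()}
}

func main() {
	s := []int{1, 2, 3}
	seen := map[pointerKey]bool{keyOf(reflect.ValueOf(s)): true}
	// A second reflect.Value over the same slice maps to the same key.
	fmt.Println(seen[keyOf(reflect.ValueOf(s))]) // true
}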
- fx, fy := x.Float(), y.Float() - return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) - case reflect.Complex64, reflect.Complex128: - cx, cy := x.Complex(), y.Complex() - rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) - if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { - return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) - } - return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) - case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: - return x.Pointer() < y.Pointer() - case reflect.String: - return x.String() < y.String() - case reflect.Array: - for i := 0; i < x.Len(); i++ { - if isLess(x.Index(i), y.Index(i)) { - return true - } - if isLess(y.Index(i), x.Index(i)) { - return false - } - } - return false - case reflect.Struct: - for i := 0; i < x.NumField(); i++ { - if isLess(x.Field(i), y.Field(i)) { - return true - } - if isLess(y.Field(i), x.Field(i)) { - return false - } - } - return false - case reflect.Interface: - vx, vy := x.Elem(), y.Elem() - if !vx.IsValid() || !vy.IsValid() { - return !vx.IsValid() && vy.IsValid() - } - tx, ty := vx.Type(), vy.Type() - if tx == ty { - return isLess(x.Elem(), y.Elem()) - } - if tx.Kind() != ty.Kind() { - return vx.Kind() < vy.Kind() - } - if tx.String() != ty.String() { - return tx.String() < ty.String() - } - if tx.PkgPath() != ty.PkgPath() { - return tx.PkgPath() < ty.PkgPath() - } - // This can happen in rare situations, so we fallback to just comparing - // the unique pointer for a reflect.Type. This guarantees deterministic - // ordering within a program, but it is obviously not stable. - return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() - default: - // Must be Func, Map, or Slice; which are not comparable. - panic(fmt.Sprintf("%T is not comparable", x.Type())) - } -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/options.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/options.go deleted file mode 100644 index ba3fce81ff..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/options.go +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/google/go-cmp/cmp/internal/function" -) - -// Option configures for specific behavior of [Equal] and [Diff]. In particular, -// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), -// configure how equality is determined. -// -// The fundamental options may be composed with filters ([FilterPath] and -// [FilterValues]) to control the scope over which they are applied. -// -// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions -// for creating options that may be used with [Equal] and [Diff]. -type Option interface { - // filter applies all filters and returns the option that remains. - // Each option may only read s.curPath and call s.callTTBFunc. - // - // An Options is returned only if multiple comparers or transformers - // can apply simultaneously and will only contain values of those types - // or sub-Options containing values of those types. 
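SortKeys is what gives map traversal in the reporter a deterministic order. A cut-down sketch for integer keys only, under the hypothetical name sortIntKeys:

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sortIntKeys orders a map's reflect keys with the same comparison
// isLess uses for integers, yielding a stable iteration order.
func sortIntKeys(m interface{}) []reflect.Value {
	keys := reflect.ValueOf(m).MapKeys()
	sort.SliceStable(keys, func(i, j int) bool {
		return keys[i].Int() < keys[j].Int()
	})
	return keys
}

func main() {
	m := map[int]string{3: "c", 1: "a", 2: "b"}
	for _, k := range sortIntKeys(m) {
		fmt.Println(k.Int(), m[int(k.Int())]) // 1 a, then 2 b, then 3 c
	}
}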
- filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption -} - -// applicableOption represents the following types: -// -// Fundamental: ignore | validator | *comparer | *transformer -// Grouping: Options -type applicableOption interface { - Option - - // apply executes the option, which may mutate s or panic. - apply(s *state, vx, vy reflect.Value) -} - -// coreOption represents the following types: -// -// Fundamental: ignore | validator | *comparer | *transformer -// Filters: *pathFilter | *valuesFilter -type coreOption interface { - Option - isCore() -} - -type core struct{} - -func (core) isCore() {} - -// Options is a list of [Option] values that also satisfies the [Option] interface. -// Helper comparison packages may return an Options value when packing multiple -// [Option] values into a single [Option]. When this package processes an Options, -// it will be implicitly expanded into a flat list. -// -// Applying a filter on an Options is equivalent to applying that same filter -// on all individual options held within. -type Options []Option - -func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { - for _, opt := range opts { - switch opt := opt.filter(s, t, vx, vy); opt.(type) { - case ignore: - return ignore{} // Only ignore can short-circuit evaluation - case validator: - out = validator{} // Takes precedence over comparer or transformer - case *comparer, *transformer, Options: - switch out.(type) { - case nil: - out = opt - case validator: - // Keep validator - case *comparer, *transformer, Options: - out = Options{out, opt} // Conflicting comparers or transformers - } - } - } - return out -} - -func (opts Options) apply(s *state, _, _ reflect.Value) { - const warning = "ambiguous set of applicable options" - const help = "consider using filters to ensure at most one Comparer or Transformer may apply" - var ss []string - for _, opt := range flattenOptions(nil, opts) { - ss = append(ss, fmt.Sprint(opt)) - } - set := strings.Join(ss, "\n\t") - panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) -} - -func (opts Options) String() string { - var ss []string - for _, opt := range opts { - ss = append(ss, fmt.Sprint(opt)) - } - return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) -} - -// FilterPath returns a new [Option] where opt is only evaluated if filter f -// returns true for the current [Path] in the value tree. -// -// This filter is called even if a slice element or map entry is missing and -// provides an opportunity to ignore such cases. The filter function must be -// symmetric such that the filter result is identical regardless of whether the -// missing value is from x or y. -// -// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or -// a previously filtered [Option]. 
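Because Options is itself an Option, related settings can be bundled and passed around as a single value; cmp flattens the group and applies each member's filter independently. A small usage sketch with a tolerance-based float Comparer:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	opts := cmp.Options{
		// Symmetric and deterministic, as Comparer requires.
		cmp.Comparer(func(x, y float64) bool {
			return math.Abs(x-y) < 1e-9
		}),
	}
	fmt.Println(cmp.Equal(0.1+0.2, 0.3, opts)) // true despite rounding error
}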
-func FilterPath(f func(Path) bool, opt Option) Option { - if f == nil { - panic("invalid path filter function") - } - if opt := normalizeOption(opt); opt != nil { - return &pathFilter{fnc: f, opt: opt} - } - return nil -} - -type pathFilter struct { - core - fnc func(Path) bool - opt Option -} - -func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { - if f.fnc(s.curPath) { - return f.opt.filter(s, t, vx, vy) - } - return nil -} - -func (f pathFilter) String() string { - return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) -} - -// FilterValues returns a new [Option] where opt is only evaluated if filter f, -// which is a function of the form "func(T, T) bool", returns true for the -// current pair of values being compared. If either value is invalid or -// the type of the values is not assignable to T, then this filter implicitly -// returns false. -// -// The filter function must be -// symmetric (i.e., agnostic to the order of the inputs) and -// deterministic (i.e., produces the same result when given the same inputs). -// If T is an interface, it is possible that f is called with two values with -// different concrete types that both implement T. -// -// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or -// a previously filtered [Option]. -func FilterValues(f interface{}, opt Option) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { - panic(fmt.Sprintf("invalid values filter function: %T", f)) - } - if opt := normalizeOption(opt); opt != nil { - vf := &valuesFilter{fnc: v, opt: opt} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - vf.typ = ti - } - return vf - } - return nil -} - -type valuesFilter struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool - opt Option -} - -func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { - if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { - return nil - } - if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { - return f.opt.filter(s, t, vx, vy) - } - return nil -} - -func (f valuesFilter) String() string { - return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) -} - -// Ignore is an [Option] that causes all comparisons to be ignored. -// This value is intended to be combined with [FilterPath] or [FilterValues]. -// It is an error to pass an unfiltered Ignore option to [Equal]. -func Ignore() Option { return ignore{} } - -type ignore struct{ core } - -func (ignore) isFiltered() bool { return false } -func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } -func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } -func (ignore) String() string { return "Ignore()" } - -// validator is a sentinel Option type to indicate that some options could not -// be evaluated due to unexported fields, missing slice elements, or -// missing map entries. Both values are validator only for unexported fields. 
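Since an unfiltered Ignore is rejected by Equal, the usual pattern scopes it with FilterPath. A sketch (the Event type and its Timestamp field are hypothetical):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Event struct {
	ID        string
	Timestamp int64
}

func main() {
	// Ignore differences on any field whose final path step is .Timestamp.
	ignoreTimestamp := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".Timestamp"
	}, cmp.Ignore())

	x := Event{ID: "evt-1", Timestamp: 100}
	y := Event{ID: "evt-1", Timestamp: 999}
	fmt.Println(cmp.Equal(x, y, ignoreTimestamp)) // true
}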
-type validator struct{ core } - -func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { - if !vx.IsValid() || !vy.IsValid() { - return validator{} - } - if !vx.CanInterface() || !vy.CanInterface() { - return validator{} - } - return nil -} -func (validator) apply(s *state, vx, vy reflect.Value) { - // Implies missing slice element or map entry. - if !vx.IsValid() || !vy.IsValid() { - s.report(vx.IsValid() == vy.IsValid(), 0) - return - } - - // Unable to Interface implies unexported field without visibility access. - if !vx.CanInterface() || !vy.CanInterface() { - help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" - var name string - if t := s.curPath.Index(-2).Type(); t.Name() != "" { - // Named type with unexported fields. - name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - isProtoMessage := func(t reflect.Type) bool { - m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") - return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && - m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && - m.Type.Out(0).Name() == "Message" - } - if isProtoMessage(t) { - help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` - } else if _, ok := reflect.New(t).Interface().(error); ok { - help = "consider using cmpopts.EquateErrors to compare error values" - } else if t.Comparable() { - help = "consider using cmpopts.EquateComparable to compare comparable Go types" - } - } else { - // Unnamed type with unexported fields. Derive PkgPath from field. - var pkgPath string - for i := 0; i < t.NumField() && pkgPath == ""; i++ { - pkgPath = t.Field(i).PkgPath - } - name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) - } - panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) - } - - panic("not reachable") -} - -// identRx represents a valid identifier according to the Go specification. -const identRx = `[_\p{L}][_\p{L}\p{N}]*` - -var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) - -// Transformer returns an [Option] that applies a transformation function that -// converts values of a certain type into that of another. -// -// The transformer f must be a function "func(T) R" that converts values of -// type T to those of type R and is implicitly filtered to input values -// assignable to T. The transformer must not mutate T in any way. -// -// To help prevent some cases of infinite recursive cycles applying the -// same transform to the output of itself (e.g., in the case where the -// input and output types are the same), an implicit filter is added such that -// a transformer is applicable only if that exact transformer is not already -// in the tail of the [Path] since the last non-[Transform] step. -// For situations where the implicit filter is still insufficient, -// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], -// which adds a filter to prevent the transformer from -// being recursively applied upon itself. -// -// The name is a user provided label that is used as the [Transform.Name] in the -// transformation [PathStep] (and eventually shown in the [Diff] output). -// The name must be a valid identifier or qualified identifier in Go syntax. -// If empty, an arbitrary name is used. 
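The panic's help text names the standard escape hatches; cmpopts.IgnoreUnexported is the least invasive when the hidden state is irrelevant to equality. A sketch with a hypothetical counter type:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type counter struct {
	Count int
	mu    int // stand-in for unexported state such as a mutex
}

func main() {
	x, y := counter{Count: 1, mu: 7}, counter{Count: 1, mu: 9}
	// Without an option, Equal would panic via the validator above.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreUnexported(counter{}))) // true
}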
-func Transformer(name string, f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { - panic(fmt.Sprintf("invalid transformer function: %T", f)) - } - if name == "" { - name = function.NameOf(v) - if !identsRx.MatchString(name) { - name = "λ" // Lambda-symbol as placeholder name - } - } else if !identsRx.MatchString(name) { - panic(fmt.Sprintf("invalid name: %q", name)) - } - tr := &transformer{name: name, fnc: reflect.ValueOf(f)} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - tr.typ = ti - } - return tr -} - -type transformer struct { - core - name string - typ reflect.Type // T - fnc reflect.Value // func(T) R -} - -func (tr *transformer) isFiltered() bool { return tr.typ != nil } - -func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { - for i := len(s.curPath) - 1; i >= 0; i-- { - if t, ok := s.curPath[i].(Transform); !ok { - break // Hit most recent non-Transform step - } else if tr == t.trans { - return nil // Cannot directly use same Transform - } - } - if tr.typ == nil || t.AssignableTo(tr.typ) { - return tr - } - return nil -} - -func (tr *transformer) apply(s *state, vx, vy reflect.Value) { - step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} - vvx := s.callTRFunc(tr.fnc, vx, step) - vvy := s.callTRFunc(tr.fnc, vy, step) - step.vx, step.vy = vvx, vvy - s.compareAny(step) -} - -func (tr transformer) String() string { - return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) -} - -// Comparer returns an [Option] that determines whether two values are equal -// to each other. -// -// The comparer f must be a function "func(T, T) bool" and is implicitly -// filtered to input values assignable to T. If T is an interface, it is -// possible that f is called with two values of different concrete types that -// both implement T. -// -// The equality function must be: -// - Symmetric: equal(x, y) == equal(y, x) -// - Deterministic: equal(x, y) == equal(x, y) -// - Pure: equal(x, y) does not modify x or y -func Comparer(f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Equal) || v.IsNil() { - panic(fmt.Sprintf("invalid comparer function: %T", f)) - } - cm := &comparer{fnc: v} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - cm.typ = ti - } - return cm -} - -type comparer struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (cm *comparer) isFiltered() bool { return cm.typ != nil } - -func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { - if cm.typ == nil || t.AssignableTo(cm.typ) { - return cm - } - return nil -} - -func (cm *comparer) apply(s *state, vx, vy reflect.Value) { - eq := s.callTTBFunc(cm.fnc, vx, vy) - s.report(eq, reportByFunc) -} - -func (cm comparer) String() string { - return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) -} - -// Exporter returns an [Option] that specifies whether [Equal] is allowed to -// introspect into the unexported fields of certain struct types. -// -// Users of this option must understand that comparing on unexported fields -// from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of [Equal] -// to unexpectedly change. 
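A common use of Transformer is order-insensitive slice comparison. The copy before sorting matters, since transformers must not mutate their input. A sketch:

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	sortInts := cmp.Transformer("SortInts", func(in []int) []int {
		out := append([]int(nil), in...) // copy: never mutate the input
		sort.Ints(out)
		return out
	})
	fmt.Println(cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, sortInts)) // true
}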
However, it may be valid to use this option on types -// defined in an internal package where the semantic meaning of an unexported -// field is in the control of the user. -// -// In many cases, a custom [Comparer] should be used instead that defines -// equality as a function of the public API of a type rather than the underlying -// unexported implementation. -// -// For example, the [reflect.Type] documentation defines equality to be determined -// by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *[regexp.Regexp] types are interested -// in only checking that the regular expression strings are equal. -// Both of these are accomplished using [Comparer] options: -// -// Comparer(func(x, y reflect.Type) bool { return x == y }) -// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) -// -// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] -// option can be used to ignore all unexported fields on specified struct types. -func Exporter(f func(reflect.Type) bool) Option { - return exporter(f) -} - -type exporter func(reflect.Type) bool - -func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") -} - -// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect -// unexported fields of the specified struct types. -// -// See [Exporter] for the proper use of this option. -func AllowUnexported(types ...interface{}) Option { - m := make(map[reflect.Type]bool) - for _, typ := range types { - t := reflect.TypeOf(typ) - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("invalid struct type: %T", typ)) - } - m[t] = true - } - return exporter(func(t reflect.Type) bool { return m[t] }) -} - -// Result represents the comparison result for a single node and -// is provided by cmp when calling Report (see [Reporter]). -type Result struct { - _ [0]func() // Make Result incomparable - flags resultFlags -} - -// Equal reports whether the node was determined to be equal or not. -// As a special case, ignored nodes are considered equal. -func (r Result) Equal() bool { - return r.flags&(reportEqual|reportByIgnore) != 0 -} - -// ByIgnore reports whether the node is equal because it was ignored. -// This never reports true if [Result.Equal] reports false. -func (r Result) ByIgnore() bool { - return r.flags&reportByIgnore != 0 -} - -// ByMethod reports whether the Equal method determined equality. -func (r Result) ByMethod() bool { - return r.flags&reportByMethod != 0 -} - -// ByFunc reports whether a [Comparer] function determined equality. -func (r Result) ByFunc() bool { - return r.flags&reportByFunc != 0 -} - -// ByCycle reports whether a reference cycle was detected. -func (r Result) ByCycle() bool { - return r.flags&reportByCycle != 0 -} - -type resultFlags uint - -const ( - _ resultFlags = (1 << iota) / 2 - - reportEqual - reportUnequal - reportByIgnore - reportByMethod - reportByFunc - reportByCycle -) - -// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses -// the value trees, it calls PushStep as it descends into each node in the -// tree and PopStep as it ascend out of the node. The leaves of the tree are -// either compared (determined to be equal or not equal) or ignored and reported -// as such by calling the Report method. -func Reporter(r interface { - // PushStep is called when a tree-traversal operation is performed. 
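AllowUnexported is the blunt counterpart to Exporter: it whitelists concrete struct types rather than taking a predicate. A sketch with a hypothetical token type that the caller controls:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type token struct {
	id string // unexported on purpose
}

func main() {
	x, y := token{id: "abc"}, token{id: "abc"}
	// Opt this specific type into deep comparison of unexported fields.
	fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(token{}))) // true
}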
- // The PathStep itself is only valid until the step is popped. - // The PathStep.Values are valid for the duration of the entire traversal - // and must not be mutated. - // - // Equal always calls PushStep at the start to provide an operation-less - // PathStep used to report the root values. - // - // Within a slice, the exact set of inserted, removed, or modified elements - // is unspecified and may change in future implementations. - // The entries of a map are iterated through in an unspecified order. - PushStep(PathStep) - - // Report is called exactly once on leaf nodes to report whether the - // comparison identified the node as equal, unequal, or ignored. - // A leaf node is one that is immediately preceded by and followed by - // a pair of PushStep and PopStep calls. - Report(Result) - - // PopStep ascends back up the value tree. - // There is always a matching pop call for every push call. - PopStep() -}) Option { - return reporter{r} -} - -type reporter struct{ reporterIface } -type reporterIface interface { - PushStep(PathStep) - Report(Result) - PopStep() -} - -func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") -} - -// normalizeOption normalizes the input options such that all Options groups -// are flattened and groups with a single element are reduced to that element. -// Only coreOptions and Options containing coreOptions are allowed. -func normalizeOption(src Option) Option { - switch opts := flattenOptions(nil, Options{src}); len(opts) { - case 0: - return nil - case 1: - return opts[0] - default: - return opts - } -} - -// flattenOptions copies all options in src to dst as a flat list. -// Only coreOptions and Options containing coreOptions are allowed. -func flattenOptions(dst, src Options) Options { - for _, opt := range src { - switch opt := opt.(type) { - case nil: - continue - case Options: - dst = flattenOptions(dst, opt) - case coreOption: - dst = append(dst, opt) - default: - panic(fmt.Sprintf("invalid option type: %T", opt)) - } - } - return dst -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/path.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/path.go deleted file mode 100644 index c3c1456423..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/path.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/value" -) - -// Path is a list of [PathStep] describing the sequence of operations to get -// from some root type to the current position in the value tree. -// The first Path element is always an operation-less [PathStep] that exists -// simply to identify the initial type. -// -// When traversing structs with embedded structs, the embedded struct will -// always be accessed as a field before traversing the fields of the -// embedded struct themselves. That is, an exported field from the -// embedded struct will never be accessed directly from the parent struct. -type Path []PathStep - -// PathStep is a union-type for specific operations to traverse -// a value's tree structure. Users of this package never need to implement -// these types as values of this type will be returned by this package. 
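A working Reporter needs only the three methods plus a path stack. A sketch under the hypothetical name diffReporter that records the path of every unequal leaf:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter accumulates one line per unequal leaf node.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v: %v != %v", r.path, vx, vy))
	}
}

type Pair struct{ A, B int }

func main() {
	var r diffReporter
	cmp.Equal(Pair{1, 2}, Pair{1, 3}, cmp.Reporter(&r))
	for _, d := range r.diffs {
		fmt.Println(d) // reports the .B mismatch
	}
}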
-// -// Implementations of this interface: -// - [StructField] -// - [SliceIndex] -// - [MapIndex] -// - [Indirect] -// - [TypeAssertion] -// - [Transform] -type PathStep interface { - String() string - - // Type is the resulting type after performing the path step. - Type() reflect.Type - - // Values is the resulting values after performing the path step. - // The type of each valid value is guaranteed to be identical to Type. - // - // In some cases, one or both may be invalid or have restrictions: - // - For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // - For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // - For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. - // - // The provided values must not be mutated. - Values() (vx, vy reflect.Value) -} - -var ( - _ PathStep = StructField{} - _ PathStep = SliceIndex{} - _ PathStep = MapIndex{} - _ PathStep = Indirect{} - _ PathStep = TypeAssertion{} - _ PathStep = Transform{} -) - -func (pa *Path) push(s PathStep) { - *pa = append(*pa, s) -} - -func (pa *Path) pop() { - *pa = (*pa)[:len(*pa)-1] -} - -// Last returns the last [PathStep] in the Path. -// If the path is empty, this returns a non-nil [PathStep] -// that reports a nil [PathStep.Type]. -func (pa Path) Last() PathStep { - return pa.Index(-1) -} - -// Index returns the ith step in the Path and supports negative indexing. -// A negative index starts counting from the tail of the Path such that -1 -// refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil [PathStep] -// that reports a nil [PathStep.Type]. -func (pa Path) Index(i int) PathStep { - if i < 0 { - i = len(pa) + i - } - if i < 0 || i >= len(pa) { - return pathStep{} - } - return pa[i] -} - -// String returns the simplified path to a node. -// The simplified path only contains struct field accesses. -// -// For example: -// -// MyMap.MySlices.MyField -func (pa Path) String() string { - var ss []string - for _, s := range pa { - if _, ok := s.(StructField); ok { - ss = append(ss, s.String()) - } - } - return strings.TrimPrefix(strings.Join(ss, ""), ".") -} - -// GoString returns the path to a specific node using Go syntax. 
-// -// For example: -// -// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField -func (pa Path) GoString() string { - var ssPre, ssPost []string - var numIndirect int - for i, s := range pa { - var nextStep PathStep - if i+1 < len(pa) { - nextStep = pa[i+1] - } - switch s := s.(type) { - case Indirect: - numIndirect++ - pPre, pPost := "(", ")" - switch nextStep.(type) { - case Indirect: - continue // Next step is indirection, so let them batch up - case StructField: - numIndirect-- // Automatic indirection on struct fields - case nil: - pPre, pPost = "", "" // Last step; no need for parenthesis - } - if numIndirect > 0 { - ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) - ssPost = append(ssPost, pPost) - } - numIndirect = 0 - continue - case Transform: - ssPre = append(ssPre, s.trans.name+"(") - ssPost = append(ssPost, ")") - continue - } - ssPost = append(ssPost, s.String()) - } - for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { - ssPre[i], ssPre[j] = ssPre[j], ssPre[i] - } - return strings.Join(ssPre, "") + strings.Join(ssPost, "") -} - -type pathStep struct { - typ reflect.Type - vx, vy reflect.Value -} - -func (ps pathStep) Type() reflect.Type { return ps.typ } -func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } -func (ps pathStep) String() string { - if ps.typ == nil { - return "" - } - s := value.TypeString(ps.typ, false) - if s == "" || strings.ContainsAny(s, "{}\n") { - return "root" // Type too simple or complex to print - } - return fmt.Sprintf("{%s}", s) -} - -// StructField is a [PathStep] that represents a struct field access -// on a field called [StructField.Name]. -type StructField struct{ *structField } -type structField struct { - pathStep - name string - idx int - - // These fields are used for forcibly accessing an unexported field. - // pvx, pvy, and field are only valid if unexported is true. - unexported bool - mayForce bool // Forcibly allow visibility - paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressable) - field reflect.StructField // Field information -} - -func (sf StructField) Type() reflect.Type { return sf.typ } -func (sf StructField) Values() (vx, vy reflect.Value) { - if !sf.unexported { - return sf.vx, sf.vy // CanInterface reports true - } - - // Forcibly obtain read-write access to an unexported struct field. - if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) - vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) - return vx, vy // CanInterface reports true - } - return sf.vx, sf.vy // CanInterface reports false -} -func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } - -// Name is the field name. -func (sf StructField) Name() string { return sf.name } - -// Index is the index of the field in the parent struct type. -// See [reflect.Type.Field]. -func (sf StructField) Index() int { return sf.idx } - -// SliceIndex is a [PathStep] that represents an index operation on -// a slice or array at some index [SliceIndex.Key]. -type SliceIndex struct{ *sliceIndex } -type sliceIndex struct { - pathStep - xkey, ykey int - isSlice bool // False for reflect.Array -} - -func (si SliceIndex) Type() reflect.Type { return si.typ } -func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } -func (si SliceIndex) String() string { - switch { - case si.xkey == si.ykey: - return fmt.Sprintf("[%d]", si.xkey) - case si.ykey == -1: - // [5->?] 
means "I don't know where X[5] went" - return fmt.Sprintf("[%d->?]", si.xkey) - case si.xkey == -1: - // [?->3] means "I don't know where Y[3] came from" - return fmt.Sprintf("[?->%d]", si.ykey) - default: - // [5->3] means "X[5] moved to Y[3]" - return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) - } -} - -// Key is the index key; it may return -1 if in a split state -func (si SliceIndex) Key() int { - if si.xkey != si.ykey { - return -1 - } - return si.xkey -} - -// SplitKeys are the indexes for indexing into slices in the -// x and y values, respectively. These indexes may differ due to the -// insertion or removal of an element in one of the slices, causing -// all of the indexes to be shifted. If an index is -1, then that -// indicates that the element does not exist in the associated slice. -// -// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes -// returned by SplitKeys are not the same. SplitKeys will never return -1 for -// both indexes. -func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } - -// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. -type MapIndex struct{ *mapIndex } -type mapIndex struct { - pathStep - key reflect.Value -} - -func (mi MapIndex) Type() reflect.Type { return mi.typ } -func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } -func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } - -// Key is the value of the map key. -func (mi MapIndex) Key() reflect.Value { return mi.key } - -// Indirect is a [PathStep] that represents pointer indirection on the parent type. -type Indirect struct{ *indirect } -type indirect struct { - pathStep -} - -func (in Indirect) Type() reflect.Type { return in.typ } -func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } -func (in Indirect) String() string { return "*" } - -// TypeAssertion is a [PathStep] that represents a type assertion on an interface. -type TypeAssertion struct{ *typeAssertion } -type typeAssertion struct { - pathStep -} - -func (ta TypeAssertion) Type() reflect.Type { return ta.typ } -func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } - -// Transform is a [PathStep] that represents a transformation -// from the parent type to the current type. -type Transform struct{ *transform } -type transform struct { - pathStep - trans *transformer -} - -func (tf Transform) Type() reflect.Type { return tf.typ } -func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } -func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } - -// Name is the name of the [Transformer]. -func (tf Transform) Name() string { return tf.trans.name } - -// Func is the function pointer to the transformer function. -func (tf Transform) Func() reflect.Value { return tf.trans.fnc } - -// Option returns the originally constructed [Transformer] option. -// The == operator can be used to detect the exact option used. -func (tf Transform) Option() Option { return tf.trans } - -// pointerPath represents a dual-stack of pointers encountered when -// recursively traversing the x and y values. This data structure supports -// detection of cycles and determining whether the cycles are equal. -// In Go, cycles can occur via pointers, slices, and maps. 
-// -// The pointerPath uses a map to represent a stack; where descension into a -// pointer pushes the address onto the stack, and ascension from a pointer -// pops the address from the stack. Thus, when traversing into a pointer from -// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles -// by checking whether the pointer has already been visited. The cycle detection -// uses a separate stack for the x and y values. -// -// If a cycle is detected we need to determine whether the two pointers -// should be considered equal. The definition of equality chosen by Equal -// requires two graphs to have the same structure. To determine this, both the -// x and y values must have a cycle where the previous pointers were also -// encountered together as a pair. -// -// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and -// MapIndex with pointer information for the x and y values. -// Suppose px and py are two pointers to compare, we then search the -// Path for whether px was ever encountered in the Path history of x, and -// similarly so with py. If either side has a cycle, the comparison is only -// equal if both px and py have a cycle resulting from the same PathStep. -// -// Using a map as a stack is more performant as we can perform cycle detection -// in O(1) instead of O(N) where N is len(Path). -type pointerPath struct { - // mx is keyed by x pointers, where the value is the associated y pointer. - mx map[value.Pointer]value.Pointer - // my is keyed by y pointers, where the value is the associated x pointer. - my map[value.Pointer]value.Pointer -} - -func (p *pointerPath) Init() { - p.mx = make(map[value.Pointer]value.Pointer) - p.my = make(map[value.Pointer]value.Pointer) -} - -// Push indicates intent to descend into pointers vx and vy where -// visited reports whether either has been seen before. If visited before, -// equal reports whether both pointers were encountered together. -// Pop must be called if and only if the pointers were never visited. -// -// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map -// and be non-nil. -func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { - px := value.PointerOf(vx) - py := value.PointerOf(vy) - _, ok1 := p.mx[px] - _, ok2 := p.my[py] - if ok1 || ok2 { - equal = p.mx[px] == py && p.my[py] == px // Pointers paired together - return equal, true - } - p.mx[px] = py - p.my[py] = px - return false, false -} - -// Pop ascends from pointers vx and vy. -func (p pointerPath) Pop(vx, vy reflect.Value) { - delete(p.mx, value.PointerOf(vx)) - delete(p.my, value.PointerOf(vy)) -} - -// isExported reports whether the identifier is exported. -func isExported(id string) bool { - r, _ := utf8.DecodeRuneInString(id) - return unicode.IsUpper(r) -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/report.go deleted file mode 100644 index f43cd12eb5..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -// defaultReporter implements the reporter interface. -// -// As Equal serially calls the PushStep, Report, and PopStep methods, the -// defaultReporter constructs a tree-based representation of the compared value -// and the result of each comparison (see valueNode). 
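This machinery is what lets Equal terminate on cyclic values instead of recursing forever. A sketch with a hypothetical self-referential Node type:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Node struct {
	Value int
	Next  *Node
}

// ring builds a circular list so that traversal revisits its head.
func ring(vals ...int) *Node {
	head := &Node{Value: vals[0]}
	curr := head
	for _, v := range vals[1:] {
		curr.Next = &Node{Value: v}
		curr = curr.Next
	}
	curr.Next = head // close the cycle
	return head
}

func main() {
	// Same shape and contents on both sides, so the paired-pointer
	// check treats the cycles as equal.
	fmt.Println(cmp.Equal(ring(1, 2, 3), ring(1, 2, 3))) // true
}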
-// -// When the String method is called, the FormatDiff method transforms the -// valueNode tree into a textNode tree, which is a tree-based representation -// of the textual output (see textNode). -// -// Lastly, the textNode.String method produces the final report as a string. -type defaultReporter struct { - root *valueNode - curr *valueNode -} - -func (r *defaultReporter) PushStep(ps PathStep) { - r.curr = r.curr.PushStep(ps) - if r.root == nil { - r.root = r.curr - } -} -func (r *defaultReporter) Report(rs Result) { - r.curr.Report(rs) -} -func (r *defaultReporter) PopStep() { - r.curr = r.curr.PopStep() -} - -// String provides a full report of the differences detected as a structured -// literal in pseudo-Go syntax. String may only be called after the entire tree -// has been traversed. -func (r *defaultReporter) String() string { - assert(r.root != nil && r.curr == nil) - if r.root.NumDiff == 0 { - return "" - } - ptrs := new(pointerReferences) - text := formatOptions{}.FormatDiff(r.root, ptrs) - resolveReferences(text) - return text.String() -} - -func assert(ok bool) { - if !ok { - panic("assertion failure") - } -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_compare.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_compare.go deleted file mode 100644 index 2050bf6b46..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ /dev/null @@ -1,433 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "fmt" - "reflect" -) - -// numContextRecords is the number of surrounding equal records to print. -const numContextRecords = 2 - -type diffMode byte - -const ( - diffUnknown diffMode = 0 - diffIdentical diffMode = ' ' - diffRemoved diffMode = '-' - diffInserted diffMode = '+' -) - -type typeMode int - -const ( - // emitType always prints the type. - emitType typeMode = iota - // elideType never prints the type. - elideType - // autoType prints the type only for composite kinds - // (i.e., structs, slices, arrays, and maps). - autoType -) - -type formatOptions struct { - // DiffMode controls the output mode of FormatDiff. - // - // If diffUnknown, then produce a diff of the x and y values. - // If diffIdentical, then emit values as if they were equal. - // If diffRemoved, then only emit x values (ignoring y values). - // If diffInserted, then only emit y values (ignoring x values). - DiffMode diffMode - - // TypeMode controls whether to print the type for the current node. - // - // As a general rule of thumb, we always print the type of the next node - // after an interface, and always elide the type of the next node after - // a slice or map node. - TypeMode typeMode - - // formatValueOptions are options specific to printing reflect.Values. 
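The defaultReporter is what backs the string returned by Diff. A usage sketch (the Spec type is hypothetical); the exact rendering is produced by the report formatting that follows:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Spec struct {
	Replicas int
	Image    string
}

func main() {
	x := Spec{Replicas: 2, Image: "app:v1"}
	y := Spec{Replicas: 3, Image: "app:v1"}
	// Non-empty output renders the mismatch in pseudo-Go syntax,
	// with "-" marking x values and "+" marking y values.
	if d := cmp.Diff(x, y); d != "" {
		fmt.Print(d)
	}
}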
- formatValueOptions -} - -func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { - opts.DiffMode = d - return opts -} -func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { - opts.TypeMode = t - return opts -} -func (opts formatOptions) WithVerbosity(level int) formatOptions { - opts.VerbosityLevel = level - opts.LimitVerbosity = true - return opts -} -func (opts formatOptions) verbosity() uint { - switch { - case opts.VerbosityLevel < 0: - return 0 - case opts.VerbosityLevel > 16: - return 16 // some reasonable maximum to avoid shift overflow - default: - return uint(opts.VerbosityLevel) - } -} - -const maxVerbosityPreset = 6 - -// verbosityPreset modifies the verbosity settings given an index -// between 0 and maxVerbosityPreset, inclusive. -func verbosityPreset(opts formatOptions, i int) formatOptions { - opts.VerbosityLevel = int(opts.verbosity()) + 2*i - if i > 0 { - opts.AvoidStringer = true - } - if i >= maxVerbosityPreset { - opts.PrintAddresses = true - opts.QualifiedNames = true - } - return opts -} - -// FormatDiff converts a valueNode tree into a textNode tree, where the later -// is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { - if opts.DiffMode == diffIdentical { - opts = opts.WithVerbosity(1) - } else if opts.verbosity() < 3 { - opts = opts.WithVerbosity(3) - } - - // Check whether we have specialized formatting for this node. - // This is not necessary, but helpful for producing more readable outputs. - if opts.CanFormatDiffSlice(v) { - return opts.FormatDiffSlice(v) - } - - var parentKind reflect.Kind - if v.parent != nil && v.parent.TransformerName == "" { - parentKind = v.parent.Type.Kind() - } - - // For leaf nodes, format the value based on the reflect.Values alone. - // As a special case, treat equal []byte as a leaf nodes. - isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType - isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 - if v.MaxDepth == 0 || isEqualBytes { - switch opts.DiffMode { - case diffUnknown, diffIdentical: - // Format Equal. - if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, parentKind, ptrs) - outy := opts.FormatValue(v.ValueY, parentKind, ptrs) - if v.NumIgnored > 0 && v.NumSame == 0 { - return textEllipsis - } else if outx.Len() < outy.Len() { - return outx - } else { - return outy - } - } - - // Format unequal. - assert(opts.DiffMode == diffUnknown) - var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) - for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { - opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) - outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) - outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) - } - if outx != nil { - list = append(list, textRecord{Diff: '-', Value: outx}) - } - if outy != nil { - list = append(list, textRecord{Diff: '+', Value: outy}) - } - return opts.WithTypeMode(emitType).FormatType(v.Type, list) - case diffRemoved: - return opts.FormatValue(v.ValueX, parentKind, ptrs) - case diffInserted: - return opts.FormatValue(v.ValueY, parentKind, ptrs) - default: - panic("invalid diff mode") - } - } - - // Register slice element to support cycle detection. 
- if parentKind == reflect.Slice { - ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) - defer ptrs.Pop() - defer func() { out = wrapTrunkReferences(ptrRefs, out) }() - } - - // Descend into the child value node. - if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) - out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} - return opts.FormatType(v.Type, out) - } else { - switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice: - out = opts.formatDiffList(v.Records, k, ptrs) - out = opts.FormatType(v.Type, out) - case reflect.Map: - // Register map to support cycle detection. - ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) - defer ptrs.Pop() - - out = opts.formatDiffList(v.Records, k, ptrs) - out = wrapTrunkReferences(ptrRefs, out) - out = opts.FormatType(v.Type, out) - case reflect.Ptr: - // Register pointer to support cycle detection. - ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) - defer ptrs.Pop() - - out = opts.FormatDiff(v.Value, ptrs) - out = wrapTrunkReferences(ptrRefs, out) - out = &textWrap{Prefix: "&", Value: out} - case reflect.Interface: - out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) - default: - panic(fmt.Sprintf("%v cannot have children", k)) - } - return out - } -} - -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { - // Derive record name based on the data structure kind. - var name string - var formatKey func(reflect.Value) string - switch k { - case reflect.Struct: - name = "field" - opts = opts.WithTypeMode(autoType) - formatKey = func(v reflect.Value) string { return v.String() } - case reflect.Slice, reflect.Array: - name = "element" - opts = opts.WithTypeMode(elideType) - formatKey = func(reflect.Value) string { return "" } - case reflect.Map: - name = "entry" - opts = opts.WithTypeMode(elideType) - formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } - } - - maxLen := -1 - if opts.LimitVerbosity { - if opts.DiffMode == diffIdentical { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... - } else { - maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... - } - opts.VerbosityLevel-- - } - - // Handle unification. - switch opts.DiffMode { - case diffIdentical, diffRemoved, diffInserted: - var list textList - var deferredEllipsis bool // Add final "..." to indicate records were dropped - for _, r := range recs { - if len(list) == maxLen { - deferredEllipsis = true - break - } - - // Elide struct fields that are zero value. - if k == reflect.Struct { - var isZero bool - switch opts.DiffMode { - case diffIdentical: - isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() - case diffRemoved: - isZero = r.Value.ValueX.IsZero() - case diffInserted: - isZero = r.Value.ValueY.IsZero() - } - if isZero { - continue - } - } - // Elide ignored nodes. 
- if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { - deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) - if !deferredEllipsis { - list.AppendEllipsis(diffStats{}) - } - continue - } - if out := opts.FormatDiff(r.Value, ptrs); out != nil { - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - } - } - if deferredEllipsis { - list.AppendEllipsis(diffStats{}) - } - return &textWrap{Prefix: "{", Value: list, Suffix: "}"} - case diffUnknown: - default: - panic("invalid diff mode") - } - - // Handle differencing. - var numDiffs int - var list textList - var keys []reflect.Value // invariant: len(list) == len(keys) - groups := coalesceAdjacentRecords(name, recs) - maxGroup := diffStats{Name: name} - for i, ds := range groups { - if maxLen >= 0 && numDiffs >= maxLen { - maxGroup = maxGroup.Append(ds) - continue - } - - // Handle equal records. - if ds.NumDiff() == 0 { - // Compute the number of leading and trailing records to print. - var numLo, numHi int - numEqual := ds.NumIgnored + ds.NumIdentical - for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { - if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { - break - } - numLo++ - } - for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { - if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { - break - } - numHi++ - } - if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { - numHi++ // Avoid pointless coalescing of a single equal record - } - - // Format the equal values. - for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - } - if numEqual > numLo+numHi { - ds.NumIdentical -= numLo + numHi - list.AppendEllipsis(ds) - for len(keys) < len(list) { - keys = append(keys, reflect.Value{}) - } - } - for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - } - recs = recs[numEqual:] - continue - } - - // Handle unequal records. 
- for _, r := range recs[:ds.NumDiff()] { - switch { - case opts.CanFormatDiffSlice(r.Value): - out := opts.FormatDiffSlice(r.Value) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) - for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { - opts2 := verbosityPreset(opts, i) - outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) - outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) - } - if outx != nil { - list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) - keys = append(keys, r.Key) - } - if outy != nil { - list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) - keys = append(keys, r.Key) - } - default: - out := opts.FormatDiff(r.Value, ptrs) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - } - } - recs = recs[ds.NumDiff():] - numDiffs += ds.NumDiff() - } - if maxGroup.IsZero() { - assert(len(recs) == 0) - } else { - list.AppendEllipsis(maxGroup) - for len(keys) < len(list) { - keys = append(keys, reflect.Value{}) - } - } - assert(len(list) == len(keys)) - - // For maps, the default formatting logic uses fmt.Stringer which may - // produce ambiguous output. Avoid calling String to disambiguate. - if k == reflect.Map { - var ambiguous bool - seenKeys := map[string]reflect.Value{} - for i, currKey := range keys { - if currKey.IsValid() { - strKey := list[i].Key - prevKey, seen := seenKeys[strKey] - if seen && prevKey.CanInterface() && currKey.CanInterface() { - ambiguous = prevKey.Interface() != currKey.Interface() - if ambiguous { - break - } - } - seenKeys[strKey] = currKey - } - } - if ambiguous { - for i, k := range keys { - if k.IsValid() { - list[i].Key = formatMapKey(k, true, ptrs) - } - } - } - } - - return &textWrap{Prefix: "{", Value: list, Suffix: "}"} -} - -// coalesceAdjacentRecords coalesces the list of records into groups of -// adjacent equal, or unequal counts. -func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { - var prevCase int // Arbitrary index into which case last occurred - lastStats := func(i int) *diffStats { - if prevCase != i { - groups = append(groups, diffStats{Name: name}) - prevCase = i - } - return &groups[len(groups)-1] - } - for _, r := range recs { - switch rv := r.Value; { - case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: - lastStats(1).NumIgnored++ - case rv.NumDiff == 0: - lastStats(1).NumIdentical++ - case rv.NumDiff > 0 && !rv.ValueY.IsValid(): - lastStats(2).NumRemoved++ - case rv.NumDiff > 0 && !rv.ValueX.IsValid(): - lastStats(2).NumInserted++ - default: - lastStats(2).NumModified++ - } - } - return groups -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_references.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_references.go deleted file mode 100644 index be31b33a9e..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_references.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2020, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
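coalesceAdjacentRecords in the file above boils down to run-length grouping: adjacent records collapse into alternating equal and unequal groups. A self-contained sketch of the same idea (hypothetical types, not the real diffStats):

package main

import "fmt"

type stats struct{ same, diff int }

// coalesce groups a sequence of per-record equality results into runs.
func coalesce(equal []bool) []stats {
	var groups []stats
	prevEqual := true
	for i, eq := range equal {
		if i == 0 || eq != prevEqual {
			groups = append(groups, stats{})
			prevEqual = eq
		}
		if eq {
			groups[len(groups)-1].same++
		} else {
			groups[len(groups)-1].diff++
		}
	}
	return groups
}

func main() {
	// "..XX." coalesces into [{2 0} {0 2} {1 0}].
	fmt.Println(coalesce([]bool{true, true, false, false, true}))
}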
- -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/flags" - "github.com/google/go-cmp/cmp/internal/value" -) - -const ( - pointerDelimPrefix = "⟪" - pointerDelimSuffix = "⟫" -) - -// formatPointer prints the address of the pointer. -func formatPointer(p value.Pointer, withDelims bool) string { - v := p.Uintptr() - if flags.Deterministic { - v = 0xdeadf00f // Only used for stable testing purposes - } - if withDelims { - return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix - } - return formatHex(uint64(v)) -} - -// pointerReferences is a stack of pointers visited so far. -type pointerReferences [][2]value.Pointer - -func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { - if deref && vx.IsValid() { - vx = vx.Addr() - } - if deref && vy.IsValid() { - vy = vy.Addr() - } - switch d { - case diffUnknown, diffIdentical: - pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} - case diffRemoved: - pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} - case diffInserted: - pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} - } - *ps = append(*ps, pp) - return pp -} - -func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { - p = value.PointerOf(v) - for _, pp := range *ps { - if p == pp[0] || p == pp[1] { - return p, true - } - } - *ps = append(*ps, [2]value.Pointer{p, p}) - return p, false -} - -func (ps *pointerReferences) Pop() { - *ps = (*ps)[:len(*ps)-1] -} - -// trunkReferences is metadata for a textNode indicating that the sub-tree -// represents the value for either pointer in a pair of references. -type trunkReferences struct{ pp [2]value.Pointer } - -// trunkReference is metadata for a textNode indicating that the sub-tree -// represents the value for the given pointer reference. -type trunkReference struct{ p value.Pointer } - -// leafReference is metadata for a textNode indicating that the value is -// truncated as it refers to another part of the tree (i.e., a trunk). -type leafReference struct{ p value.Pointer } - -func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { - switch { - case pp[0].IsNil(): - return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} - case pp[1].IsNil(): - return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} - case pp[0] == pp[1]: - return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} - default: - return &textWrap{Value: s, Metadata: trunkReferences{pp}} - } -} -func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { - var prefix string - if printAddress { - prefix = formatPointer(p, true) - } - return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} -} -func makeLeafReference(p value.Pointer, printAddress bool) textNode { - out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} - var prefix string - if printAddress { - prefix = formatPointer(p, true) - } - return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} -} - -// resolveReferences walks the textNode tree searching for any leaf reference -// metadata and resolves each against the corresponding trunk references. -// Since pointer addresses in memory are not particularly readable to the user, -// it replaces each pointer value with an arbitrary and unique reference ID. 
-func resolveReferences(s textNode) { - var walkNodes func(textNode, func(textNode)) - walkNodes = func(s textNode, f func(textNode)) { - f(s) - switch s := s.(type) { - case *textWrap: - walkNodes(s.Value, f) - case textList: - for _, r := range s { - walkNodes(r.Value, f) - } - } - } - - // Collect all trunks and leaves with reference metadata. - var trunks, leaves []*textWrap - walkNodes(s, func(s textNode) { - if s, ok := s.(*textWrap); ok { - switch s.Metadata.(type) { - case leafReference: - leaves = append(leaves, s) - case trunkReference, trunkReferences: - trunks = append(trunks, s) - } - } - }) - - // No leaf references to resolve. - if len(leaves) == 0 { - return - } - - // Collect the set of all leaf references to resolve. - leafPtrs := make(map[value.Pointer]bool) - for _, leaf := range leaves { - leafPtrs[leaf.Metadata.(leafReference).p] = true - } - - // Collect the set of trunk pointers that are always paired together. - // This allows us to assign a single ID to both pointers for brevity. - // If a pointer in a pair ever occurs by itself or as a different pair, - // then the pair is broken. - pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) - unpair := func(p value.Pointer) { - if !pairedTrunkPtrs[p].IsNil() { - pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half - } - pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half - } - for _, trunk := range trunks { - switch p := trunk.Metadata.(type) { - case trunkReference: - unpair(p.p) // standalone pointer cannot be part of a pair - case trunkReferences: - p0, ok0 := pairedTrunkPtrs[p.pp[0]] - p1, ok1 := pairedTrunkPtrs[p.pp[1]] - switch { - case !ok0 && !ok1: - // Register the newly seen pair. - pairedTrunkPtrs[p.pp[0]] = p.pp[1] - pairedTrunkPtrs[p.pp[1]] = p.pp[0] - case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: - // Exact pair already seen; do nothing. - default: - // Pair conflicts with some other pair; break all pairs. - unpair(p.pp[0]) - unpair(p.pp[1]) - } - } - } - - // Correlate each pointer referenced by leaves to a unique identifier, - // and print the IDs for each trunk that matches those pointers. 
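That correlation step, sketched standalone (hypothetical names): each distinct pointer gets a small reference number in order of first appearance, so reports can print ref#0, ref#1, ... instead of raw addresses.

package main

import "fmt"

func main() {
	ptrs := []uintptr{0xc0000100, 0xc0000200, 0xc0000100, 0xc0000300}
	ids := map[uintptr]uint{}
	var next uint
	for _, p := range ptrs {
		id, ok := ids[p]
		if !ok {
			id = next
			ids[p] = id
			next++
		}
		fmt.Printf("%#x -> ref#%d\n", p, id) // repeated pointers reuse their ID
	}
}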
- var nextID uint - ptrIDs := make(map[value.Pointer]uint) - newID := func() uint { - id := nextID - nextID++ - return id - } - for _, trunk := range trunks { - switch p := trunk.Metadata.(type) { - case trunkReference: - if print := leafPtrs[p.p]; print { - id, ok := ptrIDs[p.p] - if !ok { - id = newID() - ptrIDs[p.p] = id - } - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) - } - case trunkReferences: - print0 := leafPtrs[p.pp[0]] - print1 := leafPtrs[p.pp[1]] - if print0 || print1 { - id0, ok0 := ptrIDs[p.pp[0]] - id1, ok1 := ptrIDs[p.pp[1]] - isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] - if isPair { - var id uint - assert(ok0 == ok1) // must be seen together or not at all - if ok0 { - assert(id0 == id1) // must have the same ID - id = id0 - } else { - id = newID() - ptrIDs[p.pp[0]] = id - ptrIDs[p.pp[1]] = id - } - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) - } else { - if print0 && !ok0 { - id0 = newID() - ptrIDs[p.pp[0]] = id0 - } - if print1 && !ok1 { - id1 = newID() - ptrIDs[p.pp[1]] = id1 - } - switch { - case print0 && print1: - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) - case print0: - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) - case print1: - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) - } - } - } - } - } - - // Update all leaf references with the unique identifier. - for _, leaf := range leaves { - if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { - leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) - } - } -} - -func formatReference(id uint) string { - return fmt.Sprintf("ref#%d", id) -} - -func updateReferencePrefix(prefix, ref string) string { - if prefix == "" { - return pointerDelimPrefix + ref + pointerDelimSuffix - } - suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) - return pointerDelimPrefix + ref + ": " + suffix -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_reflect.go deleted file mode 100644 index e39f42284e..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/value" -) - -var ( - anyType = reflect.TypeOf((*interface{})(nil)).Elem() - stringType = reflect.TypeOf((*string)(nil)).Elem() - bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() - byteType = reflect.TypeOf((*byte)(nil)).Elem() -) - -type formatValueOptions struct { - // AvoidStringer controls whether to avoid calling custom stringer - // methods like error.Error or fmt.Stringer.String. - AvoidStringer bool - - // PrintAddresses controls whether to print the address of all pointers, - // slice elements, and maps. - PrintAddresses bool - - // QualifiedNames controls whether FormatType uses the fully qualified name - // (including the full package path as opposed to just the package name). - QualifiedNames bool - - // VerbosityLevel controls the amount of output to produce. - // A higher value produces more output. 
A value of zero or lower produces - // no output (represented using an ellipsis). - // If LimitVerbosity is false, then the level is treated as infinite. - VerbosityLevel int - - // LimitVerbosity specifies that formatting should respect VerbosityLevel. - LimitVerbosity bool -} - -// FormatType prints the type as if it were wrapping s. -// This may return s as-is depending on the current type and TypeMode mode. -func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { - // Check whether to emit the type or not. - switch opts.TypeMode { - case autoType: - switch t.Kind() { - case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: - if s.Equal(textNil) { - return s - } - default: - return s - } - if opts.DiffMode == diffIdentical { - return s // elide type for identical nodes - } - case elideType: - return s - } - - // Determine the type label, applying special handling for unnamed types. - typeName := value.TypeString(t, opts.QualifiedNames) - if t.Name() == "" { - // According to Go grammar, certain type literals contain symbols that - // do not strongly bind to the next lexicographical token (e.g., *T). - switch t.Kind() { - case reflect.Chan, reflect.Func, reflect.Ptr: - typeName = "(" + typeName + ")" - } - } - return &textWrap{Prefix: typeName, Value: wrapParens(s)} -} - -// wrapParens wraps s with a set of parenthesis, but avoids it if the -// wrapped node itself is already surrounded by a pair of parenthesis or braces. -// It handles unwrapping one level of pointer-reference nodes. -func wrapParens(s textNode) textNode { - var refNode *textWrap - if s2, ok := s.(*textWrap); ok { - // Unwrap a single pointer reference node. - switch s2.Metadata.(type) { - case leafReference, trunkReference, trunkReferences: - refNode = s2 - if s3, ok := refNode.Value.(*textWrap); ok { - s2 = s3 - } - } - - // Already has delimiters that make parenthesis unnecessary. - hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") - hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") - if hasParens || hasBraces { - return s - } - } - if refNode != nil { - refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} - return s - } - return &textWrap{Prefix: "(", Value: s, Suffix: ")"} -} - -// FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in ptrs. As pointers are visited, ptrs is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { - if !v.IsValid() { - return nil - } - t := v.Type() - - // Check slice element for cycles. - if parentKind == reflect.Slice { - ptrRef, visited := ptrs.Push(v.Addr()) - if visited { - return makeLeafReference(ptrRef, false) - } - defer ptrs.Pop() - defer func() { out = wrapTrunkReference(ptrRef, false, out) }() - } - - // Check whether there is an Error or String method to call. - if !opts.AvoidStringer && v.CanInterface() { - // Avoid calling Error or String methods on nil receivers since many - // implementations crash when doing so. - if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { - var prefix, strVal string - func() { - // Swallow and ignore any panics from String or Error. 
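In isolation, that guard is a deferred recover wrapped around the method call, so a panicking String or Error implementation cannot crash the report. A standalone sketch (safeString and bomb are hypothetical, not part of cmp):

package main

import "fmt"

type bomb struct{}

func (bomb) String() string { panic("boom") }

// safeString calls a fmt.Stringer behind a deferred recover; on panic the
// named returns keep their zero values.
func safeString(v interface{}) (s string, ok bool) {
	defer func() { recover() }() // swallow panics from misbehaving String methods
	if str, isStr := v.(fmt.Stringer); isStr {
		return str.String(), true
	}
	return "", false
}

func main() {
	s, ok := safeString(bomb{})
	fmt.Println(s, ok) // prints " false": the panic was swallowed
}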
- defer func() { recover() }() - switch v := v.Interface().(type) { - case error: - strVal = v.Error() - prefix = "e" - case fmt.Stringer: - strVal = v.String() - prefix = "s" - } - }() - if prefix != "" { - return opts.formatString(prefix, strVal) - } - } - } - - // Check whether to explicitly wrap the result with the type. - var skipType bool - defer func() { - if !skipType { - out = opts.FormatType(t, out) - } - }() - - switch t.Kind() { - case reflect.Bool: - return textLine(fmt.Sprint(v.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return textLine(fmt.Sprint(v.Uint())) - case reflect.Uint8: - if parentKind == reflect.Slice || parentKind == reflect.Array { - return textLine(formatHex(v.Uint())) - } - return textLine(fmt.Sprint(v.Uint())) - case reflect.Uintptr: - return textLine(formatHex(v.Uint())) - case reflect.Float32, reflect.Float64: - return textLine(fmt.Sprint(v.Float())) - case reflect.Complex64, reflect.Complex128: - return textLine(fmt.Sprint(v.Complex())) - case reflect.String: - return opts.formatString("", v.String()) - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(value.PointerOf(v), true)) - case reflect.Struct: - var list textList - v := makeAddressable(v) // needed for retrieveUnexportedField - maxLen := v.NumField() - if opts.LimitVerbosity { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... - opts.VerbosityLevel-- - } - for i := 0; i < v.NumField(); i++ { - vv := v.Field(i) - if vv.IsZero() { - continue // Elide fields with zero values - } - if len(list) == maxLen { - list.AppendEllipsis(diffStats{}) - break - } - sf := t.Field(i) - if !isExported(sf.Name) { - vv = retrieveUnexportedField(v, sf, true) - } - s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) - list = append(list, textRecord{Key: sf.Name, Value: s}) - } - return &textWrap{Prefix: "{", Value: list, Suffix: "}"} - case reflect.Slice: - if v.IsNil() { - return textNil - } - - // Check whether this is a []byte of text data. - if t.Elem() == byteType { - b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } - if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { - out = opts.formatString("", string(b)) - skipType = true - return opts.FormatType(t, out) - } - } - - fallthrough - case reflect.Array: - maxLen := v.Len() - if opts.LimitVerbosity { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... - opts.VerbosityLevel-- - } - var list textList - for i := 0; i < v.Len(); i++ { - if len(list) == maxLen { - list.AppendEllipsis(diffStats{}) - break - } - s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) - list = append(list, textRecord{Value: s}) - } - - out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if t.Kind() == reflect.Slice && opts.PrintAddresses { - header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) - out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} - } - return out - case reflect.Map: - if v.IsNil() { - return textNil - } - - // Check pointer for cycles. 
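The cycle check used here, reduced to a standalone sketch (hypothetical types): track pointers on the way down and truncate when one repeats, which is what keeps self-referential values from recursing forever.

package main

import "fmt"

type node struct {
	name string
	next *node
}

func render(n *node, seen map[*node]bool) string {
	if n == nil {
		return "nil"
	}
	if seen[n] {
		return "(...)" // already on the stack: emit a leaf reference
	}
	seen[n] = true
	defer delete(seen, n) // pop on the way back up
	return fmt.Sprintf("&{%s %s}", n.name, render(n.next, seen))
}

func main() {
	a := &node{name: "a"}
	a.next = a // self-cycle
	fmt.Println(render(a, map[*node]bool{})) // &{a (...)}
}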
- ptrRef, visited := ptrs.Push(v) - if visited { - return makeLeafReference(ptrRef, opts.PrintAddresses) - } - defer ptrs.Pop() - - maxLen := v.Len() - if opts.LimitVerbosity { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... - opts.VerbosityLevel-- - } - var list textList - for _, k := range value.SortKeys(v.MapKeys()) { - if len(list) == maxLen { - list.AppendEllipsis(diffStats{}) - break - } - sk := formatMapKey(k, false, ptrs) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) - list = append(list, textRecord{Key: sk, Value: sv}) - } - - out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) - return out - case reflect.Ptr: - if v.IsNil() { - return textNil - } - - // Check pointer for cycles. - ptrRef, visited := ptrs.Push(v) - if visited { - out = makeLeafReference(ptrRef, opts.PrintAddresses) - return &textWrap{Prefix: "&", Value: out} - } - defer ptrs.Pop() - - // Skip the name only if this is an unnamed pointer type. - // Otherwise taking the address of a value does not reproduce - // the named pointer type. - if v.Type().Name() == "" { - skipType = true // Let the underlying value print the type instead - } - out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) - out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) - out = &textWrap{Prefix: "&", Value: out} - return out - case reflect.Interface: - if v.IsNil() { - return textNil - } - // Interfaces accept different concrete types, - // so configure the underlying value to explicitly print the type. - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) - default: - panic(fmt.Sprintf("%v kind not handled", v.Kind())) - } -} - -func (opts formatOptions) formatString(prefix, s string) textNode { - maxLen := len(s) - maxLines := strings.Count(s, "\n") + 1 - if opts.LimitVerbosity { - maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... - maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... - } - - // For multiline strings, use the triple-quote syntax, - // but only use it when printing removed or inserted nodes since - // we only want the extra verbosity for those cases. 
- lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") - isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') - for i := 0; i < len(lines) && isTripleQuoted; i++ { - lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support - isPrintable := func(r rune) bool { - return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable - } - line := lines[i] - isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen - } - if isTripleQuoted { - var list textList - list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) - for i, line := range lines { - if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { - comment := commentString(fmt.Sprintf("%d elided lines", numElided)) - list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) - break - } - list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) - } - list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) - return &textWrap{Prefix: "(", Value: list, Suffix: ")"} - } - - // Format the string as a single-line quoted string. - if len(s) > maxLen+len(textEllipsis) { - return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) - } - return textLine(prefix + formatString(s)) -} - -// formatMapKey formats v as if it were a map key. -// The result is guaranteed to be a single line. -func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { - var opts formatOptions - opts.DiffMode = diffIdentical - opts.TypeMode = elideType - opts.PrintAddresses = disambiguate - opts.AvoidStringer = disambiguate - opts.QualifiedNames = disambiguate - opts.VerbosityLevel = maxVerbosityPreset - opts.LimitVerbosity = true - s := opts.FormatValue(v, reflect.Map, ptrs).String() - return strings.TrimSpace(s) -} - -// formatString prints s as a double-quoted or backtick-quoted string. -func formatString(s string) string { - // Use quoted string if it the same length as a raw string literal. - // Otherwise, attempt to use the raw string form. - qs := strconv.Quote(s) - if len(qs) == 1+len(s)+1 { - return qs - } - - // Disallow newlines to ensure output is a single line. - // Only allow printable runes for readability purposes. - rawInvalid := func(r rune) bool { - return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') - } - if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { - return "`" + s + "`" - } - return qs -} - -// formatHex prints u as a hexadecimal integer in Go notation. 
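A usage-level sketch of the width rule the function below implements: pad to an even number of hex digits, just wide enough for the value. The helper here is a hypothetical reimplementation for illustration only:

package main

import "fmt"

// hexWidth returns the smallest even digit count that fits u, capped at 16.
func hexWidth(u uint64) int {
	w := 2
	for max := uint64(0xff); u > max && w < 16; w += 2 {
		max = max<<8 | 0xff
	}
	return w
}

func main() {
	for _, u := range []uint64{0x1f, 0x1f4, 0x12345} {
		fmt.Printf("0x%0*x\n", hexWidth(u), u) // 0x1f, 0x01f4, 0x012345
	}
}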
-func formatHex(u uint64) string { - var f string - switch { - case u <= 0xff: - f = "0x%02x" - case u <= 0xffff: - f = "0x%04x" - case u <= 0xffffff: - f = "0x%06x" - case u <= 0xffffffff: - f = "0x%08x" - case u <= 0xffffffffff: - f = "0x%010x" - case u <= 0xffffffffffff: - f = "0x%012x" - case u <= 0xffffffffffffff: - f = "0x%014x" - case u <= 0xffffffffffffffff: - f = "0x%016x" - } - return fmt.Sprintf(f, u) -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_slices.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_slices.go deleted file mode 100644 index 23e444f62f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "bytes" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/diff" -) - -// CanFormatDiffSlice reports whether we support custom formatting for nodes -// that are slices of primitive kinds or strings. -func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { - switch { - case opts.DiffMode != diffUnknown: - return false // Must be formatting in diff mode - case v.NumDiff == 0: - return false // No differences detected - case !v.ValueX.IsValid() || !v.ValueY.IsValid(): - return false // Both values must be valid - case v.NumIgnored > 0: - return false // Some ignore option was used - case v.NumTransformed > 0: - return false // Some transform option was used - case v.NumCompared > 1: - return false // More than one comparison was used - case v.NumCompared == 1 && v.Type.Name() != "": - // The need for cmp to check applicability of options on every element - // in a slice is a significant performance detriment for large []byte. - // The workaround is to specify Comparer(bytes.Equal), - // which enables cmp to compare []byte more efficiently. - // If they differ, we still want to provide batched diffing. - // The logic disallows named types since they tend to have their own - // String method, with nicer formatting than what this provides. - return false - } - - // Check whether this is an interface with the same concrete types. - t := v.Type - vx, vy := v.ValueX, v.ValueY - if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { - vx, vy = vx.Elem(), vy.Elem() - t = vx.Type() - } - - // Check whether we provide specialized diffing for this type. - switch t.Kind() { - case reflect.String: - case reflect.Array, reflect.Slice: - // Only slices of primitive types have specialized handling. - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - default: - return false - } - - // Both slice values have to be non-empty. - if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { - return false - } - - // If a sufficient number of elements already differ, - // use specialized formatting even if length requirement is not met. - if v.NumDiff > v.NumSame { - return true - } - default: - return false - } - - // Use specialized string diffing for longer slices or strings. 
- const minLength = 32 - return vx.Len() >= minLength && vy.Len() >= minLength -} - -// FormatDiffSlice prints a diff for the slices (or strings) represented by v. -// This provides custom-tailored logic to make printing of differences in -// textual strings and slices of primitive kinds more readable. -func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { - assert(opts.DiffMode == diffUnknown) - t, vx, vy := v.Type, v.ValueX, v.ValueY - if t.Kind() == reflect.Interface { - vx, vy = vx.Elem(), vy.Elem() - t = vx.Type() - opts = opts.WithTypeMode(emitType) - } - - // Auto-detect the type of the data. - var sx, sy string - var ssx, ssy []string - var isString, isMostlyText, isPureLinedText, isBinary bool - switch { - case t.Kind() == reflect.String: - sx, sy = vx.String(), vy.String() - isString = true - case t.Kind() == reflect.Slice && t.Elem() == byteType: - sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isString = true - case t.Kind() == reflect.Array: - // Arrays need to be addressable for slice operations to work. - vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() - vx2.Set(vx) - vy2.Set(vy) - vx, vy = vx2, vy2 - } - if isString { - var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int - for i, r := range sx + sy { - numTotalRunes++ - if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { - numValidRunes++ - } - if r == '\n' { - if maxLineLen < i-lastLineIdx { - maxLineLen = i - lastLineIdx - } - lastLineIdx = i + 1 - numLines++ - } - } - isPureText := numValidRunes == numTotalRunes - isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) - isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 - isBinary = !isMostlyText - - // Avoid diffing by lines if it produces a significantly more complex - // edit script than diffing by bytes. - if isPureLinedText { - ssx = strings.Split(sx, "\n") - ssy = strings.Split(sy, "\n") - esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { - return diff.BoolResult(ssx[ix] == ssy[iy]) - }) - esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { - return diff.BoolResult(sx[ix] == sy[iy]) - }) - efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) - efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - quotedLength := len(strconv.Quote(sx + sy)) - unquotedLength := len(sx) + len(sy) - escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) - isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 - } - } - - // Format the string into printable records. - var list textList - var delim string - switch { - // If the text appears to be multi-lined text, - // then perform differencing across individual lines. - case isPureLinedText: - list = opts.formatDiffSlice( - reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", - func(v reflect.Value, d diffMode) textRecord { - s := formatString(v.Index(0).String()) - return textRecord{Diff: d, Value: textLine(s)} - }, - ) - delim = "\n" - - // If possible, use a custom triple-quote (""") syntax for printing - // differences in a string literal. This format is more readable, - // but has edge-cases where differences are visually indistinguishable. - // This format is avoided under the following conditions: - // - A line starts with `"""` - // - A line starts with "..." 
- // - A line contains non-printable characters - // - Adjacent different lines differ only by whitespace - // - // For example: - // - // """ - // ... // 3 identical lines - // foo - // bar - // - baz - // + BAZ - // """ - isTripleQuoted := true - prevRemoveLines := map[string]bool{} - prevInsertLines := map[string]bool{} - var list2 textList - list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) - for _, r := range list { - if !r.Value.Equal(textEllipsis) { - line, _ := strconv.Unquote(string(r.Value.(textLine))) - line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support - normLine := strings.Map(func(r rune) rune { - if unicode.IsSpace(r) { - return -1 // drop whitespace to avoid visually indistinguishable output - } - return r - }, line) - isPrintable := func(r rune) bool { - return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable - } - isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" - switch r.Diff { - case diffRemoved: - isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] - prevRemoveLines[normLine] = true - case diffInserted: - isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] - prevInsertLines[normLine] = true - } - if !isTripleQuoted { - break - } - r.Value = textLine(line) - r.ElideComma = true - } - if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group - prevRemoveLines = map[string]bool{} - prevInsertLines = map[string]bool{} - } - list2 = append(list2, r) - } - if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { - list2 = list2[:len(list2)-1] // elide single empty line at the end - } - list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) - if isTripleQuoted { - var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} - switch t.Kind() { - case reflect.String: - if t != stringType { - out = opts.FormatType(t, out) - } - case reflect.Slice: - // Always emit type for slices since the triple-quote syntax - // looks like a string (not a slice). - opts = opts.WithTypeMode(emitType) - out = opts.FormatType(t, out) - } - return out - } - - // If the text appears to be single-lined text, - // then perform differencing in approximately fixed-sized chunks. - // The output is printed as quoted strings. - case isMostlyText: - list = opts.formatDiffSlice( - reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", - func(v reflect.Value, d diffMode) textRecord { - s := formatString(v.String()) - return textRecord{Diff: d, Value: textLine(s)} - }, - ) - - // If the text appears to be binary data, - // then perform differencing in approximately fixed-sized chunks. - // The output is inspired by hexdump. - case isBinary: - list = opts.formatDiffSlice( - reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", - func(v reflect.Value, d diffMode) textRecord { - var ss []string - for i := 0; i < v.Len(); i++ { - ss = append(ss, formatHex(v.Index(i).Uint())) - } - s := strings.Join(ss, ", ") - comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) - return textRecord{Diff: d, Value: textLine(s), Comment: comment} - }, - ) - - // For all other slices of primitive types, - // then perform differencing in approximately fixed-sized chunks. - // The size of each chunk depends on the width of the element kind. 
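The chunked printing itself is just slicing a sequence into fixed-size windows, as appendChunks does further below. A minimal standalone sketch (hypothetical names):

package main

import "fmt"

// chunks splits xs into consecutive windows of at most size elements.
func chunks(xs []int, size int) [][]int {
	var out [][]int
	for len(xs) > 0 {
		n := size
		if n > len(xs) {
			n = len(xs)
		}
		out = append(out, xs[:n])
		xs = xs[n:]
	}
	return out
}

func main() {
	fmt.Println(chunks([]int{1, 2, 3, 4, 5, 6, 7}, 3)) // [[1 2 3] [4 5 6] [7]]
}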
- default: - var chunkSize int - if t.Elem().Kind() == reflect.Bool { - chunkSize = 16 - } else { - switch t.Elem().Bits() { - case 8: - chunkSize = 16 - case 16: - chunkSize = 12 - case 32: - chunkSize = 8 - default: - chunkSize = 8 - } - } - list = opts.formatDiffSlice( - vx, vy, chunkSize, t.Elem().Kind().String(), - func(v reflect.Value, d diffMode) textRecord { - var ss []string - for i := 0; i < v.Len(); i++ { - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - ss = append(ss, fmt.Sprint(v.Index(i).Uint())) - case reflect.Uint8, reflect.Uintptr: - ss = append(ss, formatHex(v.Index(i).Uint())) - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - ss = append(ss, fmt.Sprint(v.Index(i).Interface())) - } - } - s := strings.Join(ss, ", ") - return textRecord{Diff: d, Value: textLine(s)} - }, - ) - } - - // Wrap the output with appropriate type information. - var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if !isMostlyText { - // The "{...}" byte-sequence literal is not valid Go syntax for strings. - // Emit the type for extra clarity (e.g. "string{...}"). - if t.Kind() == reflect.String { - opts = opts.WithTypeMode(emitType) - } - return opts.FormatType(t, out) - } - switch t.Kind() { - case reflect.String: - out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != stringType { - out = opts.FormatType(t, out) - } - case reflect.Slice: - out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != bytesType { - out = opts.FormatType(t, out) - } - } - return out -} - -// formatASCII formats s as an ASCII string. -// This is useful for printing binary strings in a semi-legible way. -func formatASCII(s string) string { - b := bytes.Repeat([]byte{'.'}, len(s)) - for i := 0; i < len(s); i++ { - if ' ' <= s[i] && s[i] <= '~' { - b[i] = s[i] - } - } - return string(b) -} - -func (opts formatOptions) formatDiffSlice( - vx, vy reflect.Value, chunkSize int, name string, - makeRec func(reflect.Value, diffMode) textRecord, -) (list textList) { - eq := func(ix, iy int) bool { - return vx.Index(ix).Interface() == vy.Index(iy).Interface() - } - es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { - return diff.BoolResult(eq(ix, iy)) - }) - - appendChunks := func(v reflect.Value, d diffMode) int { - n0 := v.Len() - for v.Len() > 0 { - n := chunkSize - if n > v.Len() { - n = v.Len() - } - list = append(list, makeRec(v.Slice(0, n), d)) - v = v.Slice(n, v.Len()) - } - return n0 - v.Len() - } - - var numDiffs int - maxLen := -1 - if opts.LimitVerbosity { - maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... - opts.VerbosityLevel-- - } - - groups := coalesceAdjacentEdits(name, es) - groups = coalesceInterveningIdentical(groups, chunkSize/4) - groups = cleanupSurroundingIdentical(groups, eq) - maxGroup := diffStats{Name: name} - for i, ds := range groups { - if maxLen >= 0 && numDiffs >= maxLen { - maxGroup = maxGroup.Append(ds) - continue - } - - // Print equal. - if ds.NumDiff() == 0 { - // Compute the number of leading and trailing equal bytes to print. 
- var numLo, numHi int - numEqual := ds.NumIgnored + ds.NumIdentical - for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { - numLo++ - } - for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { - numHi++ - } - if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { - numHi = numEqual - numLo // Avoid pointless coalescing of single equal row - } - - // Print the equal bytes. - appendChunks(vx.Slice(0, numLo), diffIdentical) - if numEqual > numLo+numHi { - ds.NumIdentical -= numLo + numHi - list.AppendEllipsis(ds) - } - appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) - vx = vx.Slice(numEqual, vx.Len()) - vy = vy.Slice(numEqual, vy.Len()) - continue - } - - // Print unequal. - len0 := len(list) - nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) - vx = vx.Slice(nx, vx.Len()) - ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) - vy = vy.Slice(ny, vy.Len()) - numDiffs += len(list) - len0 - } - if maxGroup.IsZero() { - assert(vx.Len() == 0 && vy.Len() == 0) - } else { - list.AppendEllipsis(maxGroup) - } - return list -} - -// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent -// equal or unequal counts. -// -// Example: -// -// Input: "..XXY...Y" -// Output: [ -// {NumIdentical: 2}, -// {NumRemoved: 2, NumInserted 1}, -// {NumIdentical: 3}, -// {NumInserted: 1}, -// ] -func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { - var prevMode byte - lastStats := func(mode byte) *diffStats { - if prevMode != mode { - groups = append(groups, diffStats{Name: name}) - prevMode = mode - } - return &groups[len(groups)-1] - } - for _, e := range es { - switch e { - case diff.Identity: - lastStats('=').NumIdentical++ - case diff.UniqueX: - lastStats('!').NumRemoved++ - case diff.UniqueY: - lastStats('!').NumInserted++ - case diff.Modified: - lastStats('!').NumModified++ - } - } - return groups -} - -// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) -// equal groups into adjacent unequal groups that currently result in a -// dual inserted/removed printout. This acts as a high-pass filter to smooth -// out high-frequency changes within the windowSize. 
-// -// Example: -// -// WindowSize: 16, -// Input: [ -// {NumIdentical: 61}, // group 0 -// {NumRemoved: 3, NumInserted: 1}, // group 1 -// {NumIdentical: 6}, // ├── coalesce -// {NumInserted: 2}, // ├── coalesce -// {NumIdentical: 1}, // ├── coalesce -// {NumRemoved: 9}, // └── coalesce -// {NumIdentical: 64}, // group 2 -// {NumRemoved: 3, NumInserted: 1}, // group 3 -// {NumIdentical: 6}, // ├── coalesce -// {NumInserted: 2}, // ├── coalesce -// {NumIdentical: 1}, // ├── coalesce -// {NumRemoved: 7}, // ├── coalesce -// {NumIdentical: 1}, // ├── coalesce -// {NumRemoved: 2}, // └── coalesce -// {NumIdentical: 63}, // group 4 -// ] -// Output: [ -// {NumIdentical: 61}, -// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, -// {NumIdentical: 64}, -// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, -// {NumIdentical: 63}, -// ] -func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { - groups, groupsOrig := groups[:0], groups - for i, ds := range groupsOrig { - if len(groups) >= 2 && ds.NumDiff() > 0 { - prev := &groups[len(groups)-2] // Unequal group - curr := &groups[len(groups)-1] // Equal group - next := &groupsOrig[i] // Unequal group - hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 - hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 - if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { - *prev = prev.Append(*curr).Append(*next) - groups = groups[:len(groups)-1] // Truncate off equal group - continue - } - } - groups = append(groups, ds) - } - return groups -} - -// cleanupSurroundingIdentical scans through all unequal groups, and -// moves any leading sequence of equal elements to the preceding equal group and -// moves and trailing sequence of equal elements to the succeeding equal group. -// -// This is necessary since coalesceInterveningIdentical may coalesce edit groups -// together such that leading/trailing spans of equal elements becomes possible. -// Note that this can occur even with an optimal diffing algorithm. -// -// Example: -// -// Input: [ -// {NumIdentical: 61}, -// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements -// {NumIdentical: 67}, -// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements -// {NumIdentical: 54}, -// ] -// Output: [ -// {NumIdentical: 64}, // incremented by 3 -// {NumRemoved: 9}, -// {NumIdentical: 67}, -// {NumRemoved: 9}, -// {NumIdentical: 64}, // incremented by 10 -// ] -func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { - var ix, iy int // indexes into sequence x and y - for i, ds := range groups { - // Handle equal group. - if ds.NumDiff() == 0 { - ix += ds.NumIdentical - iy += ds.NumIdentical - continue - } - - // Handle unequal group. - nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified - ny := ds.NumIdentical + ds.NumInserted + ds.NumModified - var numLeadingIdentical, numTrailingIdentical int - for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { - numLeadingIdentical++ - } - for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { - numTrailingIdentical++ - } - if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { - if numLeadingIdentical > 0 { - // Remove leading identical span from this group and - // insert it into the preceding group. 
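The identical-span counting above, sketched standalone (hypothetical names; byte slices stand in for the eq callback):

package main

import "fmt"

// trimCounts counts equal elements at the front and back of x and y,
// guarding against counting the same element twice on short inputs.
func trimCounts(x, y []byte) (lead, trail int) {
	for lead < len(x) && lead < len(y) && x[lead] == y[lead] {
		lead++
	}
	for trail < len(x)-lead && trail < len(y)-lead &&
		x[len(x)-1-trail] == y[len(y)-1-trail] {
		trail++
	}
	return lead, trail
}

func main() {
	lead, trail := trimCounts([]byte("aaXXbb"), []byte("aaYYYbb"))
	fmt.Println(lead, trail) // 2 2: both spans move into neighboring groups
}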
- if i-1 >= 0 { - groups[i-1].NumIdentical += numLeadingIdentical - } else { - // No preceding group exists, so prepend a new group, - // but do so after we finish iterating over all groups. - defer func() { - groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) - }() - } - // Increment indexes since the preceding group would have handled this. - ix += numLeadingIdentical - iy += numLeadingIdentical - } - if numTrailingIdentical > 0 { - // Remove trailing identical span from this group and - // insert it into the succeeding group. - if i+1 < len(groups) { - groups[i+1].NumIdentical += numTrailingIdentical - } else { - // No succeeding group exists, so append a new group, - // but do so after we finish iterating over all groups. - defer func() { - groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) - }() - } - // Do not increment indexes since the succeeding group will handle this. - } - - // Update this group since some identical elements were removed. - nx -= numIdentical - ny -= numIdentical - groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} - } - ix += nx - iy += ny - } - return groups -} diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_text.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_text.go deleted file mode 100644 index 388fcf5712..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_text.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "bytes" - "fmt" - "math/rand" - "strings" - "time" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/flags" -) - -var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 - -const maxColumnLength = 80 - -type indentMode int - -func (n indentMode) appendIndent(b []byte, d diffMode) []byte { - // The output of Diff is documented as being unstable to provide future - // flexibility in changing the output for more humanly readable reports. - // This logic intentionally introduces instability to the exact output - // so that users can detect accidental reliance on stability early on, - // rather than much later when an actual change to the format occurs. - if flags.Deterministic || randBool { - // Use regular spaces (U+0020). - switch d { - case diffUnknown, diffIdentical: - b = append(b, " "...) - case diffRemoved: - b = append(b, "- "...) - case diffInserted: - b = append(b, "+ "...) - } - } else { - // Use non-breaking spaces (U+00a0). - switch d { - case diffUnknown, diffIdentical: - b = append(b, "  "...) - case diffRemoved: - b = append(b, "- "...) - case diffInserted: - b = append(b, "+ "...) - } - } - return repeatCount(n).appendChar(b, '\t') -} - -type repeatCount int - -func (n repeatCount) appendChar(b []byte, c byte) []byte { - for ; n > 0; n-- { - b = append(b, c) - } - return b -} - -// textNode is a simplified tree-based representation of structured text. -// Possible node types are textWrap, textList, or textLine. -type textNode interface { - // Len reports the length in bytes of a single-line version of the tree. - // Nested textRecord.Diff and textRecord.Comment fields are ignored. - Len() int - // Equal reports whether the two trees are structurally identical. - // Nested textRecord.Diff and textRecord.Comment fields are compared. 
- Equal(textNode) bool - // String returns the string representation of the text tree. - // It is not guaranteed that len(x.String()) == x.Len(), - // nor that x.String() == y.String() implies that x.Equal(y). - String() string - - // formatCompactTo formats the contents of the tree as a single-line string - // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment - // fields are ignored. - // - // However, not all nodes in the tree should be collapsed as a single-line. - // If a node can be collapsed as a single-line, it is replaced by a textLine - // node. Since the top-level node cannot replace itself, this also returns - // the current node itself. - // - // This does not mutate the receiver. - formatCompactTo([]byte, diffMode) ([]byte, textNode) - // formatExpandedTo formats the contents of the tree as a multi-line string - // to the provided buffer. In order for column alignment to operate well, - // formatCompactTo must be called before calling formatExpandedTo. - formatExpandedTo([]byte, diffMode, indentMode) []byte -} - -// textWrap is a wrapper that concatenates a prefix and/or a suffix -// to the underlying node. -type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" - Metadata interface{} // arbitrary metadata; has no effect on formatting -} - -func (s *textWrap) Len() int { - return len(s.Prefix) + s.Value.Len() + len(s.Suffix) -} -func (s1 *textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(*textWrap); ok { - return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix - } - return false -} -func (s *textWrap) String() string { - var d diffMode - var n indentMode - _, s2 := s.formatCompactTo(nil, d) - b := n.appendIndent(nil, d) // Leading indent - b = s2.formatExpandedTo(b, d, n) // Main body - b = append(b, '\n') // Trailing newline - return string(b) -} -func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { - n0 := len(b) // Original buffer length - b = append(b, s.Prefix...) - b, s.Value = s.Value.formatCompactTo(b, d) - b = append(b, s.Suffix...) - if _, ok := s.Value.(textLine); ok { - return b, textLine(b[n0:]) - } - return b, s -} -func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { - b = append(b, s.Prefix...) - b = s.Value.formatExpandedTo(b, d, n) - b = append(b, s.Suffix...) - return b -} - -// textList is a comma-separated list of textWrap or textLine nodes. -// The list may be formatted as multi-lines or single-line at the discretion -// of the textList.formatCompactTo method. -type textList []textRecord -type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - ElideComma bool // avoid trailing comma - Comment fmt.Stringer // e.g., "6 identical fields" -} - -// AppendEllipsis appends a new ellipsis node to the list if none already -// exists at the end. If cs is non-zero it coalesces the statistics with the -// previous diffStats. 
-func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := !ds.IsZero() - if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { - if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) - } else { - *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) - } - return - } - if hasStats { - (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) - } -} - -func (s textList) Len() (n int) { - for i, r := range s { - n += len(r.Key) - if r.Key != "" { - n += len(": ") - } - n += r.Value.Len() - if i < len(s)-1 { - n += len(", ") - } - } - return n -} - -func (s1 textList) Equal(s2 textNode) bool { - if s2, ok := s2.(textList); ok { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - r1, r2 := s1[i], s2[i] - if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { - return false - } - } - return true - } - return false -} - -func (s textList) String() string { - return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() -} - -func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { - s = append(textList(nil), s...) // Avoid mutating original - - // Determine whether we can collapse this list as a single line. - n0 := len(b) // Original buffer length - var multiLine bool - for i, r := range s { - if r.Diff == diffInserted || r.Diff == diffRemoved { - multiLine = true - } - b = append(b, r.Key...) - if r.Key != "" { - b = append(b, ": "...) - } - b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) - if _, ok := s[i].Value.(textLine); !ok { - multiLine = true - } - if r.Comment != nil { - multiLine = true - } - if i < len(s)-1 { - b = append(b, ", "...) - } - } - // Force multi-lined output when printing a removed/inserted node that - // is sufficiently long. - if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { - multiLine = true - } - if !multiLine { - return b, textLine(b[n0:]) - } - return b, s -} - -func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { - alignKeyLens := s.alignLens( - func(r textRecord) bool { - _, isLine := r.Value.(textLine) - return r.Key == "" || !isLine - }, - func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, - ) - alignValueLens := s.alignLens( - func(r textRecord) bool { - _, isLine := r.Value.(textLine) - return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil - }, - func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, - ) - - // Format lists of simple lists in a batched form. - // If the list is sequence of only textLine values, - // then batch multiple values on a single line. - var isSimple bool - for _, r := range s { - _, isLine := r.Value.(textLine) - isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil - if !isSimple { - break - } - } - if isSimple { - n++ - var batch []byte - emitBatch := func() { - if len(batch) > 0 { - b = n.appendIndent(append(b, '\n'), d) - b = append(b, bytes.TrimRight(batch, " ")...) - batch = batch[:0] - } - } - for _, r := range s { - line := r.Value.(textLine) - if len(batch)+len(line)+len(", ") > maxColumnLength { - emitBatch() - } - batch = append(batch, line...) - batch = append(batch, ", "...) - } - emitBatch() - n-- - return n.appendIndent(append(b, '\n'), d) - } - - // Format the list as a multi-lined output. - n++ - for i, r := range s { - b = n.appendIndent(append(b, '\n'), d|r.Diff) - if r.Key != "" { - b = append(b, r.Key+": "...) 
- } - b = alignKeyLens[i].appendChar(b, ' ') - - b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.ElideComma { - b = append(b, ',') - } - b = alignValueLens[i].appendChar(b, ' ') - - if r.Comment != nil { - b = append(b, " // "+r.Comment.String()...) - } - } - n-- - - return n.appendIndent(append(b, '\n'), d) -} - -func (s textList) alignLens( - skipFunc func(textRecord) bool, - lenFunc func(textRecord) int, -) []repeatCount { - var startIdx, endIdx, maxLen int - lens := make([]repeatCount, len(s)) - for i, r := range s { - if skipFunc(r) { - for j := startIdx; j < endIdx && j < len(s); j++ { - lens[j] = repeatCount(maxLen - lenFunc(s[j])) - } - startIdx, endIdx, maxLen = i+1, i+1, 0 - } else { - if maxLen < lenFunc(r) { - maxLen = lenFunc(r) - } - endIdx = i + 1 - } - } - for j := startIdx; j < endIdx && j < len(s); j++ { - lens[j] = repeatCount(maxLen - lenFunc(s[j])) - } - return lens -} - -// textLine is a single-line segment of text and is always a leaf node -// in the textNode tree. -type textLine []byte - -var ( - textNil = textLine("nil") - textEllipsis = textLine("...") -) - -func (s textLine) Len() int { - return len(s) -} -func (s1 textLine) Equal(s2 textNode) bool { - if s2, ok := s2.(textLine); ok { - return bytes.Equal([]byte(s1), []byte(s2)) - } - return false -} -func (s textLine) String() string { - return string(s) -} -func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { - return append(b, s...), s -} -func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { - return append(b, s...) -} - -type diffStats struct { - Name string - NumIgnored int - NumIdentical int - NumRemoved int - NumInserted int - NumModified int -} - -func (s diffStats) IsZero() bool { - s.Name = "" - return s == diffStats{} -} - -func (s diffStats) NumDiff() int { - return s.NumRemoved + s.NumInserted + s.NumModified -} - -func (s diffStats) Append(ds diffStats) diffStats { - assert(s.Name == ds.Name) - s.NumIgnored += ds.NumIgnored - s.NumIdentical += ds.NumIdentical - s.NumRemoved += ds.NumRemoved - s.NumInserted += ds.NumInserted - s.NumModified += ds.NumModified - return s -} - -// String prints a humanly-readable summary of coalesced records. -// -// Example: -// -// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" -func (s diffStats) String() string { - var ss []string - var sum int - labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} - counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} - for i, n := range counts { - if n > 0 { - ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) - } - sum += n - } - - // Pluralize the name (adjusting for some obscure English grammar rules). - name := s.Name - if sum > 1 { - name += "s" - if strings.HasSuffix(name, "ys") { - name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" - } - } - - // Format the list according to English grammar (with Oxford comma). 
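That grammar rule, sketched standalone (joinEnglish is a hypothetical name): one or two items join with "and", three or more take commas plus an Oxford comma.

package main

import (
	"fmt"
	"strings"
)

func joinEnglish(ss []string) string {
	switch n := len(ss); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(ss, " and ")
	default:
		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1]
	}
}

func main() {
	fmt.Println(joinEnglish([]string{"2 removed", "1 inserted", "3 modified"}))
	// 2 removed, 1 inserted, and 3 modified
}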
- switch n := len(ss); n { - case 0: - return "" - case 1, 2: - return strings.Join(ss, " and ") + " " + name - default: - return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name - } -} - -type commentString string - -func (s commentString) String() string { return string(s) } diff --git a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_value.go b/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_value.go deleted file mode 100644 index 668d470fd8..0000000000 --- a/openshift/tools/vendor/github.com/google/go-cmp/cmp/report_value.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import "reflect" - -// valueNode represents a single node within a report, which is a -// structured representation of the value tree, containing information -// regarding which nodes are equal or not. -type valueNode struct { - parent *valueNode - - Type reflect.Type - ValueX reflect.Value - ValueY reflect.Value - - // NumSame is the number of leaf nodes that are equal. - // All descendants are equal only if NumDiff is 0. - NumSame int - // NumDiff is the number of leaf nodes that are not equal. - NumDiff int - // NumIgnored is the number of leaf nodes that are ignored. - NumIgnored int - // NumCompared is the number of leaf nodes that were compared - // using an Equal method or Comparer function. - NumCompared int - // NumTransformed is the number of non-leaf nodes that were transformed. - NumTransformed int - // NumChildren is the number of transitive descendants of this node. - // This counts from zero; thus, leaf nodes have no descendants. - NumChildren int - // MaxDepth is the maximum depth of the tree. This counts from zero; - // thus, leaf nodes have a depth of zero. - MaxDepth int - - // Records is a list of struct fields, slice elements, or map entries. - Records []reportRecord // If populated, implies Value is not populated - - // Value is the result of a transformation, pointer indirect, of - // type assertion. - Value *valueNode // If populated, implies Records is not populated - - // TransformerName is the name of the transformer. 
- TransformerName string // If non-empty, implies Value is populated -} -type reportRecord struct { - Key reflect.Value // Invalid for slice element - Value *valueNode -} - -func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { - vx, vy := ps.Values() - child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} - switch s := ps.(type) { - case StructField: - assert(parent.Value == nil) - parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) - case SliceIndex: - assert(parent.Value == nil) - parent.Records = append(parent.Records, reportRecord{Value: child}) - case MapIndex: - assert(parent.Value == nil) - parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) - case Indirect: - assert(parent.Value == nil && parent.Records == nil) - parent.Value = child - case TypeAssertion: - assert(parent.Value == nil && parent.Records == nil) - parent.Value = child - case Transform: - assert(parent.Value == nil && parent.Records == nil) - parent.Value = child - parent.TransformerName = s.Name() - parent.NumTransformed++ - default: - assert(parent == nil) // Must be the root step - } - return child -} - -func (r *valueNode) Report(rs Result) { - assert(r.MaxDepth == 0) // May only be called on leaf nodes - - if rs.ByIgnore() { - r.NumIgnored++ - } else { - if rs.Equal() { - r.NumSame++ - } else { - r.NumDiff++ - } - } - assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) - - if rs.ByMethod() { - r.NumCompared++ - } - if rs.ByFunc() { - r.NumCompared++ - } - assert(r.NumCompared <= 1) -} - -func (child *valueNode) PopStep() (parent *valueNode) { - if child.parent == nil { - return nil - } - parent = child.parent - parent.NumSame += child.NumSame - parent.NumDiff += child.NumDiff - parent.NumIgnored += child.NumIgnored - parent.NumCompared += child.NumCompared - parent.NumTransformed += child.NumTransformed - parent.NumChildren += child.NumChildren + 1 - if parent.MaxDepth < child.MaxDepth+1 { - parent.MaxDepth = child.MaxDepth + 1 - } - return parent -} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/LICENSE b/openshift/tools/vendor/github.com/google/go-containerregistry/LICENSE new file mode 100644 index 0000000000..7a4a3ea242 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
new file mode 100644
index 0000000000..14a05eaa17
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
@@ -0,0 +1,48 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package and provides helpers for adding Close to io.{Reader|Writer}.
+package and
+
+import (
+	"io"
+)
+
+// ReadCloser implements io.ReadCloser by reading from a particular io.Reader
+// and then calling the provided "Close()" method.
+type ReadCloser struct {
+	io.Reader
+	CloseFunc func() error
+}
+
+var _ io.ReadCloser = (*ReadCloser)(nil)
+
+// Close implements io.ReadCloser
+func (rac *ReadCloser) Close() error {
+	return rac.CloseFunc()
+}
+
+// WriteCloser implements io.WriteCloser by writing to a particular io.Writer
+// and then calling the provided "Close()" method.
+type WriteCloser struct {
+	io.Writer
+	CloseFunc func() error
+}
+
+var _ io.WriteCloser = (*WriteCloser)(nil)
+
+// Close implements io.WriteCloser
+func (wac *WriteCloser) Close() error {
+	return wac.CloseFunc()
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/compression/compression.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/compression/compression.go
new file mode 100644
index 0000000000..012487150f
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/compression/compression.go
@@ -0,0 +1,97 @@
+// Copyright 2022 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compression abstracts over gzip and zstd.
+package compression
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+
+	"github.com/google/go-containerregistry/internal/gzip"
+	"github.com/google/go-containerregistry/internal/zstd"
+	"github.com/google/go-containerregistry/pkg/compression"
+)
+
+// Opener represents e.g. opening a file.
+type Opener = func() (io.ReadCloser, error)
+
+// GetCompression detects whether an Opener is compressed and which algorithm is used.
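+//
+// A minimal usage sketch (the file name here is hypothetical):
+//
+//	opener := func() (io.ReadCloser, error) { return os.Open("layer.tar.gz") }
+//	cp, err := GetCompression(opener) // compression.GZip for a gzipped file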
+func GetCompression(opener Opener) (compression.Compression, error) {
+	rc, err := opener()
+	if err != nil {
+		return compression.None, err
+	}
+	defer rc.Close()
+
+	cp, _, err := PeekCompression(rc)
+	if err != nil {
+		return compression.None, err
+	}
+
+	return cp, nil
+}
+
+// PeekCompression detects whether the input stream is compressed and which algorithm is used.
+//
+// If r implements Peek, we will use that directly, otherwise a small number
+// of bytes are buffered to Peek at the gzip/zstd header, and the returned
+// PeekReader can be used as a replacement for the consumed input io.Reader.
+func PeekCompression(r io.Reader) (compression.Compression, PeekReader, error) {
+	pr := intoPeekReader(r)
+
+	if isGZip, _, err := checkHeader(pr, gzip.MagicHeader); err != nil {
+		return compression.None, pr, err
+	} else if isGZip {
+		return compression.GZip, pr, nil
+	}
+
+	if isZStd, _, err := checkHeader(pr, zstd.MagicHeader); err != nil {
+		return compression.None, pr, err
+	} else if isZStd {
+		return compression.ZStd, pr, nil
+	}
+
+	return compression.None, pr, nil
+}
+
+// PeekReader is an io.Reader that also implements Peek a la bufio.Reader.
+type PeekReader interface {
+	io.Reader
+	Peek(n int) ([]byte, error)
+}
+
+// intoPeekReader creates a PeekReader from an io.Reader.
+// If the reader already has a Peek method, it will just return the passed reader.
+func intoPeekReader(r io.Reader) PeekReader {
+	if p, ok := r.(PeekReader); ok {
+		return p
+	}
+
+	return bufio.NewReader(r)
+}
+
+// checkHeader checks whether the first bytes from a PeekReader match an expected header
+func checkHeader(pr PeekReader, expectedHeader []byte) (bool, PeekReader, error) {
+	header, err := pr.Peek(len(expectedHeader))
+	if err != nil {
+		// https://github.com/google/go-containerregistry/issues/367
+		if err == io.EOF {
+			return false, pr, nil
+		}
+		return false, pr, err
+	}
+	return bytes.Equal(header, expectedHeader), pr, nil
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go
new file mode 100644
index 0000000000..69021bcee2
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go
@@ -0,0 +1,54 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package estargz adapts the containerd estargz package to our abstractions.
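+//
+// Its single exported entry point, ReadCloser (below), compresses an
+// uncompressed tarball into eStargz form and also returns the digest of the
+// eStargz table of contents.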
+package estargz + +import ( + "bytes" + "io" + + "github.com/containerd/stargz-snapshotter/estargz" + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// Assert that what we're returning is an io.ReadCloser +var _ io.ReadCloser = (*estargz.Blob)(nil) + +// ReadCloser reads uncompressed tarball input from the io.ReadCloser and +// returns: +// - An io.ReadCloser from which compressed data may be read, and +// - A v1.Hash with the hash of the estargz table of contents, or +// - An error if the estargz processing encountered a problem. +// +// Refer to estargz for the options: +// https://pkg.go.dev/github.com/containerd/stargz-snapshotter/estargz@v0.4.1#Option +func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash, error) { + defer r.Close() + + // TODO(#876): Avoid buffering into memory. + bs, err := io.ReadAll(r) + if err != nil { + return nil, v1.Hash{}, err + } + br := bytes.NewReader(bs) + + rc, err := estargz.Build(io.NewSectionReader(br, 0, int64(len(bs))), opts...) + if err != nil { + return nil, v1.Hash{}, err + } + + h, err := v1.NewHash(rc.TOCDigest().String()) + return rc, h, err +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go new file mode 100644 index 0000000000..018c0f8c0f --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go @@ -0,0 +1,118 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gzip provides helper functions for interacting with gzipped streams. +package gzip + +import ( + "bufio" + "bytes" + "compress/gzip" + "io" + + "github.com/google/go-containerregistry/internal/and" +) + +// MagicHeader is the start of gzip files. +var MagicHeader = []byte{'\x1f', '\x8b'} + +// ReadCloser reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +// This uses gzip.BestSpeed for the compression level. +func ReadCloser(r io.ReadCloser) io.ReadCloser { + return ReadCloserLevel(r, gzip.BestSpeed) +} + +// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +// Refer to compress/gzip for the level: +// https://golang.org/pkg/compress/gzip/#pkg-constants +func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser { + pr, pw := io.Pipe() + + // For highly compressible layers, gzip.Writer will output a very small + // number of bytes per Write(). This is normally fine, but when pushing + // to a registry, we want to ensure that we're taking full advantage of + // the available bandwidth instead of sending tons of tiny writes over + // the wire. + // 64K ought to be small enough for anybody. 
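+	// (For reference: 2<<16 is 131072 bytes, i.e. 128KiB.)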
+ bw := bufio.NewWriterSize(pw, 2<<16) + + // Returns err so we can pw.CloseWithError(err) + go func() error { + // TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect. + // Context: https://golang.org/issue/24283 + gw, err := gzip.NewWriterLevel(bw, level) + if err != nil { + return pw.CloseWithError(err) + } + + if _, err := io.Copy(gw, r); err != nil { + defer r.Close() + defer gw.Close() + return pw.CloseWithError(err) + } + + // Close gzip writer to Flush it and write gzip trailers. + if err := gw.Close(); err != nil { + return pw.CloseWithError(err) + } + + // Flush bufio writer to ensure we write out everything. + if err := bw.Flush(); err != nil { + return pw.CloseWithError(err) + } + + // We don't really care if these fail. + defer pw.Close() + defer r.Close() + + return nil + }() + + return pr +} + +// UnzipReadCloser reads compressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which uncompressed data may be read. +func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &and.ReadCloser{ + Reader: gr, + CloseFunc: func() error { + // If the unzip fails, then this seems to return the same + // error as the read. We don't want this to interfere with + // us closing the main ReadCloser, since this could leave + // an open file descriptor (fails on Windows). + gr.Close() + return r.Close() + }, + }, nil +} + +// Is detects whether the input stream is compressed. +func Is(r io.Reader) (bool, error) { + magicHeader := make([]byte, 2) + n, err := r.Read(magicHeader) + if n == 0 && err == io.EOF { + return false, nil + } + if err != nil { + return false, err + } + return bytes.Equal(magicHeader, MagicHeader), nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/redact/redact.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/redact/redact.go new file mode 100644 index 0000000000..6d47570076 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/redact/redact.go @@ -0,0 +1,89 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package redact contains a simple context signal for redacting requests. +package redact + +import ( + "context" + "errors" + "net/url" +) + +type contextKey string + +var redactKey = contextKey("redact") + +// NewContext creates a new ctx with the reason for redaction. +func NewContext(ctx context.Context, reason string) context.Context { + return context.WithValue(ctx, redactKey, reason) +} + +// FromContext returns the redaction reason, if any. +func FromContext(ctx context.Context) (bool, string) { + reason, ok := ctx.Value(redactKey).(string) + return ok, reason +} + +// Error redacts potentially sensitive query parameter values in the URL from the error's message. +// +// If the error is a *url.Error, this returns a *url.Error with the URL redacted. 
+// Any other error type, or nil, is returned unchanged.
+func Error(err error) error {
+	// If the error is a url.Error, we can redact the URL.
+	// Otherwise (including if err is nil), we can't redact.
+	var uerr *url.Error
+	if ok := errors.As(err, &uerr); !ok {
+		return err
+	}
+	u, perr := url.Parse(uerr.URL)
+	if perr != nil {
+		return err // If the URL can't be parsed, just return the original error.
+	}
+	uerr.URL = URL(u) // Update the URL to the redacted URL.
+	return uerr
+}
+
+// The set of query string keys that we expect to send as part of the registry
+// protocol. Anything else is potentially dangerous to leak, as it's probably
+// from a redirect. These redirects often include tokens or signed URLs.
+var paramAllowlist = map[string]struct{}{
+	// Token exchange
+	"scope":   {},
+	"service": {},
+	// Cross-repo mounting
+	"mount": {},
+	"from":  {},
+	// Layer PUT
+	"digest": {},
+	// Listing tags and catalog
+	"n":    {},
+	"last": {},
+}
+
+// URL redacts potentially sensitive query parameter values from the URL's query string.
+func URL(u *url.URL) string {
+	qs := u.Query()
+	for k, v := range qs {
+		for i := range v {
+			if _, ok := paramAllowlist[k]; !ok {
+				// key is not in the Allowlist
+				v[i] = "REDACTED"
+			}
+		}
+	}
+	r := *u
+	r.RawQuery = qs.Encode()
+	return r.Redacted()
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/retry/retry.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/retry/retry.go
new file mode 100644
index 0000000000..c9e3564504
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/retry/retry.go
@@ -0,0 +1,94 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides methods for retrying operations. It is a thin wrapper
+// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier.
+package retry
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/google/go-containerregistry/internal/retry/wait"
+)
+
+// Backoff is an alias of our own wait.Backoff to avoid name conflicts with
+// the kubernetes wait package. Typing retry.Backoff is easier than fixing
+// the wrong import every time you use wait.Backoff.
+type Backoff = wait.Backoff
+
+// This is implemented by several errors in the net package as well as our
+// transport.Error.
+type temporary interface {
+	Temporary() bool
+}
+
+// IsTemporary returns true if err implements Temporary() and it returns true.
+func IsTemporary(err error) bool {
+	if errors.Is(err, context.DeadlineExceeded) {
+		return false
+	}
+	if te, ok := err.(temporary); ok && te.Temporary() {
+		return true
+	}
+	return false
+}
+
+// IsNotNil returns true if err is not nil.
+func IsNotNil(err error) bool {
+	return err != nil
+}
+
+// Predicate determines whether an error should be retried.
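+//
+// A usage sketch with the helpers above (pushBlob is a hypothetical
+// func() error):
+//
+//	err := Retry(pushBlob, IsTemporary, Backoff{Duration: time.Second, Factor: 2, Steps: 3})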
+type Predicate func(error) (retry bool)
+
+// Retry retries a given function, f, until a predicate is satisfied, using
+// exponential backoff. If the predicate is never satisfied, it will return the
+// last error returned by f.
+func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) {
+	if f == nil {
+		return fmt.Errorf("nil f passed to retry")
+	}
+	if p == nil {
+		return fmt.Errorf("nil p passed to retry")
+	}
+
+	condition := func() (bool, error) {
+		err = f()
+		if p(err) {
+			return false, nil
+		}
+		return true, err
+	}
+
+	wait.ExponentialBackoff(backoff, condition)
+	return
+}
+
+type contextKey string
+
+var key = contextKey("never")
+
+// Never returns a context that signals something should not be retried.
+// This is a hack and can be used to communicate across package boundaries
+// to avoid retry amplification.
+func Never(ctx context.Context) context.Context {
+	return context.WithValue(ctx, key, true)
+}
+
+// Ever returns true if the context was not wrapped by Never.
+func Ever(ctx context.Context) bool {
+	return ctx.Value(key) == nil
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
new file mode 100644
index 0000000000..ab06e5f109
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wait is a subset of k8s.io/apimachinery to avoid conflicts
+// in dependencies (specifically, logging).
+package wait
+
+import (
+	"errors"
+	"math/rand"
+	"time"
+)
+
+// Jitter returns a time.Duration between duration and duration + maxFactor *
+// duration.
+//
+// This allows clients to avoid converging on periodic behavior. If maxFactor
+// is 0.0, a suggested default value will be chosen.
+func Jitter(duration time.Duration, maxFactor float64) time.Duration {
+	if maxFactor <= 0.0 {
+		maxFactor = 1.0
+	}
+	wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+	return wait
+}
+
+// ErrWaitTimeout is returned when the condition exited without success.
+var ErrWaitTimeout = errors.New("timed out waiting for the condition")
+
+// ConditionFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+type ConditionFunc func() (done bool, err error)
+
+// Backoff holds parameters applied to a Backoff function.
+type Backoff struct {
+	// The initial duration.
+	Duration time.Duration
+	// Duration is multiplied by factor each iteration, if factor is not zero
+	// and the limits imposed by Steps and Cap have not been reached.
+	// Should not be negative.
+	// The jitter does not contribute to the updates to the duration parameter.
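+	// (See Step below for exactly how Factor interacts with Steps and Cap.)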
+ Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. + Cap time.Duration +} + +// Step (1) returns an amount of time to sleep determined by the +// original Duration and Jitter and (2) mutates the provided Backoff +// to update its Steps and Duration. +func (b *Backoff) Step() time.Duration { + if b.Steps < 1 { + if b.Jitter > 0 { + return Jitter(b.Duration, b.Jitter) + } + return b.Duration + } + b.Steps-- + + duration := b.Duration + + // calculate the next step + if b.Factor != 0 { + b.Duration = time.Duration(float64(b.Duration) * b.Factor) + if b.Cap > 0 && b.Duration > b.Cap { + b.Duration = b.Cap + b.Steps = 0 + } + } + + if b.Jitter > 0 { + duration = Jitter(duration, b.Jitter) + } + return duration +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := condition(); err != nil || ok { + return err + } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) + } + return ErrWaitTimeout +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/verify/verify.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/verify/verify.go new file mode 100644 index 0000000000..463f7e4b39 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/verify/verify.go @@ -0,0 +1,122 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package verify provides a ReadCloser that verifies content matches the +// expected hash values. 
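+//
+// A sketch of the intended use (blob, size, and digest are hypothetical):
+//
+//	vrc, err := ReadCloser(blob, size, digest)
+//	// Reading vrc to io.EOF fails if the content does not match digest (or size).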
+package verify
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/google/go-containerregistry/internal/and"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+)
+
+// SizeUnknown is a sentinel value to indicate that the expected size is not known.
+const SizeUnknown = -1
+
+type verifyReader struct {
+	inner             io.Reader
+	hasher            hash.Hash
+	expected          v1.Hash
+	gotSize, wantSize int64
+}
+
+// Error provides information about the failed hash verification.
+type Error struct {
+	got     string
+	want    v1.Hash
+	gotSize int64
+}
+
+func (v Error) Error() string {
+	return fmt.Sprintf("error verifying %s checksum after reading %d bytes; got %q, want %q",
+		v.want.Algorithm, v.gotSize, v.got, v.want)
+}
+
+// Read implements io.Reader
+func (vc *verifyReader) Read(b []byte) (int, error) {
+	n, err := vc.inner.Read(b)
+	vc.gotSize += int64(n)
+	if err == io.EOF {
+		if vc.wantSize != SizeUnknown && vc.gotSize != vc.wantSize {
+			return n, fmt.Errorf("error verifying size; got %d, want %d", vc.gotSize, vc.wantSize)
+		}
+		got := hex.EncodeToString(vc.hasher.Sum(nil))
+		if want := vc.expected.Hex; got != want {
+			return n, Error{
+				got:     vc.expected.Algorithm + ":" + got,
+				want:    vc.expected,
+				gotSize: vc.gotSize,
+			}
+		}
+	}
+	return n, err
+}
+
+// ReadCloser wraps the given io.ReadCloser to verify that its contents match
+// the provided v1.Hash before io.EOF is returned.
+//
+// The reader will only be read up to size bytes, to prevent resource
+// exhaustion. If EOF is returned before size bytes are read, an error is
+// returned.
++// A size of SizeUnknown (-1) disables size verification when the size is
+// unknown ahead of time.
+func ReadCloser(r io.ReadCloser, size int64, h v1.Hash) (io.ReadCloser, error) {
+	w, err := v1.Hasher(h.Algorithm)
+	if err != nil {
+		return nil, err
+	}
+	r2 := io.TeeReader(r, w) // pass all writes to the hasher.
+	if size != SizeUnknown {
+		r2 = io.LimitReader(r2, size) // if we know the size, limit to that size.
+	}
+	return &and.ReadCloser{
+		Reader: &verifyReader{
+			inner:    r2,
+			hasher:   w,
+			expected: h,
+			wantSize: size,
+		},
+		CloseFunc: r.Close,
+	}, nil
+}
+
+// Descriptor verifies that the embedded Data field matches the Size and Digest
+// fields of the given v1.Descriptor, returning an error if the Data field is
+// missing or if it contains incorrect data.
+func Descriptor(d v1.Descriptor) error {
+	if d.Data == nil {
+		return errors.New("error verifying descriptor; Data == nil")
+	}
+
+	h, sz, err := v1.SHA256(bytes.NewReader(d.Data))
+	if err != nil {
+		return err
+	}
+	if h != d.Digest {
+		return fmt.Errorf("error verifying Digest; got %q, want %q", h, d.Digest)
+	}
+	if sz != d.Size {
+		return fmt.Errorf("error verifying Size; got %d, want %d", sz, d.Size)
+	}
+
+	return nil
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go
new file mode 100644
index 0000000000..cccf54a308
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go
@@ -0,0 +1,116 @@
+// Copyright 2022 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zstd provides helper functions for interacting with zstd streams. +package zstd + +import ( + "bufio" + "bytes" + "io" + + "github.com/google/go-containerregistry/internal/and" + "github.com/klauspost/compress/zstd" +) + +// MagicHeader is the start of zstd files. +var MagicHeader = []byte{'\x28', '\xb5', '\x2f', '\xfd'} + +// ReadCloser reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +// This uses zstd level 1 for the compression. +func ReadCloser(r io.ReadCloser) io.ReadCloser { + return ReadCloserLevel(r, 1) +} + +// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser { + pr, pw := io.Pipe() + + // For highly compressible layers, zstd.Writer will output a very small + // number of bytes per Write(). This is normally fine, but when pushing + // to a registry, we want to ensure that we're taking full advantage of + // the available bandwidth instead of sending tons of tiny writes over + // the wire. + // 64K ought to be small enough for anybody. + bw := bufio.NewWriterSize(pw, 2<<16) + + // Returns err so we can pw.CloseWithError(err) + go func() error { + // TODO(go1.14): Just defer {pw,zw,r}.Close like you'd expect. + // Context: https://golang.org/issue/24283 + zw, err := zstd.NewWriter(bw, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level))) + if err != nil { + return pw.CloseWithError(err) + } + + if _, err := io.Copy(zw, r); err != nil { + defer r.Close() + defer zw.Close() + return pw.CloseWithError(err) + } + + // Close zstd writer to Flush it and write zstd trailers. + if err := zw.Close(); err != nil { + return pw.CloseWithError(err) + } + + // Flush bufio writer to ensure we write out everything. + if err := bw.Flush(); err != nil { + return pw.CloseWithError(err) + } + + // We don't really care if these fail. + defer pw.Close() + defer r.Close() + + return nil + }() + + return pr +} + +// UnzipReadCloser reads compressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which uncompressed data may be read. +func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { + gr, err := zstd.NewReader(r) + if err != nil { + return nil, err + } + return &and.ReadCloser{ + Reader: gr, + CloseFunc: func() error { + // If the unzip fails, then this seems to return the same + // error as the read. We don't want this to interfere with + // us closing the main ReadCloser, since this could leave + // an open file descriptor (fails on Windows). + gr.Close() + return r.Close() + }, + }, nil +} + +// Is detects whether the input stream is compressed. 
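+// Note that Is consumes up to four bytes from r, so callers that need the
+// full stream afterwards should buffer or re-open it.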
+func Is(r io.Reader) (bool, error) { + magicHeader := make([]byte, 4) + n, err := r.Read(magicHeader) + if n == 0 && err == io.EOF { + return false, nil + } + if err != nil { + return false, err + } + return bytes.Equal(magicHeader, MagicHeader), nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/README.md new file mode 100644 index 0000000000..042bddec04 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/README.md @@ -0,0 +1,322 @@ +# `authn` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/authn?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/authn) + +This README outlines how we acquire and use credentials when interacting with a registry. + +As much as possible, we attempt to emulate `docker`'s authentication behavior and configuration so that this library "just works" if you've already configured credentials that work with `docker`; however, when things don't work, a basic understanding of what's going on can help with debugging. + +The official documentation for how authentication with `docker` works is (reasonably) scattered across several different sites and GitHub repositories, so we've tried to summarize the relevant bits here. + +## tl;dr for consumers of this package + +By default, [`pkg/v1/remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) uses [`Anonymous`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#Anonymous) credentials (i.e. _none_), which for most registries will only allow read access to public images. + +To use the credentials found in your Docker config file, you can use the [`DefaultKeychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#DefaultKeychain), e.g.: + +```go +package main + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + ref, err := name.ParseReference("registry.example.com/private/repo") + if err != nil { + panic(err) + } + + // Fetch the manifest using default credentials. + img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + panic(err) + } + + // Prints the digest of registry.example.com/private/repo + fmt.Println(img.Digest) +} +``` + +The `DefaultKeychain` will use credentials as described in your Docker config file -- usually `~/.docker/config.json`, or `%USERPROFILE%\.docker\config.json` on Windows -- or the location described by the `DOCKER_CONFIG` environment variable, if set. + +If those are not found, `DefaultKeychain` will look for credentials configured using [Podman's expectation](https://docs.podman.io/en/latest/markdown/podman-login.1.html) that these are found in `${XDG_RUNTIME_DIR}/containers/auth.json`. + +[See below](#docker-config-auth) for more information about what is configured in this file. + +## Emulating Cloud Provider Credential Helpers + +[`pkg/v1/google.Keychain`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#Keychain) provides a `Keychain` implementation that emulates [`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr) to find credentials in the environment. 
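+
+For example, a minimal sketch of pulling with that keychain (`ref` as in the earlier example):
+
+```go
+img, err := remote.Get(ref, remote.WithAuthFromKeychain(google.Keychain))
+```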
+See [`google.NewEnvAuthenticator`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#NewEnvAuthenticator) and [`google.NewGcloudAuthenticator`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#NewGcloudAuthenticator) for more information.
+
+To emulate other credential helpers without requiring them to be available as executables, [`NewKeychainFromHelper`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/authn#NewKeychainFromHelper) provides an adapter that takes a Go implementation satisfying a subset of the [`credentials.Helper`](https://pkg.go.dev/github.com/docker/docker-credential-helpers/credentials#Helper) interface, and makes it available as a `Keychain`.
+
+This means that you can emulate, for example, [Amazon ECR's `docker-credential-ecr-login` credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) using the same implementation:
+
+```go
+import (
+	ecr "github.com/awslabs/amazon-ecr-credential-helper/ecr-login"
+	"github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+	// ...
+	ecrHelper := ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}}
+	img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.NewKeychainFromHelper(ecrHelper)))
+	if err != nil {
+		panic(err)
+	}
+	// ...
+}
+```
+
+Likewise, you can emulate [Azure's ACR `docker-credential-acr-env` credential helper](https://github.com/chrismellard/docker-credential-acr-env):
+
+```go
+import (
+	"github.com/chrismellard/docker-credential-acr-env/pkg/credhelper"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+	// ...
+	acrHelper := credhelper.NewACRCredentialsHelper()
+	img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.NewKeychainFromHelper(acrHelper)))
+	if err != nil {
+		panic(err)
+	}
+	// ...
+}
+```
+
+## Using Multiple `Keychain`s
+
+[`NewMultiKeychain`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/authn#NewMultiKeychain) allows you to specify multiple `Keychain` implementations, which will be checked in order when credentials are needed.
+
+For example:
+
+```go
+kc := authn.NewMultiKeychain(
+	authn.DefaultKeychain,
+	google.Keychain,
+	authn.NewKeychainFromHelper(ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}}),
+	authn.NewKeychainFromHelper(acr.ACRCredHelper{}),
+)
+```
+
+This multi-keychain will:
+
+- first check for credentials found in the Docker config file, as described above, then
+- check for GCP credentials available in the environment, as described above, then
+- check for ECR credentials by emulating the ECR credential helper, then
+- check for ACR credentials by emulating the ACR credential helper.
+
+If any keychain implementation is able to provide credentials for the request, they will be used, and further keychain implementations will not be consulted.
+
+If no implementations are able to provide credentials, `Anonymous` credentials will be used.
+
+## Docker Config Auth
+
+What follows attempts to gather useful information about Docker's config.json and make it available in one place.
+
+If you have questions, please [file an issue](https://github.com/google/go-containerregistry/issues/new).
+
+### Plaintext
+
+The config file is where your credentials are stored when you invoke `docker login`, e.g.
the contents may look something like this:
+
+```json
+{
+  "auths": {
+    "registry.example.com": {
+      "auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI="
+    }
+  }
+}
+```
+
+The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617).
+
+**NOTE**: This means that your credentials are stored _in plaintext_:
+
+```bash
+$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d
+AzureDiamond:hunter2
+```
+
+For what it's worth, this config file is equivalent to:
+
+```json
+{
+  "auths": {
+    "registry.example.com": {
+      "username": "AzureDiamond",
+      "password": "hunter2"
+    }
+  }
+}
+```
+
+... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`.
+
+### Helpers
+
+If you log in like this, `docker` will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should!
+
+To configure a global credential helper:
+```json
+{
+  "credsStore": "osxkeychain"
+}
+```
+
+To configure a per-registry credential helper:
+```json
+{
+  "credHelpers": {
+    "gcr.io": "gcr"
+  }
+}
+```
+
+We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry.
+
+## Credential Helpers
+
+The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file.
+
+The protocol has several verbs, but the one we most care about is `get`.
+
+For example, using the following config file:
+```json
+{
+  "credHelpers": {
+    "gcr.io": "gcr",
+    "eu.gcr.io": "gcr"
+  }
+}
+```
+
+To acquire credentials for `gcr.io`, we look in the `credHelpers` map and find
+that the credential helper for `gcr.io` is `gcr`. By appending that value to
+`docker-credential-`, we can get the name of the binary we need to use.
+
+For this example, that's `docker-credential-gcr`, which must be on our `$PATH`.
+We'll then invoke that binary to get credentials:
+
+```bash
+$ echo "gcr.io" | docker-credential-gcr get
+{"Username":"_token","Secret":""}
+```
+
+You can configure the same credential helper for multiple registries, which is
+why we need to pass the domain in via STDIN, e.g. if we were trying to access
+`eu.gcr.io`, we'd do this instead:
+
+```bash
+$ echo "eu.gcr.io" | docker-credential-gcr get
+{"Username":"_token","Secret":""}
+```
+
+### Debugging credential helpers
+
+If a credential helper is configured but doesn't seem to be working, it can be
+challenging to debug. Implementing a fake credential helper lets you poke around
+to make it easier to see where the failure is happening.
+
+This "implements" a credential helper with hard-coded values:
+```
+#!/usr/bin/env bash
+echo '{"Username":"","Secret":"hunter2"}'
+```
+
+This implements a credential helper that prints the output of
+`docker-credential-gcr` to both stderr and whatever called it, which allows you
+to snoop on another credential helper:
+```
+#!/usr/bin/env bash
+docker-credential-gcr $@ | tee >(cat 1>&2)
+```
+
+Put those files somewhere on your path, naming them e.g.
+`docker-credential-hardcoded` and `docker-credential-tee`, then modify the
+config file to use them:
+
+```json
+{
+  "credHelpers": {
+    "gcr.io": "tee",
+    "eu.gcr.io": "hardcoded"
+  }
+}
+```
+
+The `docker-credential-tee` trick works with both `crane` and `docker`:
+
+```bash
+$ crane manifest gcr.io/google-containers/pause > /dev/null
+{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
+
+$ docker pull gcr.io/google-containers/pause
+Using default tag: latest
+{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
+latest: Pulling from google-containers/pause
+a3ed95caeb02: Pull complete
+4964c72cd024: Pull complete
+Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f
+Status: Downloaded newer image for gcr.io/google-containers/pause:latest
+gcr.io/google-containers/pause:latest
+```
+
+## The Registry
+
+There are two methods for authenticating against a registry:
+[token](https://docs.docker.com/registry/spec/auth/token/) and
+[oauth2](https://docs.docker.com/registry/spec/auth/oauth/).
+
+Both methods are used to acquire an opaque `Bearer` token (or
+[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21))
+to use in the `Authorization` header. The registry will return a `401
+Unauthorized` during the [version
+check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check)
+(or during normal operations) with
+[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge
+indicating how to proceed.
+
+### Token
+
+If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6)
+or
+[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7),
+we'll use the token method for authentication:
+
+![basic](../../images/credhelper-basic.svg)
+
+### OAuth 2
+
+If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18)
+we'll use the oauth2 method for authentication:
+
+![oauth](../../images/credhelper-oauth.svg)
+
+This happens when a credential helper returns a response with the
+[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16)
+set to `<token>` (no, that's not a placeholder, the literal string `"<token>"`).
+It is unclear why: [moby/moby#36926](https://github.com/moby/moby/issues/36926).
+
+We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)),
+since it's impossible to determine from the registry response whether we should
+use oauth, and the token method for authentication is widely implemented by
+registries.
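+
+As a sketch of the escape hatch this implies: if you already hold a registry bearer
+token obtained out of band, you can skip the token exchange entirely by supplying it
+as a `RegistryToken` (the token value below is a placeholder):
+
+```go
+auth := authn.FromConfig(authn.AuthConfig{RegistryToken: "eyJ..."})
+img, err := remote.Get(ref, remote.WithAuth(auth))
+```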
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go new file mode 100644 index 0000000000..83214957d5 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go @@ -0,0 +1,26 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// anonymous implements Authenticator for anonymous authentication. +type anonymous struct{} + +// Authorization implements Authenticator. +func (a *anonymous) Authorization() (*AuthConfig, error) { + return &AuthConfig{}, nil +} + +// Anonymous is a singleton Authenticator for providing anonymous auth. +var Anonymous Authenticator = &anonymous{} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go new file mode 100644 index 0000000000..0111f1ae72 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// auth is an Authenticator that simply returns the wrapped AuthConfig. +type auth struct { + config AuthConfig +} + +// FromConfig returns an Authenticator that just returns the given AuthConfig. +func FromConfig(cfg AuthConfig) Authenticator { + return &auth{cfg} +} + +// Authorization implements Authenticator. +func (a *auth) Authorization() (*AuthConfig, error) { + return &a.config, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go new file mode 100644 index 0000000000..1555efae04 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go @@ -0,0 +1,132 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Authenticator is used to authenticate Docker transports.
+type Authenticator interface {
+	// Authorization returns the value to use in an http transport's Authorization header.
+	Authorization() (*AuthConfig, error)
+}
+
+// ContextAuthenticator is like Authenticator, but allows for context to be passed in.
+type ContextAuthenticator interface {
+	// AuthorizationContext returns the value to use in an http transport's Authorization header.
+	AuthorizationContext(context.Context) (*AuthConfig, error)
+}
+
+// Authorization calls AuthorizationContext with ctx if the given [Authenticator] implements [ContextAuthenticator],
+// otherwise it calls Authorization on the given [Authenticator].
+func Authorization(ctx context.Context, authn Authenticator) (*AuthConfig, error) {
+	if actx, ok := authn.(ContextAuthenticator); ok {
+		return actx.AuthorizationContext(ctx)
+	}
+
+	return authn.Authorization()
+}
+
+// AuthConfig contains authorization information for connecting to a Registry.
+// Inlined what we use from github.com/docker/cli/cli/config/types
+type AuthConfig struct {
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	Auth     string `json:"auth,omitempty"`
+
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
+
+	// RegistryToken is a bearer token to be sent to a registry
+	RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// This is effectively a copy of the type AuthConfig. This simplifies
+// JSON unmarshalling since AuthConfig methods are not inherited.
+type authConfig AuthConfig
+
+// UnmarshalJSON implements json.Unmarshaler
+func (a *AuthConfig) UnmarshalJSON(data []byte) error {
+	var shadow authConfig
+	err := json.Unmarshal(data, &shadow)
+	if err != nil {
+		return err
+	}
+
+	*a = (AuthConfig)(shadow)
+
+	if len(shadow.Auth) != 0 {
+		var derr error
+		a.Username, a.Password, derr = decodeDockerConfigFieldAuth(shadow.Auth)
+		if derr != nil {
+			err = fmt.Errorf("unable to decode auth field: %w", derr)
+		}
+	} else if len(a.Username) != 0 && len(a.Password) != 0 {
+		a.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
+	}
+
+	return err
+}
+
+// MarshalJSON implements json.Marshaler
+func (a AuthConfig) MarshalJSON() ([]byte, error) {
+	shadow := (authConfig)(a)
+	shadow.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
+	return json.Marshal(shadow)
+}
+
+// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
+// username and a password. The format of the auth field is base64(<username>:<password>).
+//
+// From https://github.com/kubernetes/kubernetes/blob/75e49ec824b183288e1dbaccfd7dbe77d89db381/pkg/credentialprovider/config.go
+// Copyright 2014 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0 +func decodeDockerConfigFieldAuth(field string) (username, password string, err error) { + var decoded []byte + // StdEncoding can only decode padded string + // RawStdEncoding can only decode unpadded string + if strings.HasSuffix(strings.TrimSpace(field), "=") { + // decode padded data + decoded, err = base64.StdEncoding.DecodeString(field) + } else { + // decode unpadded data + decoded, err = base64.RawStdEncoding.DecodeString(field) + } + + if err != nil { + return + } + + parts := strings.SplitN(string(decoded), ":", 2) + if len(parts) != 2 { + err = fmt.Errorf("must be formatted as base64(username:password)") + return + } + + username = parts[0] + password = parts[1] + + return +} + +func encodeDockerConfigFieldAuth(username, password string) string { + return base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go new file mode 100644 index 0000000000..500cb6616f --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Basic implements Authenticator for basic authentication. +type Basic struct { + Username string + Password string +} + +// Authorization implements Authenticator. +func (b *Basic) Authorization() (*AuthConfig, error) { + return &AuthConfig{ + Username: b.Username, + Password: b.Password, + }, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go new file mode 100644 index 0000000000..4cf86df92f --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go @@ -0,0 +1,27 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Bearer implements Authenticator for bearer authentication. +type Bearer struct { + Token string `json:"token"` +} + +// Authorization implements Authenticator. 
+func (b *Bearer) Authorization() (*AuthConfig, error) { + return &AuthConfig{ + RegistryToken: b.Token, + }, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go new file mode 100644 index 0000000000..c2a5fc0267 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package authn defines different methods of authentication for +// talking to a container registry. +package authn diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go new file mode 100644 index 0000000000..6e8814d808 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -0,0 +1,294 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "context" + "os" + "path/filepath" + "sync" + "time" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/types" + "github.com/google/go-containerregistry/pkg/name" + "github.com/mitchellh/go-homedir" +) + +// Resource represents a registry or repository that can be authenticated against. +type Resource interface { + // String returns the full string representation of the target, e.g. + // gcr.io/my-project or just gcr.io. + String() string + + // RegistryStr returns just the registry portion of the target, e.g. for + // gcr.io/my-project, this should just return gcr.io. This is needed to + // pull out an appropriate hostname. + RegistryStr() string +} + +// Keychain is an interface for resolving an image reference to a credential. +type Keychain interface { + // Resolve looks up the most appropriate credential for the specified target. + Resolve(Resource) (Authenticator, error) +} + +// ContextKeychain is like Keychain, but allows for context to be passed in. +type ContextKeychain interface { + ResolveContext(context.Context, Resource) (Authenticator, error) +} + +// defaultKeychain implements Keychain with the semantics of the standard Docker +// credential keychain. 
+type defaultKeychain struct {
+	mu sync.Mutex
+}
+
+var (
+	// DefaultKeychain implements Keychain by interpreting the docker config file.
+	DefaultKeychain = &defaultKeychain{}
+)
+
+const (
+	// DefaultAuthKey is the key used for dockerhub in config files, which
+	// is hardcoded for historical reasons.
+	DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/"
+)
+
+// Resolve calls ResolveContext with ctx if the given [Keychain] implements [ContextKeychain],
+// otherwise it calls Resolve with the given [Resource].
+func Resolve(ctx context.Context, keychain Keychain, target Resource) (Authenticator, error) {
+	if rctx, ok := keychain.(ContextKeychain); ok {
+		return rctx.ResolveContext(ctx, target)
+	}
+
+	return keychain.Resolve(target)
+}
+
+// Resolve implements Keychain.
+func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
+	return dk.ResolveContext(context.Background(), target)
+}
+
+// ResolveContext implements ContextKeychain.
+func (dk *defaultKeychain) ResolveContext(_ context.Context, target Resource) (Authenticator, error) {
+	dk.mu.Lock()
+	defer dk.mu.Unlock()
+
+	// Podman users may have their container registry auth configured in a
+	// different location that Docker packages aren't aware of.
+	// If the Docker config file isn't found, we'll fall back to looking where
+	// Podman configures it, and parse that as a Docker auth config instead.
+
+	// First, check $HOME/.docker/config.json
+	foundDockerConfig := false
+	home, err := homedir.Dir()
+	if err == nil {
+		foundDockerConfig = fileExists(filepath.Join(home, ".docker/config.json"))
+	}
+	// If $HOME/.docker/config.json isn't found, check $DOCKER_CONFIG (if set)
+	if !foundDockerConfig && os.Getenv("DOCKER_CONFIG") != "" {
+		foundDockerConfig = fileExists(filepath.Join(os.Getenv("DOCKER_CONFIG"), "config.json"))
+	}
+	// If either of those locations is found, load it using Docker's
+	// config.Load, which may fail if the config can't be parsed.
+	//
+	// If neither was found, look for Podman's auth at
+	// $REGISTRY_AUTH_FILE or $XDG_RUNTIME_DIR/containers/auth.json
+	// and attempt to load it as a Docker config.
+	//
+	// If neither is found, fall back to Anonymous.
+	var cf *configfile.ConfigFile
+	if foundDockerConfig {
+		cf, err = config.Load(os.Getenv("DOCKER_CONFIG"))
+		if err != nil {
+			return nil, err
+		}
+	} else if fileExists(os.Getenv("REGISTRY_AUTH_FILE")) {
+		f, err := os.Open(os.Getenv("REGISTRY_AUTH_FILE"))
+		if err != nil {
+			return nil, err
+		}
+		defer f.Close()
+		cf, err = config.LoadFromReader(f)
+		if err != nil {
+			return nil, err
+		}
+	} else if fileExists(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json")) {
+		f, err := os.Open(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json"))
+		if err != nil {
+			return nil, err
+		}
+		defer f.Close()
+		cf, err = config.LoadFromReader(f)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		return Anonymous, nil
+	}
+
+	// See:
+	// https://github.com/google/ko/issues/90
+	// https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404
+	var cfg, empty types.AuthConfig
+	for _, key := range []string{
+		target.String(),
+		target.RegistryStr(),
+	} {
+		if key == name.DefaultRegistry {
+			key = DefaultAuthKey
+		}
+
+		cfg, err = cf.GetAuthConfig(key)
+		if err != nil {
+			return nil, err
+		}
+		// cf.GetAuthConfig automatically sets the ServerAddress attribute. Since
+		// we don't make use of it, clear the value for a proper "is-empty" test.
+		// See: https://github.com/google/go-containerregistry/issues/1510
+		cfg.ServerAddress = ""
+		if cfg != empty {
+			break
+		}
+	}
+	if cfg == empty {
+		return Anonymous, nil
+	}
+
+	return FromConfig(AuthConfig{
+		Username:      cfg.Username,
+		Password:      cfg.Password,
+		Auth:          cfg.Auth,
+		IdentityToken: cfg.IdentityToken,
+		RegistryToken: cfg.RegistryToken,
+	}), nil
+}
+
+// fileExists returns true if the given path exists and is not a directory.
+func fileExists(path string) bool {
+	fi, err := os.Stat(path)
+	return err == nil && !fi.IsDir()
+}
+
+// Helper is a subset of the Docker credential helper credentials.Helper
+// interface used by NewKeychainFromHelper.
+//
+// See:
+// https://pkg.go.dev/github.com/docker/docker-credential-helpers/credentials#Helper
+type Helper interface {
+	Get(serverURL string) (string, string, error)
+}
+
+// NewKeychainFromHelper returns a Keychain based on a Docker credential helper
+// implementation that can Get username and password credentials for a given
+// server URL.
+func NewKeychainFromHelper(h Helper) Keychain { return wrapper{h} }
+
+type wrapper struct{ h Helper }
+
+func (w wrapper) Resolve(r Resource) (Authenticator, error) {
+	return w.ResolveContext(context.Background(), r)
+}
+
+func (w wrapper) ResolveContext(_ context.Context, r Resource) (Authenticator, error) {
+	u, p, err := w.h.Get(r.RegistryStr())
+	if err != nil {
+		return Anonymous, nil
+	}
+	// If the secret being stored is an identity token, the Username should be set to <token>.
+	// ref: https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol
+	if u == "<token>" {
+		return FromConfig(AuthConfig{Username: u, IdentityToken: p}), nil
+	}
+	return FromConfig(AuthConfig{Username: u, Password: p}), nil
+}
+
+// RefreshingKeychain wraps the given Keychain, re-resolving credentials once a
+// cached Authenticator is older than the given duration.
+func RefreshingKeychain(inner Keychain, duration time.Duration) Keychain {
+	return &refreshingKeychain{
+		keychain: inner,
+		duration: duration,
+	}
+}
+
+type refreshingKeychain struct {
+	keychain Keychain
+	duration time.Duration
+	clock    func() time.Time
+}
+
+func (r *refreshingKeychain) Resolve(target Resource) (Authenticator, error) {
+	return r.ResolveContext(context.Background(), target)
+}
+
+func (r *refreshingKeychain) ResolveContext(ctx context.Context, target Resource) (Authenticator, error) {
+	last := time.Now()
+	auth, err := Resolve(ctx, r.keychain, target)
+	if err != nil || auth == Anonymous {
+		return auth, err
+	}
+	return &refreshing{
+		target:   target,
+		keychain: r.keychain,
+		last:     last,
+		cached:   auth,
+		duration: r.duration,
+		clock:    r.clock,
+	}, nil
+}
+
+type refreshing struct {
+	sync.Mutex
+	target   Resource
+	keychain Keychain
+
+	duration time.Duration
+
+	last   time.Time
+	cached Authenticator
+
+	// for testing
+	clock func() time.Time
+}
+
+func (r *refreshing) Authorization() (*AuthConfig, error) {
+	return r.AuthorizationContext(context.Background())
+}
+
+func (r *refreshing) AuthorizationContext(ctx context.Context) (*AuthConfig, error) {
+	r.Lock()
+	defer r.Unlock()
+	if r.cached == nil || r.expired() {
+		r.last = r.now()
+		auth, err := Resolve(ctx, r.keychain, r.target)
+		if err != nil {
+			return nil, err
+		}
+		r.cached = auth
+	}
+	return Authorization(ctx, r.cached)
+}
+
+func (r *refreshing) now() time.Time {
+	if r.clock == nil {
+		return time.Now()
+	}
+	return r.clock()
+}
+
+func (r *refreshing) expired() bool {
+	return r.now().Sub(r.last) > r.duration
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
new file mode 100644
index 0000000000..fe241a0fd9
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
@@ -0,0 +1,47 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+import "context"
+
+type multiKeychain struct {
+	keychains []Keychain
+}
+
+// Assert that our multi-keychain implements Keychain.
+var _ (Keychain) = (*multiKeychain)(nil)
+
+// NewMultiKeychain composes a list of keychains into one new keychain.
+func NewMultiKeychain(kcs ...Keychain) Keychain {
+	return &multiKeychain{keychains: kcs}
+}
+
+// Resolve implements Keychain.
+func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) {
+	return mk.ResolveContext(context.Background(), target)
+}
+
+// ResolveContext implements ContextKeychain.
+func (mk *multiKeychain) ResolveContext(ctx context.Context, target Resource) (Authenticator, error) {
+	for _, kc := range mk.keychains {
+		auth, err := Resolve(ctx, kc, target)
+		if err != nil {
+			return nil, err
+		}
+		if auth != Anonymous {
+			return auth, nil
+		}
+	}
+	return Anonymous, nil
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go
new file mode 100644
index 0000000000..6686c2d8d9
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go
@@ -0,0 +1,26 @@
+// Copyright 2022 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compression abstracts over gzip and zstd.
+package compression
+
+// Compression is an enumeration of the supported compression algorithms
+type Compression string
+
+// The collection of known Compression values.
+const (
+	None Compression = "none"
+	GZip Compression = "gzip"
+	ZStd Compression = "zstd"
+)
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go
new file mode 100644
index 0000000000..a5d25b1887
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go
@@ -0,0 +1,39 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logs exposes the loggers used by this library.
+package logs
+
+import (
+	"io"
+	"log"
+)
+
+var (
+	// Warn is used to log non-fatal errors.
+	Warn = log.New(io.Discard, "", log.LstdFlags)
+
+	// Progress is used to log notable, successful events.
+	Progress = log.New(io.Discard, "", log.LstdFlags)
+
+	// Debug is used to log information that is useful for debugging.
+	Debug = log.New(io.Discard, "", log.LstdFlags)
+)
+
+// Enabled checks to see if the logger's writer is set to something other
+// than io.Discard. This allows callers to avoid expensive operations
+// that will end up in /dev/null anyway.
+func Enabled(l *log.Logger) bool {
+	return l.Writer() != io.Discard
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/README.md
new file mode 100644
index 0000000000..4889b8446a
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/README.md
@@ -0,0 +1,3 @@
+# `name`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name)
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/check.go
new file mode 100644
index 0000000000..e9a240a3e5
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/check.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+// stripRunesFn returns a function which returns -1 (i.e. a value which
+// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise.
+func stripRunesFn(runes string) func(rune) rune {
+	return func(r rune) rune {
+		if strings.ContainsRune(runes, r) {
+			return -1
+		}
+		return r
+	}
+}
+
+// checkElement checks that a given named element matches character and length restrictions.
+// Returns nil if the given element adheres to the given restrictions, and an error otherwise.
+func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error { + numRunes := utf8.RuneCountInString(element) + if (numRunes < minRunes) || (maxRunes < numRunes) { + return newErrBadName("%s must be between %d and %d characters in length: %s", name, minRunes, maxRunes, element) + } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 { + return newErrBadName("%s can only contain the characters `%s`: %s", name, allowedRunes, element) + } + return nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/digest.go new file mode 100644 index 0000000000..5b8eb4ff46 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -0,0 +1,133 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + // nolint: depguard + _ "crypto/sha256" // Recommended by go-digest. + "encoding" + "encoding/json" + "strings" + + "github.com/opencontainers/go-digest" +) + +const digestDelim = "@" + +// Digest stores a digest name in a structured form. +type Digest struct { + Repository + digest string + original string +} + +var _ Reference = (*Digest)(nil) +var _ encoding.TextMarshaler = (*Digest)(nil) +var _ encoding.TextUnmarshaler = (*Digest)(nil) +var _ json.Marshaler = (*Digest)(nil) +var _ json.Unmarshaler = (*Digest)(nil) + +// Context implements Reference. +func (d Digest) Context() Repository { + return d.Repository +} + +// Identifier implements Reference. +func (d Digest) Identifier() string { + return d.DigestStr() +} + +// DigestStr returns the digest component of the Digest. +func (d Digest) DigestStr() string { + return d.digest +} + +// Name returns the name from which the Digest was derived. +func (d Digest) Name() string { + return d.Repository.Name() + digestDelim + d.DigestStr() +} + +// String returns the original input string. +func (d Digest) String() string { + return d.original +} + +// MarshalJSON formats the digest into a string for JSON serialization. +func (d Digest) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// UnmarshalJSON parses a JSON string into a Digest. +func (d *Digest) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewDigest(s) + if err != nil { + return err + } + *d = n + return nil +} + +// MarshalText formats the digest into a string for text serialization. +func (d Digest) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses a text string into a Digest. +func (d *Digest) UnmarshalText(data []byte) error { + n, err := NewDigest(string(data)) + if err != nil { + return err + } + *d = n + return nil +} + +// NewDigest returns a new Digest representing the given name. 
+func NewDigest(name string, opts ...Option) (Digest, error) { + // Split on "@" + parts := strings.Split(name, digestDelim) + if len(parts) != 2 { + return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) + } + base := parts[0] + dig := parts[1] + prefix := digest.Canonical.String() + ":" + if !strings.HasPrefix(dig, prefix) { + return Digest{}, newErrBadName("unsupported digest algorithm: %s", dig) + } + hex := strings.TrimPrefix(dig, prefix) + if err := digest.Canonical.Validate(hex); err != nil { + return Digest{}, err + } + + tag, err := NewTag(base, opts...) + if err == nil { + base = tag.Repository.Name() + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Digest{}, err + } + return Digest{ + Repository: repo, + digest: dig, + original: name, + }, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/doc.go new file mode 100644 index 0000000000..b294794dc1 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/doc.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package name defines structured types for representing image references. +// +// What's in a name? For image references, not nearly enough! +// +// Image references look a lot like URLs, but they differ in that they don't +// contain the scheme (http or https), they can end with a :tag or a @digest +// (the latter being validated), and they perform defaulting for missing +// components. +// +// Since image references don't contain the scheme, we do our best to infer +// if we use http or https from the given hostname. We allow http fallback for +// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in +// ".local", or is in the "private" address space per RFC 1918. For everything +// else, we assume https only. To override this heuristic, use the Insecure +// option. +// +// Image references with a digest signal to us that we should verify the content +// of the image matches the digest. E.g. when pulling a Digest reference, we'll +// calculate the sha256 of the manifest returned by the registry and error out +// if it doesn't match what we asked for. +// +// For defaulting, we interpret "ubuntu" as +// "index.docker.io/library/ubuntu:latest" because we add the missing repo +// "library", the missing registry "index.docker.io", and the missing tag +// "latest". To disable this defaulting, use the StrictValidation option. This +// is useful e.g. to only allow image references that explicitly set a tag or +// digest, so that you don't accidentally pull "latest". 
+package name diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/errors.go new file mode 100644 index 0000000000..bf004ffcfb --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/errors.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "errors" + "fmt" +) + +// ErrBadName is an error for when a bad docker name is supplied. +type ErrBadName struct { + info string +} + +func (e *ErrBadName) Error() string { + return e.info +} + +// Is reports whether target is an error of type ErrBadName +func (e *ErrBadName) Is(target error) bool { + var berr *ErrBadName + return errors.As(target, &berr) +} + +// newErrBadName returns a ErrBadName which returns the given formatted string from Error(). +func newErrBadName(fmtStr string, args ...any) *ErrBadName { + return &ErrBadName{fmt.Sprintf(fmtStr, args...)} +} + +// IsErrBadName returns true if the given error is an ErrBadName. +// +// Deprecated: Use errors.Is. +func IsErrBadName(err error) bool { + var berr *ErrBadName + return errors.As(err, &berr) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/options.go new file mode 100644 index 0000000000..d14fedcdad --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/options.go @@ -0,0 +1,83 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +const ( + // DefaultRegistry is the registry name that will be used if no registry + // provided and the default is not overridden. + DefaultRegistry = "index.docker.io" + defaultRegistryAlias = "docker.io" + + // DefaultTag is the tag name that will be used if no tag provided and the + // default is not overridden. + DefaultTag = "latest" +) + +type options struct { + strict bool // weak by default + insecure bool // secure by default + defaultRegistry string + defaultTag string +} + +func makeOptions(opts ...Option) options { + opt := options{ + defaultRegistry: DefaultRegistry, + defaultTag: DefaultTag, + } + for _, o := range opts { + o(&opt) + } + return opt +} + +// Option is a functional option for name parsing. 
+type Option func(*options) + +// StrictValidation is an Option that requires image references to be fully +// specified; i.e. no defaulting for registry (dockerhub), repo (library), +// or tag (latest). +func StrictValidation(opts *options) { + opts.strict = true +} + +// WeakValidation is an Option that sets defaults when parsing names, see +// StrictValidation. +func WeakValidation(opts *options) { + opts.strict = false +} + +// Insecure is an Option that allows image references to be fetched without TLS. +func Insecure(opts *options) { + opts.insecure = true +} + +// OptionFn is a function that returns an option. +type OptionFn func() Option + +// WithDefaultRegistry sets the default registry that will be used if one is not +// provided. +func WithDefaultRegistry(r string) Option { + return func(opts *options) { + opts.defaultRegistry = r + } +} + +// WithDefaultTag sets the default tag that will be used if one is not provided. +func WithDefaultTag(t string) Option { + return func(opts *options) { + opts.defaultTag = t + } +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/ref.go new file mode 100644 index 0000000000..0a04867723 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -0,0 +1,75 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" +) + +// Reference defines the interface that consumers use when they can +// take either a tag or a digest. +type Reference interface { + fmt.Stringer + + // Context accesses the Repository context of the reference. + Context() Repository + + // Identifier accesses the type-specific portion of the reference. + Identifier() string + + // Name is the fully-qualified reference name. + Name() string + + // Scope is the scope needed to access this reference. + Scope(string) string +} + +// ParseReference parses the string as a reference, either by tag or digest. +func ParseReference(s string, opts ...Option) (Reference, error) { + if t, err := NewTag(s, opts...); err == nil { + return t, nil + } + if d, err := NewDigest(s, opts...); err == nil { + return d, nil + } + return nil, newErrBadName("could not parse reference: %s", s) +} + +type stringConst string + +// MustParseReference behaves like ParseReference, but panics instead of +// returning an error. It's intended for use in tests, or when a value is +// expected to be valid at code authoring time. 
+// +// To discourage its use in scenarios where the value is not known at code +// authoring time, it must be passed a string constant: +// +// const str = "valid/string" +// MustParseReference(str) +// MustParseReference("another/valid/string") +// MustParseReference(str + "/and/more") +// +// These will not compile: +// +// var str = "valid/string" +// MustParseReference(str) +// MustParseReference(strings.Join([]string{"valid", "string"}, "/")) +func MustParseReference(s stringConst, opts ...Option) Reference { + ref, err := ParseReference(string(s), opts...) + if err != nil { + panic(err) + } + return ref +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/registry.go new file mode 100644 index 0000000000..5e6b6e62a0 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -0,0 +1,179 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "encoding" + "encoding/json" + "net" + "net/url" + "path" + "regexp" + "strings" +) + +// Detect more complex forms of local references. +var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) + +// Detect the loopback IP (127.0.0.1) +var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) + +// Detect the loopback IPV6 (::1) +var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) + +// Registry stores a docker registry name in a structured form. +type Registry struct { + insecure bool + registry string +} + +var _ encoding.TextMarshaler = (*Registry)(nil) +var _ encoding.TextUnmarshaler = (*Registry)(nil) +var _ json.Marshaler = (*Registry)(nil) +var _ json.Unmarshaler = (*Registry)(nil) + +// RegistryStr returns the registry component of the Registry. +func (r Registry) RegistryStr() string { + return r.registry +} + +// Name returns the name from which the Registry was derived. +func (r Registry) Name() string { + return r.RegistryStr() +} + +func (r Registry) String() string { + return r.Name() +} + +// Repo returns a Repository in the Registry with the given name. +func (r Registry) Repo(repo ...string) Repository { + return Repository{Registry: r, repository: path.Join(repo...)} +} + +// Scope returns the scope required to access the registry. +func (r Registry) Scope(string) string { + // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z + return "registry:catalog:*" +} + +func (r Registry) isRFC1918() bool { + ipStr := strings.Split(r.Name(), ":")[0] + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { + _, block, _ := net.ParseCIDR(cidr) + if block.Contains(ip) { + return true + } + } + return false +} + +// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. 
+func (r Registry) Scheme() string { + if r.insecure { + return "http" + } + if r.isRFC1918() { + return "http" + } + if strings.HasPrefix(r.Name(), "localhost:") { + return "http" + } + if reLocal.MatchString(r.Name()) { + return "http" + } + if reLoopback.MatchString(r.Name()) { + return "http" + } + if reipv6Loopback.MatchString(r.Name()) { + return "http" + } + return "https" +} + +func checkRegistry(name string) error { + // Per RFC 3986, registries (authorities) are required to be prefixed with "//" + // url.Host == hostname[:port] == authority + if url, err := url.Parse("//" + name); err != nil || url.Host != name { + return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) + } + return nil +} + +// NewRegistry returns a Registry based on the given name. +// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. +func NewRegistry(name string, opts ...Option) (Registry, error) { + opt := makeOptions(opts...) + if opt.strict && len(name) == 0 { + return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined") + } + + if err := checkRegistry(name); err != nil { + return Registry{}, err + } + + if name == "" { + name = opt.defaultRegistry + } + // Rewrite "docker.io" to "index.docker.io". + // See: https://github.com/google/go-containerregistry/issues/68 + if name == defaultRegistryAlias { + name = DefaultRegistry + } + + return Registry{registry: name, insecure: opt.insecure}, nil +} + +// NewInsecureRegistry returns an Insecure Registry based on the given name. +// +// Deprecated: Use the Insecure Option with NewRegistry instead. +func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { + opts = append(opts, Insecure) + return NewRegistry(name, opts...) +} + +// MarshalJSON formats the Registry into a string for JSON serialization. +func (r Registry) MarshalJSON() ([]byte, error) { return json.Marshal(r.String()) } + +// UnmarshalJSON parses a JSON string into a Registry. +func (r *Registry) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewRegistry(s) + if err != nil { + return err + } + *r = n + return nil +} + +// MarshalText formats the registry into a string for text serialization. +func (r Registry) MarshalText() ([]byte, error) { return []byte(r.String()), nil } + +// UnmarshalText parses a text string into a Registry. +func (r *Registry) UnmarshalText(data []byte) error { + n, err := NewRegistry(string(data)) + if err != nil { + return err + } + *r = n + return nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/repository.go new file mode 100644 index 0000000000..290797575e --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/repository.go @@ -0,0 +1,158 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "encoding" + "encoding/json" + "fmt" + "strings" +) + +const ( + defaultNamespace = "library" + repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" + regRepoDelimiter = "/" +) + +// Repository stores a docker repository name in a structured form. +type Repository struct { + Registry + repository string +} + +var _ encoding.TextMarshaler = (*Repository)(nil) +var _ encoding.TextUnmarshaler = (*Repository)(nil) +var _ json.Marshaler = (*Repository)(nil) +var _ json.Unmarshaler = (*Repository)(nil) + +// See https://docs.docker.com/docker-hub/official_repos +func hasImplicitNamespace(repo string, reg Registry) bool { + return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry +} + +// RepositoryStr returns the repository component of the Repository. +func (r Repository) RepositoryStr() string { + if hasImplicitNamespace(r.repository, r.Registry) { + return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) + } + return r.repository +} + +// Name returns the name from which the Repository was derived. +func (r Repository) Name() string { + regName := r.Registry.Name() + if regName != "" { + return regName + regRepoDelimiter + r.RepositoryStr() + } + // TODO: As far as I can tell, this is unreachable. + return r.RepositoryStr() +} + +func (r Repository) String() string { + return r.Name() +} + +// Scope returns the scope required to perform the given action on the registry. +// TODO(jonjohnsonjr): consider moving scopes to a separate package. +func (r Repository) Scope(action string) string { + return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) +} + +func checkRepository(repository string) error { + return checkElement("repository", repository, repositoryChars, 2, 255) +} + +// NewRepository returns a new Repository representing the given name, according to the given strictness. +func NewRepository(name string, opts ...Option) (Repository, error) { + opt := makeOptions(opts...) + if len(name) == 0 { + return Repository{}, newErrBadName("a repository name must be specified") + } + + var registry string + repo := name + parts := strings.SplitN(name, regRepoDelimiter, 2) + if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { + // The first part of the repository is treated as the registry domain + // iff it contains a '.' or ':' character, otherwise it is all repository + // and the domain defaults to Docker Hub. + registry = parts[0] + repo = parts[1] + } + + if err := checkRepository(repo); err != nil { + return Repository{}, err + } + + reg, err := NewRegistry(registry, opts...) + if err != nil { + return Repository{}, err + } + if hasImplicitNamespace(repo, reg) && opt.strict { + return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')") + } + return Repository{reg, repo}, nil +} + +// Tag returns a Tag in this Repository. +func (r Repository) Tag(identifier string) Tag { + t := Tag{ + tag: identifier, + Repository: r, + } + t.original = t.Name() + return t +} + +// Digest returns a Digest in this Repository. +func (r Repository) Digest(identifier string) Digest { + d := Digest{ + digest: identifier, + Repository: r, + } + d.original = d.Name() + return d +} + +// MarshalJSON formats the Repository into a string for JSON serialization. 
+func (r Repository) MarshalJSON() ([]byte, error) { return json.Marshal(r.String()) } + +// UnmarshalJSON parses a JSON string into a Repository. +func (r *Repository) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewRepository(s) + if err != nil { + return err + } + *r = n + return nil +} + +// MarshalText formats the repository name into a string for text serialization. +func (r Repository) MarshalText() ([]byte, error) { return []byte(r.String()), nil } + +// UnmarshalText parses a text string into a Repository. +func (r *Repository) UnmarshalText(data []byte) error { + n, err := NewRepository(string(data)) + if err != nil { + return err + } + *r = n + return nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/tag.go new file mode 100644 index 0000000000..cfa923f59d --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/name/tag.go @@ -0,0 +1,146 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "encoding" + "encoding/json" + "strings" +) + +const ( + // TODO(dekkagaijin): use the docker/distribution regexes for validation. + tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" + tagDelim = ":" +) + +// Tag stores a docker tag name in a structured form. +type Tag struct { + Repository + tag string + original string +} + +var _ Reference = (*Tag)(nil) +var _ encoding.TextMarshaler = (*Tag)(nil) +var _ encoding.TextUnmarshaler = (*Tag)(nil) +var _ json.Marshaler = (*Tag)(nil) +var _ json.Unmarshaler = (*Tag)(nil) + +// Context implements Reference. +func (t Tag) Context() Repository { + return t.Repository +} + +// Identifier implements Reference. +func (t Tag) Identifier() string { + return t.TagStr() +} + +// TagStr returns the tag component of the Tag. +func (t Tag) TagStr() string { + return t.tag +} + +// Name returns the name from which the Tag was derived. +func (t Tag) Name() string { + return t.Repository.Name() + tagDelim + t.TagStr() +} + +// String returns the original input string. +func (t Tag) String() string { + return t.original +} + +// Scope returns the scope required to perform the given action on the tag. +func (t Tag) Scope(action string) string { + return t.Repository.Scope(action) +} + +func checkTag(name string) error { + return checkElement("tag", name, tagChars, 1, 128) +} + +// NewTag returns a new Tag representing the given name, according to the given strictness. +func NewTag(name string, opts ...Option) (Tag, error) { + opt := makeOptions(opts...) + base := name + tag := "" + + // Split on ":" + parts := strings.Split(name, tagDelim) + // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. 
+ if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { + base = strings.Join(parts[:len(parts)-1], tagDelim) + tag = parts[len(parts)-1] + if tag == "" { + return Tag{}, newErrBadName("%s must specify a tag name after the colon", name) + } + } + + // We don't require a tag, but if we get one check it's valid, + // even when not being strict. + // If we are being strict, we want to validate the tag regardless in case + // it's empty. + if tag != "" || opt.strict { + if err := checkTag(tag); err != nil { + return Tag{}, err + } + } + + if tag == "" { + tag = opt.defaultTag + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Tag{}, err + } + return Tag{ + Repository: repo, + tag: tag, + original: name, + }, nil +} + +// MarshalJSON formats the Tag into a string for JSON serialization. +func (t Tag) MarshalJSON() ([]byte, error) { return json.Marshal(t.String()) } + +// UnmarshalJSON parses a JSON string into a Tag. +func (t *Tag) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewTag(s) + if err != nil { + return err + } + *t = n + return nil +} + +// MarshalText formats the tag into a string for text serialization. +func (t Tag) MarshalText() ([]byte, error) { return []byte(t.String()), nil } + +// UnmarshalText parses a text string into a Tag. +func (t *Tag) UnmarshalText(data []byte) error { + n, err := NewTag(string(data)) + if err != nil { + return err + } + *t = n + return nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/config.go new file mode 100644 index 0000000000..b62d848269 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -0,0 +1,152 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + "time" +) + +// ConfigFile is the configuration file that holds the metadata describing +// how to launch a container. See: +// https://github.com/opencontainers/image-spec/blob/master/config.md +// +// docker_version and os.version are not part of the spec but included +// for backwards compatibility. +type ConfigFile struct { + Architecture string `json:"architecture"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + // Deprecated: This field is deprecated and will be removed in the next release. 
+	DockerVersion string    `json:"docker_version,omitempty"`
+	History       []History `json:"history,omitempty"`
+	OS            string    `json:"os"`
+	RootFS        RootFS    `json:"rootfs"`
+	Config        Config    `json:"config"`
+	OSVersion     string    `json:"os.version,omitempty"`
+	Variant       string    `json:"variant,omitempty"`
+	OSFeatures    []string  `json:"os.features,omitempty"`
+}
+
+// Platform attempts to generate a Platform from the ConfigFile fields.
+func (cf *ConfigFile) Platform() *Platform {
+	if cf.OS == "" && cf.Architecture == "" && cf.OSVersion == "" && cf.Variant == "" && len(cf.OSFeatures) == 0 {
+		return nil
+	}
+	return &Platform{
+		OS:           cf.OS,
+		Architecture: cf.Architecture,
+		OSVersion:    cf.OSVersion,
+		Variant:      cf.Variant,
+		OSFeatures:   cf.OSFeatures,
+	}
+}
+
+// History is one entry of a list recording how this container image was built.
+type History struct {
+	Author     string `json:"author,omitempty"`
+	Created    Time   `json:"created,omitempty"`
+	CreatedBy  string `json:"created_by,omitempty"`
+	Comment    string `json:"comment,omitempty"`
+	EmptyLayer bool   `json:"empty_layer,omitempty"`
+}
+
+// Time is a wrapper around time.Time to help with deep copying
+type Time struct {
+	time.Time
+}
+
+// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *Time) DeepCopyInto(out *Time) {
+	*out = *t
+}
+
+// RootFS holds the ordered list of file system deltas that comprise the
+// container image's root filesystem.
+type RootFS struct {
+	Type    string `json:"type"`
+	DiffIDs []Hash `json:"diff_ids"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	Test []string `json:",omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before retries start to count down.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:",omitempty"`
+}
+
+// Config is a submessage of the config file described as:
+//
+// The execution parameters which SHOULD be used as a base when running
+// a container using the image.
+// +// The names of the fields in this message are chosen to reflect the JSON +// payload of the Config as defined here: +// https://git.io/vrAET +// and +// https://github.com/opencontainers/image-spec/blob/master/config.md +type Config struct { + AttachStderr bool `json:"AttachStderr,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` + Domainname string `json:"Domainname,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + Env []string `json:"Env,omitempty"` + Hostname string `json:"Hostname,omitempty"` + Image string `json:"Image,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Tty bool `json:"Tty,omitempty"` + User string `json:"User,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + MacAddress string `json:"MacAddress,omitempty"` + StopSignal string `json:"StopSignal,omitempty"` + Shell []string `json:"Shell,omitempty"` +} + +// ParseConfigFile parses the io.Reader's contents into a ConfigFile. +func ParseConfigFile(r io.Reader) (*ConfigFile, error) { + cf := ConfigFile{} + if err := json.NewDecoder(r).Decode(&cf); err != nil { + return nil, err + } + return &cf, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go new file mode 100644 index 0000000000..7a84736be2 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package + +// Package v1 defines structured types for OCI v1 images +package v1 diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md new file mode 100644 index 0000000000..8663a830fd --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md @@ -0,0 +1,8 @@ +# `empty` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty) + +The empty packages provides an empty base for constructing a `v1.Image` or `v1.ImageIndex`. 
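`ParseConfigFile` above decodes a config blob from any `io.Reader`. A hedged sketch with a hand-written JSON document:

```go
package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// A minimal, hand-written config document for illustration.
	raw := `{"architecture":"amd64","os":"linux","rootfs":{"type":"layers","diff_ids":[]}}`

	cf, err := v1.ParseConfigFile(strings.NewReader(raw))
	if err != nil {
		panic(err)
	}
	fmt.Println(cf.OS, cf.Architecture, cf.RootFS.Type) // linux amd64 layers
}
```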
+This is especially useful when paired with the [`mutate`](/pkg/v1/mutate) package, +see [`mutate.Append`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#Append) +and [`mutate.AppendManifests`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#AppendManifests). diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go new file mode 100644 index 0000000000..1a521e9a74 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package empty provides an implementation of v1.Image equivalent to "FROM scratch". +package empty diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go new file mode 100644 index 0000000000..c58a06ce02 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package empty + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image is a singleton empty image, think: FROM scratch. +var Image, _ = partial.UncompressedToImage(emptyImage{}) + +type emptyImage struct{} + +// MediaType implements partial.UncompressedImageCore. +func (i emptyImage) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// RawConfigFile implements partial.UncompressedImageCore. +func (i emptyImage) RawConfigFile() ([]byte, error) { + return partial.RawConfigFile(i) +} + +// ConfigFile implements v1.Image. +func (i emptyImage) ConfigFile() (*v1.ConfigFile, error) { + return &v1.ConfigFile{ + RootFS: v1.RootFS{ + // Some clients check this. 
+ Type: "layers", + }, + }, nil +} + +func (i emptyImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + return nil, fmt.Errorf("LayerByDiffID(%s): empty image", h) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go new file mode 100644 index 0000000000..18b414891b --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go @@ -0,0 +1,65 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package empty + +import ( + "encoding/json" + "errors" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Index is a singleton empty index, think: FROM scratch. +var Index = emptyIndex{} + +type emptyIndex struct{} + +func (i emptyIndex) MediaType() (types.MediaType, error) { + return types.OCIImageIndex, nil +} + +func (i emptyIndex) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +func (i emptyIndex) Size() (int64, error) { + return partial.Size(i) +} + +func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) { + return base(), nil +} + +func (i emptyIndex) RawManifest() ([]byte, error) { + return json.Marshal(base()) +} + +func (i emptyIndex) Image(v1.Hash) (v1.Image, error) { + return nil, errors.New("empty index") +} + +func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) { + return nil, errors.New("empty index") +} + +func base() *v1.IndexManifest { + return &v1.IndexManifest{ + SchemaVersion: 2, + MediaType: types.OCIImageIndex, + Manifests: []v1.Descriptor{}, + } +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go new file mode 100644 index 0000000000..d81593bd59 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go @@ -0,0 +1,122 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "crypto" + "encoding" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "strings" +) + +// Hash is an unqualified digest of some content, e.g. sha256:deadbeef +type Hash struct { + // Algorithm holds the algorithm used to compute the hash. 
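Both singletons behave like `FROM scratch`: the image carries a valid manifest and config but zero layers, and the index carries an empty manifest list. A quick inspection sketch:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/empty"
)

func main() {
	// empty.Image has a valid config and manifest but no layers.
	layers, err := empty.Image.Layers()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(layers)) // 0

	// empty.Index is an OCI index with an empty manifest list.
	im, err := empty.Index.IndexManifest()
	if err != nil {
		panic(err)
	}
	fmt.Println(im.SchemaVersion, len(im.Manifests)) // 2 0
}
```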
+ Algorithm string + + // Hex holds the hex portion of the content hash. + Hex string +} + +var _ encoding.TextMarshaler = (*Hash)(nil) +var _ encoding.TextUnmarshaler = (*Hash)(nil) +var _ json.Marshaler = (*Hash)(nil) +var _ json.Unmarshaler = (*Hash)(nil) + +// String reverses NewHash returning the string-form of the hash. +func (h Hash) String() string { + return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) +} + +// NewHash validates the input string is a hash and returns a strongly type Hash object. +func NewHash(s string) (Hash, error) { + h := Hash{} + if err := h.parse(s); err != nil { + return Hash{}, err + } + return h, nil +} + +// MarshalJSON implements json.Marshaler +func (h Hash) MarshalJSON() ([]byte, error) { return json.Marshal(h.String()) } + +// UnmarshalJSON implements json.Unmarshaler +func (h *Hash) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + return h.parse(s) +} + +// MarshalText implements encoding.TextMarshaler. This is required to use +// v1.Hash as a key in a map when marshalling JSON. +func (h Hash) MarshalText() ([]byte, error) { return []byte(h.String()), nil } + +// UnmarshalText implements encoding.TextUnmarshaler. This is required to use +// v1.Hash as a key in a map when unmarshalling JSON. +func (h *Hash) UnmarshalText(text []byte) error { return h.parse(string(text)) } + +// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256") +func Hasher(name string) (hash.Hash, error) { + switch name { + case "sha256": + return crypto.SHA256.New(), nil + default: + return nil, fmt.Errorf("unsupported hash: %q", name) + } +} + +func (h *Hash) parse(unquoted string) error { + parts := strings.Split(unquoted, ":") + if len(parts) != 2 { + return fmt.Errorf("cannot parse hash: %q", unquoted) + } + + rest := strings.TrimLeft(parts[1], "0123456789abcdef") + if len(rest) != 0 { + return fmt.Errorf("found non-hex character in hash: %c", rest[0]) + } + + hasher, err := Hasher(parts[0]) + if err != nil { + return err + } + // Compare the hex to the expected size (2 hex characters per byte) + if len(parts[1]) != hasher.Size()*2 { + return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) + } + + h.Algorithm = parts[0] + h.Hex = parts[1] + return nil +} + +// SHA256 computes the Hash of the provided io.Reader's content. +func SHA256(r io.Reader) (Hash, int64, error) { + hasher := crypto.SHA256.New() + n, err := io.Copy(hasher, r) + if err != nil { + return Hash{}, 0, err + } + return Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + }, n, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/image.go new file mode 100644 index 0000000000..8de9e47645 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
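`SHA256` and `NewHash` round-trip cleanly: a computed digest stringifies to `sha256:<hex>` and parses back to an equal value, since `Hash` is a comparable struct of two strings. For example:

```go
package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	h, n, err := v1.SHA256(strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (%d bytes hashed)\n", h, n)

	// NewHash validates both the algorithm and the hex digit count.
	parsed, err := v1.NewHash(h.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == h) // true
}
```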
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image defines the interface for interacting with an OCI v1 image. +type Image interface { + // Layers returns the ordered collection of filesystem layers that comprise this image. + // The order of the list is oldest/base layer first, and most-recent/top layer last. + Layers() ([]Layer, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // ConfigName returns the hash of the image's config file, also known as + // the Image ID. + ConfigName() (Hash, error) + + // ConfigFile returns this image's config file. + ConfigFile() (*ConfigFile, error) + + // RawConfigFile returns the serialized bytes of ConfigFile(). + RawConfigFile() ([]byte, error) + + // Digest returns the sha256 of this image's manifest. + Digest() (Hash, error) + + // Manifest returns this image's Manifest object. + Manifest() (*Manifest, error) + + // RawManifest returns the serialized bytes of Manifest() + RawManifest() ([]byte, error) + + // LayerByDigest returns a Layer for interacting with a particular layer of + // the image, looking it up by "digest" (the compressed hash). + LayerByDigest(Hash) (Layer, error) + + // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" + // (the uncompressed hash). + LayerByDiffID(Hash) (Layer, error) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/index.go new file mode 100644 index 0000000000..8e7bc8ebb3 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/index.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// ImageIndex defines the interface for interacting with an OCI image index. +type ImageIndex interface { + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Digest returns the sha256 of this index's manifest. + Digest() (Hash, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // IndexManifest returns this image index's manifest object. + IndexManifest() (*IndexManifest, error) + + // RawManifest returns the serialized bytes of IndexManifest(). + RawManifest() ([]byte, error) + + // Image returns a v1.Image that this ImageIndex references. + Image(Hash) (Image, error) + + // ImageIndex returns a v1.ImageIndex that this ImageIndex references. 
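Because everything downstream consumes the `v1.Image` interface rather than a concrete type, helpers written against it work for remote, tarball, and `empty` images alike. A sketch using `empty.Image` as a stand-in (the `describe` helper is hypothetical):

```go
package main

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
)

// describe works for any v1.Image implementation.
func describe(img v1.Image) error {
	digest, err := img.Digest()
	if err != nil {
		return err
	}
	mt, err := img.MediaType()
	if err != nil {
		return err
	}
	fmt.Println(digest, mt)
	return nil
}

func main() {
	if err := describe(empty.Image); err != nil {
		panic(err)
	}
}
```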
+ ImageIndex(Hash) (ImageIndex, error) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go new file mode 100644 index 0000000000..57447d263d --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Layer is an interface for accessing the properties of a particular layer of a v1.Image +type Layer interface { + // Digest returns the Hash of the compressed layer. + Digest() (Hash, error) + + // DiffID returns the Hash of the uncompressed layer. + DiffID() (Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) + + // MediaType returns the media type of the Layer. + MediaType() (types.MediaType, error) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go new file mode 100644 index 0000000000..22d483f3bd --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Manifest represents the OCI image manifest in a structured way. +type Manifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Config Descriptor `json:"config"` + Layers []Descriptor `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` + Subject *Descriptor `json:"subject,omitempty"` +} + +// IndexManifest represents an OCI image index in a structured way. 
+type IndexManifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Manifests []Descriptor `json:"manifests"` + Annotations map[string]string `json:"annotations,omitempty"` + Subject *Descriptor `json:"subject,omitempty"` +} + +// Descriptor holds a reference from the manifest to one of its constituent elements. +type Descriptor struct { + MediaType types.MediaType `json:"mediaType"` + Size int64 `json:"size"` + Digest Hash `json:"digest"` + Data []byte `json:"data,omitempty"` + URLs []string `json:"urls,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Platform *Platform `json:"platform,omitempty"` + ArtifactType string `json:"artifactType,omitempty"` +} + +// ParseManifest parses the io.Reader's contents into a Manifest. +func ParseManifest(r io.Reader) (*Manifest, error) { + m := Manifest{} + if err := json.NewDecoder(r).Decode(&m); err != nil { + return nil, err + } + return &m, nil +} + +// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. +func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { + im := IndexManifest{} + if err := json.NewDecoder(r).Decode(&im); err != nil { + return nil, err + } + return &im, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go new file mode 100644 index 0000000000..98b1ff9094 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go @@ -0,0 +1,92 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package match provides functionality for conveniently matching a v1.Descriptor. +package match + +import ( + v1 "github.com/google/go-containerregistry/pkg/v1" + imagespec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Matcher function that is given a v1.Descriptor, and returns whether or +// not it matches a given rule. Can match on anything it wants in the Descriptor. +type Matcher func(desc v1.Descriptor) bool + +// Name returns a match.Matcher that matches based on the value of the +// +// "org.opencontainers.image.ref.name" annotation: +// +// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys +func Name(name string) Matcher { + return Annotation(imagespec.AnnotationRefName, name) +} + +// Annotation returns a match.Matcher that matches based on the provided annotation. +func Annotation(key, value string) Matcher { + return func(desc v1.Descriptor) bool { + if desc.Annotations == nil { + return false + } + if aValue, ok := desc.Annotations[key]; ok && aValue == value { + return true + } + return false + } +} + +// Platforms returns a match.Matcher that matches on any one of the provided platforms. +// Ignores any descriptors that do not have a platform. 
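A `match.Matcher` is just a predicate over `v1.Descriptor`, so matchers can be constructed and applied inline. A sketch using the pre-defined OCI ref-name annotation mentioned above:

```go
package main

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/match"
)

func main() {
	desc := v1.Descriptor{
		Annotations: map[string]string{
			"org.opencontainers.image.ref.name": "v1.2.3",
		},
	}

	// match.Name is shorthand for matching the OCI ref.name annotation.
	fmt.Println(match.Name("v1.2.3")(desc)) // true
	fmt.Println(match.Name("latest")(desc)) // false
}
```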
+func Platforms(platforms ...v1.Platform) Matcher { + return func(desc v1.Descriptor) bool { + if desc.Platform == nil { + return false + } + for _, platform := range platforms { + if desc.Platform.Equals(platform) { + return true + } + } + return false + } +} + +// MediaTypes returns a match.Matcher that matches at least one of the provided media types. +func MediaTypes(mediaTypes ...string) Matcher { + mts := map[string]bool{} + for _, media := range mediaTypes { + mts[media] = true + } + return func(desc v1.Descriptor) bool { + if desc.MediaType == "" { + return false + } + if _, ok := mts[string(desc.MediaType)]; ok { + return true + } + return false + } +} + +// Digests returns a match.Matcher that matches at least one of the provided Digests +func Digests(digests ...v1.Hash) Matcher { + digs := map[v1.Hash]bool{} + for _, digest := range digests { + digs[digest] = true + } + return func(desc v1.Descriptor) bool { + _, ok := digs[desc.Digest] + return ok + } +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md new file mode 100644 index 0000000000..19e1612433 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md @@ -0,0 +1,56 @@ +# `mutate` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate) + +The `v1.Image`, `v1.ImageIndex`, and `v1.Layer` interfaces provide only +accessor methods, so they are essentially immutable. If you want to change +something about them, you need to produce a new instance of that interface. + +A common use case for this library is to read an image from somewhere (a source), +change something about it, and write the image somewhere else (a sink). + +Graphically, this looks something like: + +

+*(diagram omitted: an image is read from a source, transformed by `mutate`, and written to a sink)*
+ +## Mutations + +This is obviously not a comprehensive set of useful transformations (PRs welcome!), +but a rough summary of what the `mutate` package currently does: + +### `Config` and `ConfigFile` + +These allow you to change the [image configuration](https://github.com/opencontainers/image-spec/blob/master/config.md#properties), +e.g. to change the entrypoint, environment, author, etc. + +### `Time`, `Canonical`, and `CreatedAt` + +These are useful in the context of [reproducible builds](https://reproducible-builds.org/), +where you may want to strip timestamps and other non-reproducible information. + +### `Append`, `AppendLayers`, and `AppendManifests` + +These functions allow the extension of a `v1.Image` or `v1.ImageIndex` with +new layers or manifests. + +For constructing an image `FROM scratch`, see the [`empty`](/pkg/v1/empty) package. + +### `MediaType` and `IndexMediaType` + +Sometimes, it is necessary to change the media type of an image or index, +e.g. to appease a registry with strict validation of images (_looking at you, GCR_). + +### `Rebase` + +Rebase has [its own README](/cmd/crane/rebase.md). + +This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_rebase.md). + +### `Extract` + +Extract will flatten an image filesystem into a single tar stream, +respecting whiteout files. + +This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_export.md). diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go new file mode 100644 index 0000000000..dfbd9951e0 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mutate provides facilities for mutating v1.Images of any kind. +package mutate diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go new file mode 100644 index 0000000000..3ea27fe47f --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go @@ -0,0 +1,293 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "bytes" + "encoding/json" + "errors" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type image struct { + base v1.Image + adds []Addendum + + computed bool + configFile *v1.ConfigFile + manifest *v1.Manifest + annotations map[string]string + mediaType *types.MediaType + configMediaType *types.MediaType + diffIDMap map[v1.Hash]v1.Layer + digestMap map[v1.Hash]v1.Layer + subject *v1.Descriptor + + sync.Mutex +} + +var _ v1.Image = (*image)(nil) + +func (i *image) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *image) compute() error { + i.Lock() + defer i.Unlock() + + // Don't re-compute if already computed. + if i.computed { + return nil + } + var configFile *v1.ConfigFile + if i.configFile != nil { + configFile = i.configFile + } else { + cf, err := i.base.ConfigFile() + if err != nil { + return err + } + configFile = cf.DeepCopy() + } + diffIDs := configFile.RootFS.DiffIDs + history := configFile.History + + diffIDMap := make(map[v1.Hash]v1.Layer) + digestMap := make(map[v1.Hash]v1.Layer) + + for _, add := range i.adds { + history = append(history, add.History) + if add.Layer != nil { + diffID, err := add.Layer.DiffID() + if err != nil { + return err + } + diffIDs = append(diffIDs, diffID) + diffIDMap[diffID] = add.Layer + } + } + + m, err := i.base.Manifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifestLayers := manifest.Layers + for _, add := range i.adds { + if add.Layer == nil { + // Empty layers include only history in manifest. + continue + } + + desc, err := partial.Descriptor(add.Layer) + if err != nil { + return err + } + + // Fields in the addendum override the original descriptor. + if len(add.Annotations) != 0 { + desc.Annotations = add.Annotations + } + if len(add.URLs) != 0 { + desc.URLs = add.URLs + } + + if add.MediaType != "" { + desc.MediaType = add.MediaType + } + + manifestLayers = append(manifestLayers, *desc) + digestMap[desc.Digest] = add.Layer + } + + configFile.RootFS.DiffIDs = diffIDs + configFile.History = history + + manifest.Layers = manifestLayers + + rcfg, err := json.Marshal(configFile) + if err != nil { + return err + } + d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) + if err != nil { + return err + } + manifest.Config.Digest = d + manifest.Config.Size = sz + + // If Data was set in the base image, we need to update it in the mutated image. + if m.Config.Data != nil { + manifest.Config.Data = rcfg + } + + // If the user wants to mutate the media type of the config + if i.configMediaType != nil { + manifest.Config.MediaType = *i.configMediaType + } + + if i.mediaType != nil { + manifest.MediaType = *i.mediaType + } + + if i.annotations != nil { + if manifest.Annotations == nil { + manifest.Annotations = map[string]string{} + } + + for k, v := range i.annotations { + manifest.Annotations[k] = v + } + } + manifest.Subject = i.subject + + i.configFile = configFile + i.manifest = manifest + i.diffIDMap = diffIDMap + i.digestMap = digestMap + i.computed = true + return nil +} + +// Layers returns the ordered collection of filesystem layers that comprise this image. 
+// The order of the list is oldest/base layer first, and most-recent/top layer last. +func (i *image) Layers() ([]v1.Layer, error) { + if err := i.compute(); errors.Is(err, stream.ErrNotComputed) { + // Image contains a streamable layer which has not yet been + // consumed. Just return the layers we have in case the caller + // is going to consume the layers. + layers, err := i.base.Layers() + if err != nil { + return nil, err + } + for _, add := range i.adds { + layers = append(layers, add.Layer) + } + return layers, nil + } else if err != nil { + return nil, err + } + + diffIDs, err := partial.DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// ConfigName returns the hash of the image's config file. +func (i *image) ConfigName() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.ConfigName(i) +} + +// ConfigFile returns this image's config file. +func (i *image) ConfigFile() (*v1.ConfigFile, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.configFile.DeepCopy(), nil +} + +// RawConfigFile returns the serialized bytes of ConfigFile() +func (i *image) RawConfigFile() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.configFile) +} + +// Digest returns the sha256 of this image's manifest. +func (i *image) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Size implements v1.Image. +func (i *image) Size() (int64, error) { + if err := i.compute(); err != nil { + return -1, err + } + return partial.Size(i) +} + +// Manifest returns this image's Manifest object. +func (i *image) Manifest() (*v1.Manifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest.DeepCopy(), nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *image) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} + +// LayerByDigest returns a Layer for interacting with a particular layer of +// the image, looking it up by "digest" (the compressed hash). +func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { + if cn, err := i.ConfigName(); err != nil { + return nil, err + } else if h == cn { + return partial.ConfigLayer(i) + } + if layer, ok := i.digestMap[h]; ok { + return layer, nil + } + return i.base.LayerByDigest(h) +} + +// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" +// (the uncompressed hash). +func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.diffIDMap[h]; ok { + return layer, nil + } + return i.base.LayerByDiffID(h) +} + +func validate(adds []Addendum) error { + for _, add := range adds { + if add.Layer == nil && !add.History.EmptyLayer { + return errors.New("unable to add a nil layer to the image") + } + } + return nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go new file mode 100644 index 0000000000..a6fdaceed5 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go @@ -0,0 +1,232 @@ +// Copyright 2019 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +func computeDescriptor(ia IndexAddendum) (*v1.Descriptor, error) { + desc, err := partial.Descriptor(ia.Add) + if err != nil { + return nil, err + } + + // The IndexAddendum allows overriding Descriptor values. + if ia.Size != 0 { + desc.Size = ia.Size + } + if string(ia.MediaType) != "" { + desc.MediaType = ia.MediaType + } + if ia.Digest != (v1.Hash{}) { + desc.Digest = ia.Digest + } + if ia.Platform != nil { + desc.Platform = ia.Platform + } + if len(ia.URLs) != 0 { + desc.URLs = ia.URLs + } + if len(ia.Annotations) != 0 { + desc.Annotations = ia.Annotations + } + if ia.Data != nil { + desc.Data = ia.Data + } + + return desc, nil +} + +type index struct { + base v1.ImageIndex + adds []IndexAddendum + // remove is removed before adds + remove match.Matcher + + computed bool + manifest *v1.IndexManifest + annotations map[string]string + mediaType *types.MediaType + imageMap map[v1.Hash]v1.Image + indexMap map[v1.Hash]v1.ImageIndex + layerMap map[v1.Hash]v1.Layer + subject *v1.Descriptor + + sync.Mutex +} + +var _ v1.ImageIndex = (*index)(nil) + +func (i *index) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *index) Size() (int64, error) { return partial.Size(i) } + +func (i *index) compute() error { + i.Lock() + defer i.Unlock() + + // Don't re-compute if already computed. 
+ if i.computed { + return nil + } + + i.imageMap = make(map[v1.Hash]v1.Image) + i.indexMap = make(map[v1.Hash]v1.ImageIndex) + i.layerMap = make(map[v1.Hash]v1.Layer) + + m, err := i.base.IndexManifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifests := manifest.Manifests + + if i.remove != nil { + var cleanedManifests []v1.Descriptor + for _, m := range manifests { + if !i.remove(m) { + cleanedManifests = append(cleanedManifests, m) + } + } + manifests = cleanedManifests + } + + for _, add := range i.adds { + desc, err := computeDescriptor(add) + if err != nil { + return err + } + + manifests = append(manifests, *desc) + if idx, ok := add.Add.(v1.ImageIndex); ok { + i.indexMap[desc.Digest] = idx + } else if img, ok := add.Add.(v1.Image); ok { + i.imageMap[desc.Digest] = img + } else if l, ok := add.Add.(v1.Layer); ok { + i.layerMap[desc.Digest] = l + } else { + logs.Warn.Printf("Unexpected index addendum: %T", add.Add) + } + } + + manifest.Manifests = manifests + + if i.mediaType != nil { + manifest.MediaType = *i.mediaType + } + + if i.annotations != nil { + if manifest.Annotations == nil { + manifest.Annotations = map[string]string{} + } + for k, v := range i.annotations { + manifest.Annotations[k] = v + } + } + manifest.Subject = i.subject + + i.manifest = manifest + i.computed = true + return nil +} + +func (i *index) Image(h v1.Hash) (v1.Image, error) { + if img, ok := i.imageMap[h]; ok { + return img, nil + } + return i.base.Image(h) +} + +func (i *index) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + if idx, ok := i.indexMap[h]; ok { + return idx, nil + } + return i.base.ImageIndex(h) +} + +type withLayer interface { + Layer(v1.Hash) (v1.Layer, error) +} + +// Workaround for #819. +func (i *index) Layer(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.layerMap[h]; ok { + return layer, nil + } + if wl, ok := i.base.(withLayer); ok { + return wl.Layer(h) + } + return nil, fmt.Errorf("layer not found: %s", h) +} + +// Digest returns the sha256 of this image's manifest. +func (i *index) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Manifest returns this image's Manifest object. +func (i *index) IndexManifest() (*v1.IndexManifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest.DeepCopy(), nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *index) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} + +func (i *index) Manifests() ([]partial.Describable, error) { + if err := i.compute(); errors.Is(err, stream.ErrNotComputed) { + // Index contains a streamable layer which has not yet been + // consumed. Just return the manifests we have in case the caller + // is going to consume the streamable layers. 
+ manifests, err := partial.Manifests(i.base) + if err != nil { + return nil, err + } + for _, add := range i.adds { + manifests = append(manifests, add.Add) + } + return manifests, nil + } else if err != nil { + return nil, err + } + + return partial.ComputeManifests(i) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go new file mode 100644 index 0000000000..409877bce0 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -0,0 +1,546 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "maps" + "path/filepath" + "strings" + "time" + + "github.com/google/go-containerregistry/internal/gzip" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +const whiteoutPrefix = ".wh." + +// Addendum contains layers and history to be appended +// to a base image +type Addendum struct { + Layer v1.Layer + History v1.History + URLs []string + Annotations map[string]string + MediaType types.MediaType +} + +// AppendLayers applies layers to a base image. +func AppendLayers(base v1.Image, layers ...v1.Layer) (v1.Image, error) { + additions := make([]Addendum, 0, len(layers)) + for _, layer := range layers { + additions = append(additions, Addendum{Layer: layer}) + } + + return Append(base, additions...) +} + +// Append will apply the list of addendums to the base image +func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { + if len(adds) == 0 { + return base, nil + } + if err := validate(adds); err != nil { + return nil, err + } + + return &image{ + base: base, + adds: adds, + }, nil +} + +// Appendable is an interface that represents something that can be appended +// to an ImageIndex. We need to be able to construct a v1.Descriptor in order +// to append something, and this is the minimum required information for that. +type Appendable interface { + MediaType() (types.MediaType, error) + Digest() (v1.Hash, error) + Size() (int64, error) +} + +// IndexAddendum represents an appendable thing and all the properties that +// we may want to override in the resulting v1.Descriptor. +type IndexAddendum struct { + Add Appendable + v1.Descriptor +} + +// AppendManifests appends a manifest to the ImageIndex. +func AppendManifests(base v1.ImageIndex, adds ...IndexAddendum) v1.ImageIndex { + return &index{ + base: base, + adds: adds, + } +} + +// RemoveManifests removes any descriptors that match the match.Matcher. 
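`AppendManifests` and `RemoveManifests` (whose body follows) are lazy: the returned index only computes its manifest when an accessor is called. A hedged sketch appending the empty image to the empty index:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

func main() {
	// Append the empty image as a child manifest of the empty index.
	idx := mutate.AppendManifests(empty.Index, mutate.IndexAddendum{Add: empty.Image})

	// The descriptor is computed only now, when the manifest is requested.
	im, err := idx.IndexManifest()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(im.Manifests)) // 1
}
```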
+func RemoveManifests(base v1.ImageIndex, matcher match.Matcher) v1.ImageIndex { + return &index{ + base: base, + remove: matcher, + } +} + +// Config mutates the provided v1.Image to have the provided v1.Config +func Config(base v1.Image, cfg v1.Config) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cf.Config = cfg + + return ConfigFile(base, cf) +} + +// Subject mutates the subject on an image or index manifest. +// +// The input is expected to be a v1.Image or v1.ImageIndex, and +// returns the same type. You can type-assert the result like so: +// +// img := Subject(empty.Image, subj).(v1.Image) +// +// Or for an index: +// +// idx := Subject(empty.Index, subj).(v1.ImageIndex) +// +// If the input is not an Image or ImageIndex, the result will +// attempt to lazily annotate the raw manifest. +func Subject(f partial.WithRawManifest, subject v1.Descriptor) partial.WithRawManifest { + if img, ok := f.(v1.Image); ok { + return &image{ + base: img, + subject: &subject, + } + } + if idx, ok := f.(v1.ImageIndex); ok { + return &index{ + base: idx, + subject: &subject, + } + } + return arbitraryRawManifest{a: f, subject: &subject} +} + +// Annotations mutates the annotations on an annotatable image or index manifest. +// +// The annotatable input is expected to be a v1.Image or v1.ImageIndex, and +// returns the same type. You can type-assert the result like so: +// +// img := Annotations(empty.Image, map[string]string{ +// "foo": "bar", +// }).(v1.Image) +// +// Or for an index: +// +// idx := Annotations(empty.Index, map[string]string{ +// "foo": "bar", +// }).(v1.ImageIndex) +// +// If the input Annotatable is not an Image or ImageIndex, the result will +// attempt to lazily annotate the raw manifest. +func Annotations(f partial.WithRawManifest, anns map[string]string) partial.WithRawManifest { + if img, ok := f.(v1.Image); ok { + return &image{ + base: img, + annotations: maps.Clone(anns), + } + } + if idx, ok := f.(v1.ImageIndex); ok { + return &index{ + base: idx, + annotations: maps.Clone(anns), + } + } + return arbitraryRawManifest{a: f, anns: maps.Clone(anns)} +} + +type arbitraryRawManifest struct { + a partial.WithRawManifest + anns map[string]string + subject *v1.Descriptor +} + +func (a arbitraryRawManifest) RawManifest() ([]byte, error) { + b, err := a.a.RawManifest() + if err != nil { + return nil, err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + if ann, ok := m["annotations"]; ok { + if annm, ok := ann.(map[string]string); ok { + for k, v := range a.anns { + annm[k] = v + } + } else { + return nil, fmt.Errorf(".annotations is not a map: %T", ann) + } + } else { + m["annotations"] = a.anns + } + if a.subject != nil { + m["subject"] = a.subject + } + return json.Marshal(m) +} + +// ConfigFile mutates the provided v1.Image to have the provided v1.ConfigFile +func ConfigFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) { + m, err := base.Manifest() + if err != nil { + return nil, err + } + + image := &image{ + base: base, + manifest: m.DeepCopy(), + configFile: cfg, + } + + return image, nil +} + +// CreatedAt mutates the provided v1.Image to have the provided v1.Time +func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cfg := cf.DeepCopy() + cfg.Created = created + + return ConfigFile(base, cfg) +} + +// Extract takes an image and returns an io.ReadCloser containing the image's +// 
flattened filesystem. +// +// Callers can read the filesystem contents by passing the reader to +// tar.NewReader, or io.Copy it directly to some output. +// +// If a caller doesn't read the full contents, they should Close it to free up +// resources used during extraction. +func Extract(img v1.Image) io.ReadCloser { + pr, pw := io.Pipe() + + go func() { + // Close the writer with any errors encountered during + // extraction. These errors will be returned by the reader end + // on subsequent reads. If err == nil, the reader will return + // EOF. + pw.CloseWithError(extract(img, pw)) + }() + + return pr +} + +// Adapted from https://github.com/google/containerregistry/blob/da03b395ccdc4e149e34fbb540483efce962dc64/client/v2_2/docker_image_.py#L816 +func extract(img v1.Image, w io.Writer) error { + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + fileMap := map[string]bool{} + + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("retrieving image layers: %w", err) + } + + // we iterate through the layers in reverse order because it makes handling + // whiteout layers more efficient, since we can just keep track of the removed + // files as we see .wh. layers and ignore those in previous layers. + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + layerReader, err := layer.Uncompressed() + if err != nil { + return fmt.Errorf("reading layer contents: %w", err) + } + defer layerReader.Close() + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("reading tar: %w", err) + } + + // Some tools prepend everything with "./", so if we don't Clean the + // name, we may have duplicate entries, which angers tar-split. + header.Name = filepath.Clean(header.Name) + // force PAX format to remove Name/Linkname length limit of 100 characters + // required by USTAR and to not depend on internal tar package guess which + // prefers USTAR over PAX + header.Format = tar.FormatPAX + + basename := filepath.Base(header.Name) + dirname := filepath.Dir(header.Name) + tombstone := strings.HasPrefix(basename, whiteoutPrefix) + if tombstone { + basename = basename[len(whiteoutPrefix):] + } + + // check if we have seen value before + // if we're checking a directory, don't filepath.Join names + var name string + if header.Typeflag == tar.TypeDir { + name = header.Name + } else { + name = filepath.Join(dirname, basename) + } + + if _, ok := fileMap[name]; ok { + continue + } + + // check for a whited out parent directory + if inWhiteoutDir(fileMap, name) { + continue + } + + // mark file as handled. non-directory implicitly tombstones + // any entries with a matching (or child) name + fileMap[name] = tombstone || (header.Typeflag != tar.TypeDir) + if !tombstone { + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if header.Size > 0 { + if _, err := io.CopyN(tarWriter, tarReader, header.Size); err != nil { + return err + } + } + } + } + } + return nil +} + +func inWhiteoutDir(fileMap map[string]bool, file string) bool { + for file != "" { + dirname := filepath.Dir(file) + if file == dirname { + break + } + if val, ok := fileMap[dirname]; ok && val { + return true + } + file = dirname + } + return false +} + +// Time sets all timestamps in an image to the given timestamp. 
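`Extract` hands back the flattened filesystem as a tar stream backed by a pipe, so extraction work happens only as the caller reads. A usage sketch (using `empty.Image`, whose flattened filesystem is empty, as a stand-in for a real image):

```go
package main

import (
	"archive/tar"
	"errors"
	"fmt"
	"io"

	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

func main() {
	rc := mutate.Extract(empty.Image)
	defer rc.Close() // frees extraction resources if we stop reading early

	// Walk the flattened filesystem like any other tar stream.
	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}
```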
+func Time(img v1.Image, t time.Time) (v1.Image, error) { + newImage := empty.Image + + layers, err := img.Layers() + if err != nil { + return nil, fmt.Errorf("getting image layers: %w", err) + } + + ocf, err := img.ConfigFile() + if err != nil { + return nil, fmt.Errorf("getting original config file: %w", err) + } + + addendums := make([]Addendum, max(len(ocf.History), len(layers))) + var historyIdx, addendumIdx int + for layerIdx := 0; layerIdx < len(layers); addendumIdx, layerIdx = addendumIdx+1, layerIdx+1 { + newLayer, err := layerTime(layers[layerIdx], t) + if err != nil { + return nil, fmt.Errorf("setting layer times: %w", err) + } + + // try to search for the history entry that corresponds to this layer + for ; historyIdx < len(ocf.History); historyIdx++ { + addendums[addendumIdx].History = ocf.History[historyIdx] + // if it's an EmptyLayer, do not set the Layer and have the Addendum with just the History + // and move on to the next History entry + if ocf.History[historyIdx].EmptyLayer { + addendumIdx++ + continue + } + // otherwise, we can exit from the cycle + historyIdx++ + break + } + if addendumIdx < len(addendums) { + addendums[addendumIdx].Layer = newLayer + } + } + + // add all leftover History entries + for ; historyIdx < len(ocf.History); historyIdx, addendumIdx = historyIdx+1, addendumIdx+1 { + addendums[addendumIdx].History = ocf.History[historyIdx] + } + + newImage, err = Append(newImage, addendums...) + if err != nil { + return nil, fmt.Errorf("appending layers: %w", err) + } + + cf, err := newImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("setting config file: %w", err) + } + + cfg := cf.DeepCopy() + + // Copy basic config over + cfg.Architecture = ocf.Architecture + cfg.OS = ocf.OS + cfg.OSVersion = ocf.OSVersion + cfg.Config = ocf.Config + + // Strip away timestamps from the config file + cfg.Created = v1.Time{Time: t} + + for i, h := range cfg.History { + h.Created = v1.Time{Time: t} + h.CreatedBy = ocf.History[i].CreatedBy + h.Comment = ocf.History[i].Comment + h.EmptyLayer = ocf.History[i].EmptyLayer + // Explicitly ignore Author field; which hinders reproducibility + h.Author = "" + cfg.History[i] = h + } + + return ConfigFile(newImage, cfg) +} + +func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) { + layerReader, err := layer.Uncompressed() + if err != nil { + return nil, fmt.Errorf("getting layer: %w", err) + } + defer layerReader.Close() + w := new(bytes.Buffer) + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("reading layer: %w", err) + } + + header.ModTime = t + + //PAX and GNU Format support additional timestamps in the header + if header.Format == tar.FormatPAX || header.Format == tar.FormatGNU { + header.AccessTime = t + header.ChangeTime = t + } + + if err := tarWriter.WriteHeader(header); err != nil { + return nil, fmt.Errorf("writing tar header: %w", err) + } + + if header.Typeflag == tar.TypeReg { + // TODO(#1168): This should be lazy, and not buffer the entire layer contents. 
+ if _, err = io.CopyN(tarWriter, tarReader, header.Size); err != nil { + return nil, fmt.Errorf("writing layer file: %w", err) + } + } + } + + if err := tarWriter.Close(); err != nil { + return nil, err + } + + b := w.Bytes() + // gzip the contents, then create the layer + opener := func() (io.ReadCloser, error) { + return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil + } + layer, err = tarball.LayerFromOpener(opener) + if err != nil { + return nil, fmt.Errorf("creating layer: %w", err) + } + + return layer, nil +} + +// Canonical is a helper function to combine Time and configFile +// to remove any randomness during a docker build. +func Canonical(img v1.Image) (v1.Image, error) { + // Set all timestamps to 0 + created := time.Time{} + img, err := Time(img, created) + if err != nil { + return nil, err + } + + cf, err := img.ConfigFile() + if err != nil { + return nil, err + } + + // Get rid of host-dependent random config + cfg := cf.DeepCopy() + + cfg.Container = "" + cfg.Config.Hostname = "" + cfg.DockerVersion = "" //nolint:staticcheck // Field will be removed in next release + + return ConfigFile(img, cfg) +} + +// MediaType modifies the MediaType() of the given image. +func MediaType(img v1.Image, mt types.MediaType) v1.Image { + return &image{ + base: img, + mediaType: &mt, + } +} + +// ConfigMediaType modifies the MediaType() of the given image's Config. +// +// If !mt.IsConfig(), this will be the image's artifactType in any indexes it's a part of. +func ConfigMediaType(img v1.Image, mt types.MediaType) v1.Image { + return &image{ + base: img, + configMediaType: &mt, + } +} + +// IndexMediaType modifies the MediaType() of the given index. +func IndexMediaType(idx v1.ImageIndex, mt types.MediaType) v1.ImageIndex { + return &index{ + base: idx, + mediaType: &mt, + } +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go new file mode 100644 index 0000000000..c606e0b76e --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" +) + +// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase. +func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) { + // Verify that oldBase's layers are present in orig, otherwise orig is + // not based on oldBase at all. 
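`Canonical` above chains `Time` (zeroed timestamps) with clearing host-dependent config fields, which is the usual recipe for reproducible image builds. A sketch:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

func main() {
	// Canonicalize: all timestamps become the zero time and host-dependent
	// config (hostname, container ID, docker version) is cleared.
	img, err := mutate.Canonical(empty.Image)
	if err != nil {
		panic(err)
	}

	cf, err := img.ConfigFile()
	if err != nil {
		panic(err)
	}
	fmt.Println(cf.Created.IsZero(), cf.Config.Hostname == "") // true true
}
```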
+ origLayers, err := orig.Layers() + if err != nil { + return nil, fmt.Errorf("failed to get layers for original: %w", err) + } + oldBaseLayers, err := oldBase.Layers() + if err != nil { + return nil, err + } + if len(oldBaseLayers) > len(origLayers) { + return nil, fmt.Errorf("image %q is not based on %q (too few layers)", orig, oldBase) + } + for i, l := range oldBaseLayers { + oldLayerDigest, err := l.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, oldBase, err) + } + origLayerDigest, err := origLayers[i].Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, orig, err) + } + if oldLayerDigest != origLayerDigest { + return nil, fmt.Errorf("image %q is not based on %q (layer %d mismatch)", orig, oldBase, i) + } + } + + oldConfig, err := oldBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for old base: %w", err) + } + + origConfig, err := orig.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for original: %w", err) + } + + newConfig, err := newBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for new base: %w", err) + } + + // Stitch together an image that contains: + // - original image's config + // - new base image's os/arch properties + // - new base image's layers + top of original image's layers + // - new base image's history + top of original image's history + rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy()) + if err != nil { + return nil, fmt.Errorf("failed to create empty image with original config: %w", err) + } + + // Add new config properties from existing images. + rebasedConfig, err := rebasedImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for rebased image: %w", err) + } + // OS/Arch properties from new base + rebasedConfig.Architecture = newConfig.Architecture + rebasedConfig.OS = newConfig.OS + rebasedConfig.OSVersion = newConfig.OSVersion + + // Apply config properties to rebased. + rebasedImage, err = ConfigFile(rebasedImage, rebasedConfig) + if err != nil { + return nil, fmt.Errorf("failed to replace config for rebased image: %w", err) + } + + // Get new base layers and config for history. + newBaseLayers, err := newBase.Layers() + if err != nil { + return nil, fmt.Errorf("could not get new base layers for new base: %w", err) + } + // Add new base layers. + rebasedImage, err = Append(rebasedImage, createAddendums(0, 0, newConfig.History, newBaseLayers)...) + if err != nil { + return nil, fmt.Errorf("failed to append new base image: %w", err) + } + + // Add original layers above the old base. + rebasedImage, err = Append(rebasedImage, createAddendums(len(oldConfig.History), len(oldBaseLayers)+1, origConfig.History, origLayers)...) + if err != nil { + return nil, fmt.Errorf("failed to append original image: %w", err) + } + + return rebasedImage, nil +} + +// createAddendums makes a list of addendums from a history and layers starting from a specific history and layer +// indexes. +func createAddendums(startHistory, startLayer int, history []v1.History, layers []v1.Layer) []Addendum { + var adds []Addendum + // History should be a superset of layers; empty layers (e.g. ENV statements) only exist in history. + // They cannot be iterated identically but must be walked independently, only advancing the iterator for layers + // when a history entry for a non-empty layer is seen. 
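A corresponding sketch for `Rebase` itself, again with hypothetical references. `Rebase` fails unless the lowest layers of `orig` exactly match `oldBase`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/crane"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

func main() {
	// Hypothetical references: an app image and two versions of its base.
	orig, err := crane.Pull("registry.example.com/app:v1")
	if err != nil {
		log.Fatal(err)
	}
	oldBase, err := crane.Pull("registry.example.com/base:2024-01")
	if err != nil {
		log.Fatal(err)
	}
	newBase, err := crane.Pull("registry.example.com/base:2024-02")
	if err != nil {
		log.Fatal(err)
	}

	// Replaces oldBase's layers and history with newBase's, keeping
	// the application layers and config from orig on top.
	rebased, err := mutate.Rebase(orig, oldBase, newBase)
	if err != nil {
		log.Fatal(err)
	}
	d, _ := rebased.Digest()
	fmt.Println("rebased digest:", d)
}
```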
+	layerIndex := 0
+	for historyIndex := range history {
+		var layer v1.Layer
+		emptyLayer := history[historyIndex].EmptyLayer
+		if !emptyLayer {
+			layer = layers[layerIndex]
+			layerIndex++
+		}
+		if historyIndex >= startHistory || layerIndex >= startLayer {
+			adds = append(adds, Addendum{
+				Layer:   layer,
+				History: history[historyIndex],
+			})
+		}
+	}
+	// In the event history was malformed or non-existent, append the remaining layers.
+	for i := layerIndex; i < len(layers); i++ {
+		if i >= startLayer {
+			adds = append(adds, Addendum{Layer: layers[i]})
+		}
+	}
+
+	return adds
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
new file mode 100644
index 0000000000..53ebbc6ccf
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
@@ -0,0 +1,82 @@
+# `partial`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial)
+
+## Partial Implementations
+
+There are roughly two kinds of image representations: compressed and uncompressed.
+
+The implementations for these kinds of images are almost identical, with the only
+major difference being how blobs (config and layers) are fetched. This common
+code lives in this package, where you provide a _partial_ implementation of a
+compressed or uncompressed image, and you get back a full `v1.Image` implementation.
+
+### Examples
+
+In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms
+of compressed layers. `remote.remoteImage` does this by implementing `CompressedImageCore`:
+
+```go
+type CompressedImageCore interface {
+	RawConfigFile() ([]byte, error)
+	MediaType() (types.MediaType, error)
+	RawManifest() ([]byte, error)
+	LayerByDigest(v1.Hash) (CompressedLayer, error)
+}
+```
+
+In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms
+of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`:
+
+```go
+type UncompressedImageCore interface {
+	RawConfigFile() ([]byte, error)
+	MediaType() (types.MediaType, error)
+	LayerByDiffID(v1.Hash) (UncompressedLayer, error)
+}
+```
+
+## Optional Methods
+
+Where possible, we access some information via optional methods as an optimization.
+
+### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor)
+
+There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data:
+
+* `MediaType`
+* `Platform`
+* `URLs`
+* `Annotations`
+
+For example, in a `tarball.Image`, there is a `LayerSources` field that contains
+an entire layer descriptor with `URLs` information for foreign layers. This
+information can be passed through to callers by implementing this optional
+`Descriptor` method.
+
+See [`#654`](https://github.com/google/go-containerregistry/pull/654).
+
+### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize)
+
+Usually, you don't need to know the uncompressed size of a layer, since that
+information isn't stored in a config file (just the sha256 is needed); however,
+there are cases where it is very helpful to know the layer size, e.g.
when +writing the uncompressed layer into a tarball. + +See [`#655`](https://github.com/google/go-containerregistry/pull/655). + +### [`partial.Exists`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Exists) + +We generally don't care about the existence of something as granular as a +layer, and would rather ensure all the invariants of an image are upheld via +the `validate` package. However, there are situations where we want to do a +quick smoke test to ensure that the underlying storage engine hasn't been +corrupted by something e.g. deleting files or blobs. Thus, we've exposed an +optional `Exists` method that does an existence check without actually reading +any bytes. + +The `remote` package implements this via `HEAD` requests. + +The `layout` package implements this via `os.Stat`. + +See [`#838`](https://github.com/google/go-containerregistry/pull/838). diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go new file mode 100644 index 0000000000..44989ac968 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go @@ -0,0 +1,188 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "io" + + "github.com/google/go-containerregistry/internal/and" + "github.com/google/go-containerregistry/internal/compression" + "github.com/google/go-containerregistry/internal/gzip" + "github.com/google/go-containerregistry/internal/zstd" + comp "github.com/google/go-containerregistry/pkg/compression" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// CompressedLayer represents the bare minimum interface a natively +// compressed layer must implement for us to produce a v1.Layer +type CompressedLayer interface { + // Digest returns the Hash of the compressed layer. + Digest() (v1.Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) + + // Returns the mediaType for the compressed Layer + MediaType() (types.MediaType, error) +} + +// compressedLayerExtender implements v1.Image using the compressed base properties. +type compressedLayerExtender struct { + CompressedLayer +} + +// Uncompressed implements v1.Layer +func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) { + rc, err := cle.Compressed() + if err != nil { + return nil, err + } + + // Often, the "compressed" bytes are not actually-compressed. + // Peek at the first two bytes to determine whether it's correct to + // wrap this with gzip.UnzipReadCloser or zstd.UnzipReadCloser. 
+ cp, pr, err := compression.PeekCompression(rc) + if err != nil { + return nil, err + } + + prc := &and.ReadCloser{ + Reader: pr, + CloseFunc: rc.Close, + } + + switch cp { + case comp.GZip: + return gzip.UnzipReadCloser(prc) + case comp.ZStd: + return zstd.UnzipReadCloser(prc) + default: + return prc, nil + } +} + +// DiffID implements v1.Layer +func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) { + // If our nested CompressedLayer implements DiffID, + // then delegate to it instead. + if wdi, ok := cle.CompressedLayer.(WithDiffID); ok { + return wdi.DiffID() + } + r, err := cle.Uncompressed() + if err != nil { + return v1.Hash{}, err + } + defer r.Close() + h, _, err := v1.SHA256(r) + return h, err +} + +// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer +func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) { + return &compressedLayerExtender{ul}, nil +} + +// CompressedImageCore represents the base minimum interface a natively +// compressed image must implement for us to produce a v1.Image. +type CompressedImageCore interface { + ImageCore + + // RawManifest returns the serialized bytes of the manifest. + RawManifest() ([]byte, error) + + // LayerByDigest is a variation on the v1.Image method, which returns + // a CompressedLayer instead. + LayerByDigest(v1.Hash) (CompressedLayer, error) +} + +// compressedImageExtender implements v1.Image by extending CompressedImageCore with the +// appropriate methods computed from the minimal core. +type compressedImageExtender struct { + CompressedImageCore +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*compressedImageExtender)(nil) + +// Digest implements v1.Image +func (i *compressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// ConfigName implements v1.Image +func (i *compressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// Layers implements v1.Image +func (i *compressedImageExtender) Layers() ([]v1.Layer, error) { + hs, err := FSLayers(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(hs)) + for _, h := range hs { + l, err := i.LayerByDigest(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDigest implements v1.Image +func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + cl, err := i.CompressedImageCore.LayerByDigest(h) + if err != nil { + return nil, err + } + return CompressedToLayer(cl) +} + +// LayerByDiffID implements v1.Image +func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + h, err := DiffIDToBlob(i, h) + if err != nil { + return nil, err + } + return i.LayerByDigest(h) +} + +// ConfigFile implements v1.Image +func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return ConfigFile(i) +} + +// Manifest implements v1.Image +func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) { + return Manifest(i) +} + +// Size implements v1.Image +func (i *compressedImageExtender) Size() (int64, error) { + return Size(i) +} + +// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image +func CompressedToImage(cic CompressedImageCore) (v1.Image, error) { + return &compressedImageExtender{ + CompressedImageCore: cic, + }, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go 
b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go new file mode 100644 index 0000000000..153dfe4d53 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package partial defines methods for building up a v1.Image from +// minimal subsets that are sufficient for defining a v1.Image. +package partial diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go new file mode 100644 index 0000000000..c65f45e0dc --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// ImageCore is the core set of properties without which we cannot build a v1.Image +type ImageCore interface { + // RawConfigFile returns the serialized bytes of this image's config file. + RawConfigFile() ([]byte, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go new file mode 100644 index 0000000000..10cfb2b2f6 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go @@ -0,0 +1,165 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
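To make the partial pattern concrete, here is a toy, in-memory `UncompressedImageCore` (a config-only image with no layers) promoted to a full `v1.Image` via `UncompressedToImage`. All names are illustrative; the sketch assumes nothing beyond the interfaces defined in this package:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

// memImage is a toy UncompressedImageCore: a config file and no layers.
type memImage struct{}

func (memImage) RawConfigFile() ([]byte, error) {
	return json.Marshal(v1.ConfigFile{
		Architecture: "amd64",
		OS:           "linux",
		RootFS:       v1.RootFS{Type: "layers"}, // no diff IDs, so no layers
	})
}

func (memImage) MediaType() (types.MediaType, error) {
	return types.DockerManifestSchema2, nil
}

func (memImage) LayerByDiffID(v1.Hash) (partial.UncompressedLayer, error) {
	return nil, io.EOF // never called: the toy image has no layers
}

func main() {
	img, err := partial.UncompressedToImage(memImage{})
	if err != nil {
		panic(err)
	}
	// The partial package derives everything else: manifest, digest, size.
	d, err := img.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println("derived digest:", d)
}
```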
+
+package partial
+
+import (
+	"fmt"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/match"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// FindManifests, given a v1.ImageIndex, finds the manifests that fit the matcher.
+func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) {
+	// get the actual manifest list
+	indexManifest, err := index.IndexManifest()
+	if err != nil {
+		return nil, fmt.Errorf("unable to get raw index: %w", err)
+	}
+	manifests := []v1.Descriptor{}
+	// keep the manifests that the matcher selects
+	for _, manifest := range indexManifest.Manifests {
+		if matcher(manifest) {
+			manifests = append(manifests, manifest)
+		}
+	}
+	return manifests, nil
+}
+
+// FindImages, given a v1.ImageIndex, finds the images that fit the matcher. If a Descriptor
+// matches the provided Matcher but the referenced item is not an Image, it is ignored.
+// Only entries that match the Matcher and are images are returned.
+func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) {
+	matches := []v1.Image{}
+	manifests, err := FindManifests(index, matcher)
+	if err != nil {
+		return nil, err
+	}
+	for _, desc := range manifests {
+		// if it is not an image, ignore it
+		if !desc.MediaType.IsImage() {
+			continue
+		}
+		img, err := index.Image(desc.Digest)
+		if err != nil {
+			return nil, err
+		}
+		matches = append(matches, img)
+	}
+	return matches, nil
+}
+
+// FindIndexes, given a v1.ImageIndex, finds the indexes that fit the matcher. If a Descriptor
+// matches the provided Matcher but the referenced item is not an Index, it is ignored.
+// Only entries that match the Matcher and are indexes are returned.
+func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
+	matches := []v1.ImageIndex{}
+	manifests, err := FindManifests(index, matcher)
+	if err != nil {
+		return nil, err
+	}
+	for _, desc := range manifests {
+		// if it is not an index, ignore it
+		if !desc.MediaType.IsIndex() {
+			continue
+		}
+		idx, err := index.ImageIndex(desc.Digest)
+		if err != nil {
+			return nil, err
+		}
+		matches = append(matches, idx)
+	}
+	return matches, nil
+}
+
+type withManifests interface {
+	Manifests() ([]Describable, error)
+}
+
+type withLayer interface {
+	Layer(v1.Hash) (v1.Layer, error)
+}
+
+type describable struct {
+	desc v1.Descriptor
+}
+
+func (d describable) Digest() (v1.Hash, error) {
+	return d.desc.Digest, nil
+}
+
+func (d describable) Size() (int64, error) {
+	return d.desc.Size, nil
+}
+
+func (d describable) MediaType() (types.MediaType, error) {
+	return d.desc.MediaType, nil
+}
+
+func (d describable) Descriptor() (*v1.Descriptor, error) {
+	return &d.desc, nil
+}
+
+// Manifests is analogous to v1.Image.Layers in that it allows values in the
+// returned list to be lazily evaluated, which enables an index to contain
+// an image that contains a streaming layer.
+//
+// This should have been part of the v1.ImageIndex interface, but wasn't.
+// It is instead usable through this extension interface.
+func Manifests(idx v1.ImageIndex) ([]Describable, error) {
+	if wm, ok := idx.(withManifests); ok {
+		return wm.Manifests()
+	}
+
+	return ComputeManifests(idx)
+}
+
+// ComputeManifests provides a fallback implementation for Manifests.
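A sketch of filtering an index with these helpers. The reference is hypothetical, and the matcher is just an inline `func(v1.Descriptor) bool`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical multi-platform reference.
	ref, err := name.ParseReference("registry.example.com/app:latest")
	if err != nil {
		log.Fatal(err)
	}
	idx, err := remote.Index(ref)
	if err != nil {
		log.Fatal(err)
	}

	// A match.Matcher is just func(v1.Descriptor) bool.
	linuxArm := func(desc v1.Descriptor) bool {
		return desc.Platform != nil &&
			desc.Platform.OS == "linux" &&
			desc.Platform.Architecture == "arm64"
	}

	imgs, err := partial.FindImages(idx, linuxArm)
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		d, _ := img.Digest()
		fmt.Println(d)
	}
}
```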
+func ComputeManifests(idx v1.ImageIndex) ([]Describable, error) { + m, err := idx.IndexManifest() + if err != nil { + return nil, err + } + manifests := []Describable{} + for _, desc := range m.Manifests { + switch { + case desc.MediaType.IsImage(): + img, err := idx.Image(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, img) + case desc.MediaType.IsIndex(): + idx, err := idx.ImageIndex(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, idx) + default: + if wl, ok := idx.(withLayer); ok { + layer, err := wl.Layer(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, layer) + } else { + manifests = append(manifests, describable{desc}) + } + } + } + + return manifests, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go new file mode 100644 index 0000000000..df20d3aa9e --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go @@ -0,0 +1,223 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "bytes" + "io" + "sync" + + "github.com/google/go-containerregistry/internal/gzip" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// UncompressedLayer represents the bare minimum interface a natively +// uncompressed layer must implement for us to produce a v1.Layer +type UncompressedLayer interface { + // DiffID returns the Hash of the uncompressed layer. + DiffID() (v1.Hash, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) + + // Returns the mediaType for the compressed Layer + MediaType() (types.MediaType, error) +} + +// uncompressedLayerExtender implements v1.Image using the uncompressed base properties. +type uncompressedLayerExtender struct { + UncompressedLayer + // Memoize size/hash so that the methods aren't twice as + // expensive as doing this manually. 
+ hash v1.Hash + size int64 + hashSizeError error + once sync.Once +} + +// Compressed implements v1.Layer +func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { + u, err := ule.Uncompressed() + if err != nil { + return nil, err + } + return gzip.ReadCloser(u), nil +} + +// Digest implements v1.Layer +func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { + ule.calcSizeHash() + return ule.hash, ule.hashSizeError +} + +// Size implements v1.Layer +func (ule *uncompressedLayerExtender) Size() (int64, error) { + ule.calcSizeHash() + return ule.size, ule.hashSizeError +} + +func (ule *uncompressedLayerExtender) calcSizeHash() { + ule.once.Do(func() { + var r io.ReadCloser + r, ule.hashSizeError = ule.Compressed() + if ule.hashSizeError != nil { + return + } + defer r.Close() + ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r) + }) +} + +// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer +func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { + return &uncompressedLayerExtender{UncompressedLayer: ul}, nil +} + +// UncompressedImageCore represents the bare minimum interface a natively +// uncompressed image must implement for us to produce a v1.Image +type UncompressedImageCore interface { + ImageCore + + // LayerByDiffID is a variation on the v1.Image method, which returns + // an UncompressedLayer instead. + LayerByDiffID(v1.Hash) (UncompressedLayer, error) +} + +// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image. +func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) { + return &uncompressedImageExtender{ + UncompressedImageCore: uic, + }, nil +} + +// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the +// appropriate methods computed from the minimal core. 
+type uncompressedImageExtender struct { + UncompressedImageCore + + lock sync.Mutex + manifest *v1.Manifest +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*uncompressedImageExtender)(nil) + +// Digest implements v1.Image +func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// Manifest implements v1.Image +func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { + i.lock.Lock() + defer i.lock.Unlock() + if i.manifest != nil { + return i.manifest, nil + } + + b, err := i.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + m := &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + ls, err := i.Layers() + if err != nil { + return nil, err + } + + m.Layers = make([]v1.Descriptor, len(ls)) + for i, l := range ls { + desc, err := Descriptor(l) + if err != nil { + return nil, err + } + + m.Layers[i] = *desc + } + + i.manifest = m + return i.manifest, nil +} + +// RawManifest implements v1.Image +func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { + return RawManifest(i) +} + +// Size implements v1.Image +func (i *uncompressedImageExtender) Size() (int64, error) { + return Size(i) +} + +// ConfigName implements v1.Image +func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// ConfigFile implements v1.Image +func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return ConfigFile(i) +} + +// Layers implements v1.Image +func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { + diffIDs, err := DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDiffID implements v1.Image +func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { + ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) + if err != nil { + return nil, err + } + return UncompressedToLayer(ul) +} + +// LayerByDigest implements v1.Image +func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + diffID, err := BlobToDiffID(i, h) + if err != nil { + return nil, err + } + return i.LayerByDiffID(diffID) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go new file mode 100644 index 0000000000..c8b22b3f95 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -0,0 +1,436 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
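A usage sketch of the helper functions defined in this file (with.go), applied to an image fetched from a hypothetical registry; the helpers accept narrow interfaces, so any `v1.Image` satisfies them:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical reference.
	ref, err := name.ParseReference("registry.example.com/app:latest")
	if err != nil {
		log.Fatal(err)
	}
	img, err := remote.Image(ref)
	if err != nil {
		log.Fatal(err)
	}

	digest, _ := partial.Digest(img)      // sha256 of the raw manifest
	size, _ := partial.Size(img)          // raw manifest size in bytes
	cfgName, _ := partial.ConfigName(img) // sha256 of the raw config (the image ID)

	fmt.Println(digest, size, cfgName)
}
```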
+
+package partial
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// WithRawConfigFile defines the subset of v1.Image used by these helper methods
+type WithRawConfigFile interface {
+	// RawConfigFile returns the serialized bytes of this image's config file.
+	RawConfigFile() ([]byte, error)
+}
+
+// ConfigFile is a helper for implementing v1.Image
+func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) {
+	b, err := i.RawConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return v1.ParseConfigFile(bytes.NewReader(b))
+}
+
+// ConfigName is a helper for implementing v1.Image
+func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
+	b, err := i.RawConfigFile()
+	if err != nil {
+		return v1.Hash{}, err
+	}
+	h, _, err := v1.SHA256(bytes.NewReader(b))
+	return h, err
+}
+
+type configLayer struct {
+	hash    v1.Hash
+	content []byte
+}
+
+// Digest implements v1.Layer
+func (cl *configLayer) Digest() (v1.Hash, error) {
+	return cl.hash, nil
+}
+
+// DiffID implements v1.Layer
+func (cl *configLayer) DiffID() (v1.Hash, error) {
+	return cl.hash, nil
+}
+
+// Uncompressed implements v1.Layer
+func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
+	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Compressed implements v1.Layer
+func (cl *configLayer) Compressed() (io.ReadCloser, error) {
+	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Size implements v1.Layer
+func (cl *configLayer) Size() (int64, error) {
+	return int64(len(cl.content)), nil
+}
+
+func (cl *configLayer) MediaType() (types.MediaType, error) {
+	// Defaulting this to OCIConfigJSON as it should remain
+	// backwards compatible with DockerConfigJSON
+	return types.OCIConfigJSON, nil
+}
+
+var _ v1.Layer = (*configLayer)(nil)
+
+// withConfigLayer allows partial image implementations to provide a layer
+// for their config file.
+type withConfigLayer interface {
+	ConfigLayer() (v1.Layer, error)
+}
+
+// ConfigLayer implements v1.Layer from the raw config bytes.
+// This is so that clients (e.g. remote) can access the config as a blob.
+//
+// Images that want to return a specific layer implementation can implement
+// withConfigLayer.
+func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
+	if wcl, ok := unwrap(i).(withConfigLayer); ok {
+		return wcl.ConfigLayer()
+	}
+
+	h, err := ConfigName(i)
+	if err != nil {
+		return nil, err
+	}
+	rcfg, err := i.RawConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return &configLayer{
+		hash:    h,
+		content: rcfg,
+	}, nil
+}
+
+// WithConfigFile defines the subset of v1.Image used by these helper methods
+type WithConfigFile interface {
+	// ConfigFile returns this image's config file.
+	ConfigFile() (*v1.ConfigFile, error)
+}
+
+// DiffIDs is a helper for implementing v1.Image
+func DiffIDs(i WithConfigFile) ([]v1.Hash, error) {
+	cfg, err := i.ConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return cfg.RootFS.DiffIDs, nil
+}
+
+// RawConfigFile is a helper for implementing v1.Image
+func RawConfigFile(i WithConfigFile) ([]byte, error) {
+	cfg, err := i.ConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(cfg)
+}
+
+// WithRawManifest defines the subset of v1.Image used by these helper methods
+type WithRawManifest interface {
+	// RawManifest returns the serialized bytes of this image's manifest.
+ RawManifest() ([]byte, error) +} + +// Digest is a helper for implementing v1.Image +func Digest(i WithRawManifest) (v1.Hash, error) { + mb, err := i.RawManifest() + if err != nil { + return v1.Hash{}, err + } + digest, _, err := v1.SHA256(bytes.NewReader(mb)) + return digest, err +} + +// Manifest is a helper for implementing v1.Image +func Manifest(i WithRawManifest) (*v1.Manifest, error) { + b, err := i.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseManifest(bytes.NewReader(b)) +} + +// WithManifest defines the subset of v1.Image used by these helper methods +type WithManifest interface { + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// RawManifest is a helper for implementing v1.Image +func RawManifest(i WithManifest) ([]byte, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// Size is a helper for implementing v1.Image +func Size(i WithRawManifest) (int64, error) { + b, err := i.RawManifest() + if err != nil { + return -1, err + } + return int64(len(b)), nil +} + +// FSLayers is a helper for implementing v1.Image +func FSLayers(i WithManifest) ([]v1.Hash, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + fsl := make([]v1.Hash, len(m.Layers)) + for i, l := range m.Layers { + fsl[i] = l.Digest + } + return fsl, nil +} + +// BlobSize is a helper for implementing v1.Image +func BlobSize(i WithManifest, h v1.Hash) (int64, error) { + d, err := BlobDescriptor(i, h) + if err != nil { + return -1, err + } + return d.Size, nil +} + +// BlobDescriptor is a helper for implementing v1.Image +func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + + if m.Config.Digest == h { + return &m.Config, nil + } + + for _, l := range m.Layers { + if l.Digest == h { + return &l, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} + +// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods +type WithManifestAndConfigFile interface { + WithConfigFile + + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// BlobToDiffID is a helper for mapping between compressed +// and uncompressed blob hashes. +func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(i) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(i) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, blob := range blobs { + if blob == h { + return diffIDs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown blob %v", h) +} + +// DiffIDToBlob is a helper for mapping between uncompressed +// and compressed blob hashes. +func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(wm) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(wm) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, diffID := range diffIDs { + if diffID == h { + return blobs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) +} + +// WithDiffID defines the subset of v1.Layer for exposing the DiffID method. 
+type WithDiffID interface { + DiffID() (v1.Hash, error) +} + +// withDescriptor allows partial layer implementations to provide a layer +// descriptor to the partial image manifest builder. This allows partial +// uncompressed layers to provide foreign layer metadata like URLs to the +// uncompressed image manifest. +type withDescriptor interface { + Descriptor() (*v1.Descriptor, error) +} + +// Describable represents something for which we can produce a v1.Descriptor. +type Describable interface { + Digest() (v1.Hash, error) + MediaType() (types.MediaType, error) + Size() (int64, error) +} + +// Descriptor returns a v1.Descriptor given a Describable. It also encodes +// some logic for unwrapping things that have been wrapped by +// CompressedToLayer, UncompressedToLayer, CompressedToImage, or +// UncompressedToImage. +func Descriptor(d Describable) (*v1.Descriptor, error) { + // If Describable implements Descriptor itself, return that. + if wd, ok := unwrap(d).(withDescriptor); ok { + return wd.Descriptor() + } + + // If all else fails, compute the descriptor from the individual methods. + var ( + desc v1.Descriptor + err error + ) + + if desc.Size, err = d.Size(); err != nil { + return nil, err + } + if desc.Digest, err = d.Digest(); err != nil { + return nil, err + } + if desc.MediaType, err = d.MediaType(); err != nil { + return nil, err + } + if wat, ok := d.(withArtifactType); ok { + if desc.ArtifactType, err = wat.ArtifactType(); err != nil { + return nil, err + } + } else { + if wrm, ok := d.(WithRawManifest); ok && desc.MediaType.IsImage() { + mf, _ := Manifest(wrm) + // Failing to parse as a manifest should just be ignored. + // The manifest might not be valid, and that's okay. + if mf != nil && !mf.Config.MediaType.IsConfig() { + desc.ArtifactType = string(mf.Config.MediaType) + } + } + } + + return &desc, nil +} + +type withArtifactType interface { + ArtifactType() (string, error) +} + +type withUncompressedSize interface { + UncompressedSize() (int64, error) +} + +// UncompressedSize returns the size of the Uncompressed layer. If the +// underlying implementation doesn't implement UncompressedSize directly, +// this will compute the uncompressedSize by reading everything returned +// by Compressed(). This is potentially expensive and may consume the contents +// for streaming layers. +func UncompressedSize(l v1.Layer) (int64, error) { + // If the layer implements UncompressedSize itself, return that. + if wus, ok := unwrap(l).(withUncompressedSize); ok { + return wus.UncompressedSize() + } + + // The layer doesn't implement UncompressedSize, we need to compute it. + rc, err := l.Uncompressed() + if err != nil { + return -1, err + } + defer rc.Close() + + return io.Copy(io.Discard, rc) +} + +type withExists interface { + Exists() (bool, error) +} + +// Exists checks to see if a layer exists. This is a hack to work around the +// mistakes of the partial package. Don't use this. +func Exists(l v1.Layer) (bool, error) { + // If the layer implements Exists itself, return that. + if we, ok := unwrap(l).(withExists); ok { + return we.Exists() + } + + // The layer doesn't implement Exists, so we hope that calling Compressed() + // is enough to trigger an error if the layer does not exist. + rc, err := l.Compressed() + if err != nil { + return false, err + } + defer rc.Close() + + // We may want to try actually reading a single byte, but if we need to do + // that, we should just fix this hack. 
+ return true, nil +} + +// Recursively unwrap our wrappers so that we can check for the original implementation. +// We might want to expose this? +func unwrap(i any) any { + if ule, ok := i.(*uncompressedLayerExtender); ok { + return unwrap(ule.UncompressedLayer) + } + if cle, ok := i.(*compressedLayerExtender); ok { + return unwrap(cle.CompressedLayer) + } + if uie, ok := i.(*uncompressedImageExtender); ok { + return unwrap(uie.UncompressedImageCore) + } + if cie, ok := i.(*compressedImageExtender); ok { + return unwrap(cie.CompressedImageCore) + } + return i +} + +// ArtifactType returns the artifact type for the given manifest. +// +// If the manifest reports its own artifact type, that's returned, otherwise +// the manifest is parsed and, if successful, its config.mediaType is returned. +func ArtifactType(w WithManifest) (string, error) { + if wat, ok := w.(withArtifactType); ok { + return wat.ArtifactType() + } + mf, _ := w.Manifest() + // Failing to parse as a manifest should just be ignored. + // The manifest might not be valid, and that's okay. + if mf != nil && !mf.Config.MediaType.IsConfig() { + return string(mf.Config.MediaType), nil + } + return "", nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go new file mode 100644 index 0000000000..59ca402698 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go @@ -0,0 +1,149 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "fmt" + "sort" + "strings" +) + +// Platform represents the target os/arch for an image. +type Platform struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` +} + +func (p Platform) String() string { + if p.OS == "" { + return "" + } + var b strings.Builder + b.WriteString(p.OS) + if p.Architecture != "" { + b.WriteString("/") + b.WriteString(p.Architecture) + } + if p.Variant != "" { + b.WriteString("/") + b.WriteString(p.Variant) + } + if p.OSVersion != "" { + b.WriteString(":") + b.WriteString(p.OSVersion) + } + return b.String() +} + +// ParsePlatform parses a string representing a Platform, if possible. 
+func ParsePlatform(s string) (*Platform, error) { + var p Platform + parts := strings.Split(strings.TrimSpace(s), ":") + if len(parts) == 2 { + p.OSVersion = parts[1] + } + parts = strings.Split(parts[0], "/") + if len(parts) > 0 { + p.OS = parts[0] + } + if len(parts) > 1 { + p.Architecture = parts[1] + } + if len(parts) > 2 { + p.Variant = parts[2] + } + if len(parts) > 3 { + return nil, fmt.Errorf("too many slashes in platform spec: %s", s) + } + return &p, nil +} + +// Equals returns true if the given platform is semantically equivalent to this one. +// The order of Features and OSFeatures is not important. +func (p Platform) Equals(o Platform) bool { + return p.OS == o.OS && + p.Architecture == o.Architecture && + p.Variant == o.Variant && + p.OSVersion == o.OSVersion && + stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && + stringSliceEqualIgnoreOrder(p.Features, o.Features) +} + +// Satisfies returns true if this Platform "satisfies" the given spec Platform. +// +// Note that this is different from Equals and that Satisfies is not reflexive. +// +// The given spec represents "requirements" such that any missing values in the +// spec are not compared. +// +// For OSFeatures and Features, Satisfies will return true if this Platform's +// fields contain a superset of the values in the spec's fields (order ignored). +func (p Platform) Satisfies(spec Platform) bool { + return satisfies(spec.OS, p.OS) && + satisfies(spec.Architecture, p.Architecture) && + satisfies(spec.Variant, p.Variant) && + satisfies(spec.OSVersion, p.OSVersion) && + satisfiesList(spec.OSFeatures, p.OSFeatures) && + satisfiesList(spec.Features, p.Features) +} + +func satisfies(want, have string) bool { + return want == "" || want == have +} + +func satisfiesList(want, have []string) bool { + if len(want) == 0 { + return true + } + + set := map[string]struct{}{} + for _, h := range have { + set[h] = struct{}{} + } + + for _, w := range want { + if _, ok := set[w]; !ok { + return false + } + } + + return true +} + +// stringSliceEqual compares 2 string slices and returns if their contents are identical. +func stringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, elm := range a { + if elm != b[i] { + return false + } + } + return true +} + +// stringSliceEqualIgnoreOrder compares 2 string slices and returns if their contents are identical, ignoring order +func stringSliceEqualIgnoreOrder(a, b []string) bool { + if a != nil && b != nil { + sort.Strings(a) + sort.Strings(b) + } + return stringSliceEqual(a, b) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go new file mode 100644 index 0000000000..844f04d937 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go @@ -0,0 +1,25 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
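A small example of the platform helpers above. Note that `Satisfies` treats empty spec fields as wildcards and is deliberately not symmetric:

```go
package main

import (
	"fmt"
	"log"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	p, err := v1.ParsePlatform("linux/arm64/v8")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.OS, p.Architecture, p.Variant) // linux arm64 v8

	// Empty fields in the spec are not compared, so this is satisfied...
	spec := v1.Platform{OS: "linux", Architecture: "arm64"}
	fmt.Println(p.Satisfies(spec)) // true

	// ...but the reverse is not: the spec platform lacks the v8 variant.
	fmt.Println(spec.Satisfies(*p)) // false
}
```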
+ +package v1 + +// Update representation of an update of transfer progress. Some functions +// in this module can take a channel to which updates will be sent while a +// transfer is in progress. +// +k8s:deepcopy-gen=false +type Update struct { + Total int64 + Complete int64 + Error error +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md new file mode 100644 index 0000000000..c1e81b310b --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md @@ -0,0 +1,117 @@ +# `remote` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) + +The `remote` package implements a client for accessing a registry, +per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md). + +It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the +authentication handshake and structured errors. + +## Usage + +```go +package main + +import ( + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + ref, err := name.ParseReference("gcr.io/google-containers/pause") + if err != nil { + panic(err) + } + + img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + panic(err) + } + + // do stuff with img +} +``` + +## Structure + +
+(structure diagram of the remote package omitted)
+
+
+## Background
+
+There are a lot of confusingly similar terms that come up when talking about images in registries.
+
+### Anatomy of an image
+
+In general...
+
+* A tag refers to an image manifest.
+* An image manifest references a config file and an ordered list of _compressed_ layers by sha256 digest.
+* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration.
+* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image.
+
+For example, an image with two layers would look something like this:
+
+![image anatomy](/images/image-anatomy.dot.svg)
+
+### Anatomy of an index
+
+In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image.
+This was the original use case for a [manifest
+list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list).
+
+![image index anatomy](/images/index-anatomy.dot.svg)
+
+It is possible for an index to reference another index, per the OCI
+[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix).
+In theory, both an image and image index can reference arbitrary things via
+[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md),
+e.g. see the [image layout
+example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example),
+which references an application/xml file from an image index.
+
+That could look something like this:
+
+![strange image index anatomy](/images/index-anatomy-strange.dot.svg)
+
+Using a recursive index like this might not be possible with all registries,
+but this flexibility allows for some interesting applications, e.g. the
+[OCI Artifacts](https://github.com/opencontainers/artifacts) effort.
+
+### Anatomy of an image upload
+
+The structure of an image requires a delicate ordering when uploading an image to a registry.
+Below is a (slightly simplified) figure that describes how an image is prepared for upload
+to a registry and how the data flows between various artifacts:
+
+![upload](/images/upload.dot.svg)
+
+Note that:
+
+* A config file references the uncompressed layer contents by sha256.
+* A manifest references the compressed layer contents by sha256 and the size of the layer.
+* A manifest references the config file contents by sha256 and the size of the file.
+
+It follows that during an upload, we need to upload layers before the config file,
+and we need to upload the config file before the manifest.
+
+Sometimes, we know all of this information ahead of time (e.g. when copying from remote.Image),
+so the ordering is less important.
+
+In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer),
+we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering.
+
+## Caveats
+
+### schema 1
+
+This package does not support schema 1 images; see [`#377`](https://github.com/google/go-containerregistry/issues/377).
+However, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get),
+which doesn't try to interpret what is returned by the registry.
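A sketch of that `remote.Get` pattern, using a hypothetical reference: the returned descriptor is inspected before committing to an image or index interpretation:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical reference; Get works for images, indexes, and other artifacts.
	ref, err := name.ParseReference("registry.example.com/app:latest")
	if err != nil {
		log.Fatal(err)
	}
	desc, err := remote.Get(ref)
	if err != nil {
		log.Fatal(err)
	}

	switch {
	case desc.MediaType.IsIndex():
		idx, err := desc.ImageIndex()
		if err != nil {
			log.Fatal(err)
		}
		m, _ := idx.IndexManifest()
		fmt.Println("index with", len(m.Manifests), "manifests")
	case desc.MediaType.IsImage():
		img, err := desc.Image()
		if err != nil {
			log.Fatal(err)
		}
		d, _ := img.Digest()
		fmt.Println("image", d)
	default:
		fmt.Println("other artifact:", desc.MediaType)
	}
}
```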
+ +[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images, +see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go). diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go new file mode 100644 index 0000000000..a0281b9fd2 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go @@ -0,0 +1,159 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +type Catalogs struct { + Repos []string `json:"repositories"` + Next string `json:"next,omitempty"` +} + +// CatalogPage calls /_catalog, returning the list of repositories on the registry. +func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + + f, err := newPuller(o).fetcher(o.context, target) + if err != nil { + return nil, err + } + + uri := url.URL{ + Scheme: target.Scheme(), + Host: target.RegistryStr(), + Path: "/v2/_catalog", + RawQuery: fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n), + } + + req, err := http.NewRequest(http.MethodGet, uri.String(), nil) + if err != nil { + return nil, err + } + resp, err := f.client.Do(req.WithContext(o.context)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + var parsed Catalogs + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + + return parsed.Repos, nil +} + +// Catalog calls /_catalog, returning the list of repositories on the registry. +func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + + // WithContext overrides the ctx passed directly. 
+ if o.context != context.Background() { + ctx = o.context + } + + return newPuller(o).catalog(ctx, target, o.pageSize) +} + +func (f *fetcher) catalogPage(ctx context.Context, reg name.Registry, next string, pageSize int) (*Catalogs, error) { + if next == "" { + uri := &url.URL{ + Scheme: reg.Scheme(), + Host: reg.RegistryStr(), + Path: "/v2/_catalog", + } + if pageSize > 0 { + uri.RawQuery = fmt.Sprintf("n=%d", pageSize) + } + next = uri.String() + } + + req, err := http.NewRequestWithContext(ctx, "GET", next, nil) + if err != nil { + return nil, err + } + + resp, err := f.client.Do(req) + if err != nil { + return nil, err + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + parsed := Catalogs{} + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + + if err := resp.Body.Close(); err != nil { + return nil, err + } + + uri, err := getNextPageURL(resp) + if err != nil { + return nil, err + } + + if uri != nil { + parsed.Next = uri.String() + } + + return &parsed, nil +} + +type Catalogger struct { + f *fetcher + reg name.Registry + pageSize int + + page *Catalogs + err error + + needMore bool +} + +func (l *Catalogger) Next(ctx context.Context) (*Catalogs, error) { + if l.needMore { + l.page, l.err = l.f.catalogPage(ctx, l.reg, l.page.Next, l.pageSize) + } else { + l.needMore = true + } + return l.page, l.err +} + +func (l *Catalogger) HasNext() bool { + return l.page != nil && (!l.needMore || l.page.Next != "") +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go new file mode 100644 index 0000000000..b4395c2391 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go @@ -0,0 +1,72 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// CheckPushPermission returns an error if the given keychain cannot authorize +// a push operation to the given ref. +// +// This can be useful to check whether the caller has permission to push an +// image before doing work to construct the image. +// +// TODO(#412): Remove the need for this method. 
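A sketch of the early permission check described above, using a hypothetical push target and the default keychain:

```go
package main

import (
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical push target.
	ref, err := name.ParseReference("registry.example.com/app:latest")
	if err != nil {
		log.Fatal(err)
	}

	// Fail fast before doing expensive image-building work.
	if err := remote.CheckPushPermission(ref, authn.DefaultKeychain, http.DefaultTransport); err != nil {
		log.Fatalf("cannot push to %s: %v", ref, err)
	}
}
```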
+func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error { + auth, err := kc.Resolve(ref.Context().Registry) + if err != nil { + return fmt.Errorf("resolving authorization for %v failed: %w", ref.Context().Registry, err) + } + + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.NewWithContext(context.TODO(), ref.Context().Registry, auth, t, scopes) + if err != nil { + return fmt.Errorf("creating push check transport for %v failed: %w", ref.Context().Registry, err) + } + // TODO(jasonhall): Against GCR, just doing the token handshake is + // enough, but this doesn't extend to Dockerhub + // (https://github.com/docker/hub-feedback/issues/1771), so we actually + // need to initiate an upload to tell whether the credentials can + // authorize a push. Figure out how to return early here when we can, + // to avoid a roundtrip for spec-compliant registries. + w := writer{ + repo: ref.Context(), + client: &http.Client{Transport: tr}, + } + loc, _, err := w.initiateUpload(context.Background(), "", "", "") + if loc != "" { + // Since we're only initiating the upload to check whether we + // can, we should attempt to cancel it, in case initiating + // reserves some resources on the server. We shouldn't wait for + // cancelling to complete, and we don't care if it fails. + go w.cancelUpload(loc) + } + return err +} + +func (w *writer) cancelUpload(loc string) { + req, err := http.NewRequest(http.MethodDelete, loc, nil) + if err != nil { + return + } + _, _ = w.client.Do(req) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go new file mode 100644 index 0000000000..36e1d0816e --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "github.com/google/go-containerregistry/pkg/name" +) + +// Delete removes the specified image reference from the remote registry. +func Delete(ref name.Reference, options ...Option) error { + o, err := makeOptions(options...) + if err != nil { + return err + } + return newPusher(o).Delete(o.context, ref) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go new file mode 100644 index 0000000000..fafe910e95 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go @@ -0,0 +1,198 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "errors" + "fmt" + + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var allManifestMediaTypes = append(append([]types.MediaType{ + types.DockerManifestSchema1, + types.DockerManifestSchema1Signed, +}, acceptableImageMediaTypes...), acceptableIndexMediaTypes...) + +// ErrSchema1 indicates that we received a schema1 manifest from the registry. +// This library doesn't have plans to support this legacy image format: +// https://github.com/google/go-containerregistry/issues/377 +var ErrSchema1 = errors.New("see https://github.com/google/go-containerregistry/issues/377") + +// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType. +func newErrSchema1(schema types.MediaType) error { + return fmt.Errorf("unsupported MediaType: %q, %w", schema, ErrSchema1) +} + +// Descriptor provides access to metadata about remote artifact and accessors +// for efficiently converting it into a v1.Image or v1.ImageIndex. +type Descriptor struct { + fetcher fetcher + v1.Descriptor + + ref name.Reference + Manifest []byte + ctx context.Context + + // So we can share this implementation with Image. + platform v1.Platform +} + +func (d *Descriptor) toDesc() v1.Descriptor { + return d.Descriptor +} + +// RawManifest exists to satisfy the Taggable interface. +func (d *Descriptor) RawManifest() ([]byte, error) { + return d.Manifest, nil +} + +// Get returns a remote.Descriptor for the given reference. The response from +// the registry is left un-interpreted, for the most part. This is useful for +// querying what kind of artifact a reference represents. +// +// See Head if you don't need the response body. +func Get(ref name.Reference, options ...Option) (*Descriptor, error) { + return get(ref, allManifestMediaTypes, options...) +} + +// Head returns a v1.Descriptor for the given reference by issuing a HEAD +// request. +// +// Note that the server response will not have a body, so any errors encountered +// should be retried with Get to get more details. +func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + + return newPuller(o).Head(o.context, ref) +} + +// Handle options and fetch the manifest with the acceptable MediaTypes in the +// Accept header. +func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + return newPuller(o).get(o.context, ref, acceptable, o.platform) +} + +// Image converts the Descriptor into a v1.Image. +// +// If the fetched artifact is already an image, it will just return it. +// +// If the fetched artifact is an index, it will attempt to resolve the index to +// a child image with the appropriate platform. +// +// See WithPlatform to set the desired platform. 
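+//
+// A minimal usage sketch (illustrative only -- "example.com/repo:tag" is a
+// hypothetical reference, and error handling is elided):
+//
+//	ref, _ := name.ParseReference("example.com/repo:tag")
+//	desc, _ := remote.Get(ref, remote.WithPlatform(v1.Platform{OS: "linux", Architecture: "arm64"}))
+//	img, _ := desc.Image() // an index is resolved to its linux/arm64 child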
+func (d *Descriptor) Image() (v1.Image, error) { + switch d.MediaType { + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + // We don't care to support schema 1 images: + // https://github.com/google/go-containerregistry/issues/377 + return nil, newErrSchema1(d.MediaType) + case types.OCIImageIndex, types.DockerManifestList: + // We want an image but the registry has an index, resolve it to an image. + return d.remoteIndex().imageByPlatform(d.platform) + case types.OCIManifestSchema1, types.DockerManifestSchema2: + // These are expected. Enumerated here to allow a default case. + default: + // We could just return an error here, but some registries (e.g. static + // registries) don't set the Content-Type headers correctly, so instead... + logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType) + } + + // Wrap the v1.Layers returned by this v1.Image in a hint for downstream + // remote.Write calls to facilitate cross-repo "mounting". + imgCore, err := partial.CompressedToImage(d.remoteImage()) + if err != nil { + return nil, err + } + return &mountableImage{ + Image: imgCore, + Reference: d.ref, + }, nil +} + +// Schema1 converts the Descriptor into a v1.Image for v2 schema 1 media types. +// +// The v1.Image returned by this method does not implement the entire interface because it would be inefficient. +// This exists mostly to make it easier to copy schema 1 images around or look at their filesystems. +// This is separate from Image() to avoid a backward incompatible change for callers expecting ErrSchema1. +func (d *Descriptor) Schema1() (v1.Image, error) { + i := &schema1{ + ref: d.ref, + fetcher: d.fetcher, + ctx: d.ctx, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } + + return &mountableImage{ + Image: i, + Reference: d.ref, + }, nil +} + +// ImageIndex converts the Descriptor into a v1.ImageIndex. +func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) { + switch d.MediaType { + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + // We don't care to support schema 1 images: + // https://github.com/google/go-containerregistry/issues/377 + return nil, newErrSchema1(d.MediaType) + case types.OCIManifestSchema1, types.DockerManifestSchema2: + // We want an index but the registry has an image, nothing we can do. + return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType) + case types.OCIImageIndex, types.DockerManifestList: + // These are expected. + default: + // We could just return an error here, but some registries (e.g. static + // registries) don't set the Content-Type headers correctly, so instead... 
+ logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType) + } + return d.remoteIndex(), nil +} + +func (d *Descriptor) remoteImage() *remoteImage { + return &remoteImage{ + ref: d.ref, + ctx: d.ctx, + fetcher: d.fetcher, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } +} + +func (d *Descriptor) remoteIndex() *remoteIndex { + return &remoteIndex{ + ref: d.ref, + ctx: d.ctx, + fetcher: d.fetcher, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go new file mode 100644 index 0000000000..846ba07cda --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package remote provides facilities for reading/writing v1.Images from/to +// a remote image registry. +package remote diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go new file mode 100644 index 0000000000..d77b37c0c2 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go @@ -0,0 +1,317 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/internal/verify" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +const ( + kib = 1024 + mib = 1024 * kib + manifestLimit = 100 * mib +) + +// fetcher implements methods for reading from a registry. 
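+// It pairs the resolved target (a name.Registry or name.Repository) with an
+// http.Client whose transport has already negotiated pull-scoped auth; see
+// makeFetcher below.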
+type fetcher struct { + target resource + client *http.Client +} + +func makeFetcher(ctx context.Context, target resource, o *options) (*fetcher, error) { + auth := o.auth + if o.keychain != nil { + kauth, err := authn.Resolve(ctx, o.keychain, target) + if err != nil { + return nil, err + } + auth = kauth + } + + reg, ok := target.(name.Registry) + if !ok { + repo, ok := target.(name.Repository) + if !ok { + return nil, fmt.Errorf("unexpected resource: %T", target) + } + reg = repo.Registry + } + + tr, err := transport.NewWithContext(ctx, reg, auth, o.transport, []string{target.Scope(transport.PullScope)}) + if err != nil { + return nil, err + } + return &fetcher{ + target: target, + client: &http.Client{Transport: tr}, + }, nil +} + +func (f *fetcher) Do(req *http.Request) (*http.Response, error) { + return f.client.Do(req) +} + +type resource interface { + Scheme() string + RegistryStr() string + Scope(string) string + + authn.Resource +} + +// url returns a url.Url for the specified path in the context of this remote image reference. +func (f *fetcher) url(resource, identifier string) url.URL { + u := url.URL{ + Scheme: f.target.Scheme(), + Host: f.target.RegistryStr(), + // Default path if this is not a repository. + Path: "/v2/_catalog", + } + if repo, ok := f.target.(name.Repository); ok { + u.Path = fmt.Sprintf("/v2/%s/%s/%s", repo.RepositoryStr(), resource, identifier) + } + return u +} + +func (f *fetcher) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) { + b, desc, err := f.fetchManifest(ctx, ref, acceptable) + if err != nil { + return nil, err + } + return &Descriptor{ + ref: ref, + ctx: ctx, + fetcher: *f, + Manifest: b, + Descriptor: *desc, + platform: platform, + }, nil +} + +func (f *fetcher) fetchManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { + u := f.url("manifests", ref.Identifier()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, nil, err + } + accept := []string{} + for _, mt := range acceptable { + accept = append(accept, string(mt)) + } + req.Header.Set("Accept", strings.Join(accept, ",")) + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, nil, err + } + + manifest, err := io.ReadAll(io.LimitReader(resp.Body, manifestLimit)) + if err != nil { + return nil, nil, err + } + + digest, size, err := v1.SHA256(bytes.NewReader(manifest)) + if err != nil { + return nil, nil, err + } + + mediaType := types.MediaType(resp.Header.Get("Content-Type")) + contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest")) + if err == nil && mediaType == types.DockerManifestSchema1Signed { + // If we can parse the digest from the header, and it's a signed schema 1 + // manifest, let's use that for the digest to appease older registries. + digest = contentDigest + } + + // Validate the digest matches what we asked for, if pulling by digest. + if dgst, ok := ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref) + } + } + + var artifactType string + mf, _ := v1.ParseManifest(bytes.NewReader(manifest)) + // Failing to parse as a manifest should just be ignored. 
+ // The manifest might not be valid, and that's okay. + if mf != nil && !mf.Config.MediaType.IsConfig() { + artifactType = string(mf.Config.MediaType) + } + + // Do nothing for tags; I give up. + // + // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, + // but so many registries implement this incorrectly that it's not worth checking. + // + // For reference: + // https://github.com/GoogleContainerTools/kaniko/issues/298 + + // Return all this info since we have to calculate it anyway. + desc := v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: mediaType, + ArtifactType: artifactType, + } + + return manifest, &desc, nil +} + +func (f *fetcher) headManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) { + u := f.url("manifests", ref.Identifier()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return nil, err + } + accept := []string{} + for _, mt := range acceptable { + accept = append(accept, string(mt)) + } + req.Header.Set("Accept", strings.Join(accept, ",")) + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + mth := resp.Header.Get("Content-Type") + if mth == "" { + return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String()) + } + mediaType := types.MediaType(mth) + + size := resp.ContentLength + if size == -1 { + return nil, fmt.Errorf("GET %s: response did not include Content-Length header", u.String()) + } + + dh := resp.Header.Get("Docker-Content-Digest") + if dh == "" { + return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String()) + } + digest, err := v1.NewHash(dh) + if err != nil { + return nil, err + } + + // Validate the digest matches what we asked for, if pulling by digest. + if dgst, ok := ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref) + } + } + + // Return all this info since we have to calculate it anyway. + return &v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: mediaType, + }, nil +} + +func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, redact.Error(err) + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + // Do whatever we can. + // If we have an expected size and Content-Length doesn't match, return an error. + // If we don't have an expected size and we do have a Content-Length, use Content-Length. 
+ if hsize := resp.ContentLength; hsize != -1 { + if size == verify.SizeUnknown { + size = hsize + } else if hsize != size { + return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size) + } + } + + return verify.ReadCloser(resp.Body, size, h) +} + +func (f *fetcher) headBlob(ctx context.Context, h v1.Hash) (*http.Response, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, redact.Error(err) + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + return resp, nil +} + +func (f *fetcher) blobExists(ctx context.Context, h v1.Hash) (bool, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return false, redact.Error(err) + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go new file mode 100644 index 0000000000..f085967ed5 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -0,0 +1,277 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "io" + "net/http" + "net/url" + "sync" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/internal/verify" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var acceptableImageMediaTypes = []types.MediaType{ + types.DockerManifestSchema2, + types.OCIManifestSchema1, +} + +// remoteImage accesses an image from a remote registry +type remoteImage struct { + fetcher fetcher + ref name.Reference + ctx context.Context + manifestLock sync.Mutex // Protects manifest + manifest []byte + configLock sync.Mutex // Protects config + config []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +func (r *remoteImage) ArtifactType() (string, error) { + // kind of a hack, but RawManifest does appropriate locking/memoization + // and makes sure r.descriptor is populated. 
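+	// (fetchManifest derives ArtifactType from the manifest's config media
+	// type, so it is available once the manifest has been fetched.)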
+ if _, err := r.RawManifest(); err != nil { + return "", err + } + return r.descriptor.ArtifactType, nil +} + +var _ partial.CompressedImageCore = (*remoteImage)(nil) + +// Image provides access to a remote image reference. +func Image(ref name.Reference, options ...Option) (v1.Image, error) { + desc, err := Get(ref, options...) + if err != nil { + return nil, err + } + + return desc.Image() +} + +func (r *remoteImage) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestSchema2, nil +} + +func (r *remoteImage) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + // NOTE(jonjohnsonjr): We should never get here because the public entrypoints + // do type-checking via remote.Descriptor. I've left this here for tests that + // directly instantiate a remoteImage. + manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableImageMediaTypes) + if err != nil { + return nil, err + } + + if r.descriptor == nil { + r.descriptor = desc + } + r.mediaType = desc.MediaType + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteImage) RawConfigFile() ([]byte, error) { + r.configLock.Lock() + defer r.configLock.Unlock() + if r.config != nil { + return r.config, nil + } + + m, err := partial.Manifest(r) + if err != nil { + return nil, err + } + + if m.Config.Data != nil { + if err := verify.Descriptor(m.Config); err != nil { + return nil, err + } + r.config = m.Config.Data + return r.config, nil + } + + body, err := r.fetcher.fetchBlob(r.ctx, m.Config.Size, m.Config.Digest) + if err != nil { + return nil, err + } + defer body.Close() + + r.config, err = io.ReadAll(body) + if err != nil { + return nil, err + } + return r.config, nil +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (r *remoteImage) Descriptor() (*v1.Descriptor, error) { + // kind of a hack, but RawManifest does appropriate locking/memoization + // and makes sure r.descriptor is populated. + _, err := r.RawManifest() + return r.descriptor, err +} + +func (r *remoteImage) ConfigLayer() (v1.Layer, error) { + if _, err := r.RawManifest(); err != nil { + return nil, err + } + m, err := partial.Manifest(r) + if err != nil { + return nil, err + } + + return partial.CompressedToLayer(&remoteImageLayer{ + ri: r, + ctx: r.ctx, + digest: m.Config.Digest, + }) +} + +// remoteImageLayer implements partial.CompressedLayer +type remoteImageLayer struct { + ri *remoteImage + ctx context.Context + digest v1.Hash +} + +// Digest implements partial.CompressedLayer +func (rl *remoteImageLayer) Digest() (v1.Hash, error) { + return rl.digest, nil +} + +// Compressed implements partial.CompressedLayer +func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { + urls := []url.URL{rl.ri.fetcher.url("blobs", rl.digest.String())} + + // Add alternative layer sources from URLs (usually none). + d, err := partial.BlobDescriptor(rl, rl.digest) + if err != nil { + return nil, err + } + + if d.Data != nil { + return verify.ReadCloser(io.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest) + } + + // We don't want to log binary layers -- this can break terminals. 
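+	// (The redacted context signals the debug logging transport to omit the
+	// body from its dumps; see internal/redact.)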
+ ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs") + + for _, s := range d.URLs { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + urls = append(urls, *u) + } + + // The lastErr for most pulls will be the same (the first error), but for + // foreign layers we'll want to surface the last one, since we try to pull + // from the registry first, which would often fail. + // TODO: Maybe we don't want to try pulling from the registry first? + var lastErr error + for _, u := range urls { + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := rl.ri.fetcher.Do(req.WithContext(ctx)) + if err != nil { + lastErr = err + continue + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + lastErr = err + continue + } + + return verify.ReadCloser(resp.Body, d.Size, rl.digest) + } + + return nil, lastErr +} + +// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. +func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) { + return partial.Manifest(rl.ri) +} + +// MediaType implements v1.Layer +func (rl *remoteImageLayer) MediaType() (types.MediaType, error) { + bd, err := partial.BlobDescriptor(rl, rl.digest) + if err != nil { + return "", err + } + + return bd.MediaType, nil +} + +// Size implements partial.CompressedLayer +func (rl *remoteImageLayer) Size() (int64, error) { + // Look up the size of this digest in the manifest to avoid a request. + return partial.BlobSize(rl, rl.digest) +} + +// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below. +func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) { + return partial.ConfigFile(rl.ri) +} + +// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have +// available in our ConfigFile. +func (rl *remoteImageLayer) DiffID() (v1.Hash, error) { + return partial.BlobToDiffID(rl, rl.digest) +} + +// Descriptor retains the original descriptor from an image manifest. +// See partial.Descriptor. +func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) { + return partial.BlobDescriptor(rl, rl.digest) +} + +// See partial.Exists. +func (rl *remoteImageLayer) Exists() (bool, error) { + return rl.ri.fetcher.blobExists(rl.ri.ctx, rl.digest) +} + +// LayerByDigest implements partial.CompressedLayer +func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + return &remoteImageLayer{ + ri: r, + ctx: r.ctx, + digest: h, + }, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go new file mode 100644 index 0000000000..b80972c80e --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go @@ -0,0 +1,287 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "fmt" + "sync" + + "github.com/google/go-containerregistry/internal/verify" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var acceptableIndexMediaTypes = []types.MediaType{ + types.DockerManifestList, + types.OCIImageIndex, +} + +// remoteIndex accesses an index from a remote registry +type remoteIndex struct { + fetcher fetcher + ref name.Reference + ctx context.Context + manifestLock sync.Mutex // Protects manifest + manifest []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +// Index provides access to a remote index reference. +func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) { + desc, err := get(ref, acceptableIndexMediaTypes, options...) + if err != nil { + return nil, err + } + + return desc.ImageIndex() +} + +func (r *remoteIndex) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestList, nil +} + +func (r *remoteIndex) Digest() (v1.Hash, error) { + return partial.Digest(r) +} + +func (r *remoteIndex) Size() (int64, error) { + return partial.Size(r) +} + +func (r *remoteIndex) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + // NOTE(jonjohnsonjr): We should never get here because the public entrypoints + // do type-checking via remote.Descriptor. I've left this here for tests that + // directly instantiate a remoteIndex. + manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableIndexMediaTypes) + if err != nil { + return nil, err + } + + if r.descriptor == nil { + r.descriptor = desc + } + r.mediaType = desc.MediaType + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { + b, err := r.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseIndexManifest(bytes.NewReader(b)) +} + +func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { + desc, err := r.childByHash(h) + if err != nil { + return nil, err + } + + // Descriptor.Image will handle coercing nested indexes into an Image. + return desc.Image() +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) { + // kind of a hack, but RawManifest does appropriate locking/memoization + // and makes sure r.descriptor is populated. + _, err := r.RawManifest() + return r.descriptor, err +} + +func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + desc, err := r.childByHash(h) + if err != nil { + return nil, err + } + return desc.ImageIndex() +} + +// Workaround for #819. 
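+//
+// An index has no layers of its own; this looks the digest up among the
+// index's child descriptors and, if present, exposes it as a layer read from
+// the blob endpoint, wrapped so it can be cross-repo mounted on push.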
+func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + if h == childDesc.Digest { + l, err := partial.CompressedToLayer(&remoteLayer{ + fetcher: r.fetcher, + ctx: r.ctx, + digest: h, + }) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: r.ref.Context().Digest(h.String()), + }, nil + } + } + return nil, fmt.Errorf("layer not found: %s", h) +} + +func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) { + desc, err := r.childByPlatform(platform) + if err != nil { + return nil, err + } + + // Descriptor.Image will handle coercing nested indexes into an Image. + return desc.Image() +} + +// This naively matches the first manifest with matching platform attributes. +// +// We should probably use this instead: +// +// github.com/containerd/containerd/platforms +// +// But first we'd need to migrate to: +// +// github.com/opencontainers/image-spec/specs-go/v1 +func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + // If platform is missing from child descriptor, assume it's amd64/linux. + p := defaultPlatform + if childDesc.Platform != nil { + p = *childDesc.Platform + } + + if matchesPlatform(p, platform) { + return r.childDescriptor(childDesc, platform) + } + } + return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.ref) +} + +func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + if h == childDesc.Digest { + return r.childDescriptor(childDesc, defaultPlatform) + } + } + return nil, fmt.Errorf("no child with digest %s in index %s", h, r.ref) +} + +// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option. +func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) { + ref := r.ref.Context().Digest(child.Digest.String()) + var ( + manifest []byte + err error + ) + if child.Data != nil { + if err := verify.Descriptor(child); err != nil { + return nil, err + } + manifest = child.Data + } else { + manifest, _, err = r.fetcher.fetchManifest(r.ctx, ref, []types.MediaType{child.MediaType}) + if err != nil { + return nil, err + } + } + + if child.MediaType.IsImage() { + mf, _ := v1.ParseManifest(bytes.NewReader(manifest)) + // Failing to parse as a manifest should just be ignored. + // The manifest might not be valid, and that's okay. + if mf != nil && !mf.Config.MediaType.IsConfig() { + child.ArtifactType = string(mf.Config.MediaType) + } + } + + return &Descriptor{ + ref: ref, + ctx: r.ctx, + fetcher: r.fetcher, + Manifest: manifest, + Descriptor: child, + platform: platform, + }, nil +} + +// matchesPlatform checks if the given platform matches the required platforms. +// The given platform matches the required platform if +// - architecture and OS are identical. +// - OS version and variant are identical if provided. +// - features and OS features of the required platform are subsets of those of the given platform. +func matchesPlatform(given, required v1.Platform) bool { + // Required fields that must be identical. 
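+	// e.g. a given linux/arm64 image can never satisfy a required linux/amd64.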
+	if given.Architecture != required.Architecture || given.OS != required.OS {
+		return false
+	}
+
+	// Optional fields that may be empty, but must be identical if provided.
+	if required.OSVersion != "" && given.OSVersion != required.OSVersion {
+		return false
+	}
+	if required.Variant != "" && given.Variant != required.Variant {
+		return false
+	}
+
+	// Verify required platform's features are a subset of given platform's features.
+	if !isSubset(given.OSFeatures, required.OSFeatures) {
+		return false
+	}
+	if !isSubset(given.Features, required.Features) {
+		return false
+	}
+
+	return true
+}
+
+// isSubset checks if the required array of strings is a subset of the given lst.
+func isSubset(lst, required []string) bool {
+	set := make(map[string]bool)
+	for _, value := range lst {
+		set[value] = true
+	}
+
+	for _, value := range required {
+		if _, ok := set[value]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go
new file mode 100644
index 0000000000..39c2059502
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go
@@ -0,0 +1,77 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"context"
+	"io"
+
+	"github.com/google/go-containerregistry/internal/redact"
+	"github.com/google/go-containerregistry/internal/verify"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// remoteLayer implements partial.CompressedLayer
+type remoteLayer struct {
+	ctx     context.Context
+	fetcher fetcher
+	digest  v1.Hash
+}
+
+// Compressed implements partial.CompressedLayer
+func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
+	// We don't want to log binary layers -- this can break terminals.
+	ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs")
+	return rl.fetcher.fetchBlob(ctx, verify.SizeUnknown, rl.digest)
+}
+
+// Size implements partial.CompressedLayer
+func (rl *remoteLayer) Size() (int64, error) {
+	resp, err := rl.fetcher.headBlob(rl.ctx, rl.digest)
+	if err != nil {
+		return -1, err
+	}
+	defer resp.Body.Close()
+	return resp.ContentLength, nil
+}
+
+// Digest implements partial.CompressedLayer
+func (rl *remoteLayer) Digest() (v1.Hash, error) {
+	return rl.digest, nil
+}
+
+// MediaType implements v1.Layer
+func (rl *remoteLayer) MediaType() (types.MediaType, error) {
+	return types.DockerLayer, nil
+}
+
+// See partial.Exists.
+func (rl *remoteLayer) Exists() (bool, error) {
+	return rl.fetcher.blobExists(rl.ctx, rl.digest)
+}
+
+// Layer reads the given blob reference from a registry as a Layer. A blob
+// reference here is just a punned name.Digest where the digest portion is the
+// digest of the blob to be read and the repository portion is the repo where
+// that blob lives.
+func Layer(ref name.Digest, options ...Option) (v1.Layer, error) {
+	o, err := makeOptions(options...)
+	if err != nil {
+		return nil, err
+	}
+	return newPuller(o).Layer(o.context, ref)
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
new file mode 100644
index 0000000000..910d2a94c7
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
@@ -0,0 +1,152 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+// ListWithContext calls List with the given context.
+//
+// Deprecated: Use List and WithContext. This will be removed in a future release.
+func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) {
+	return List(repo, append(options, WithContext(ctx))...)
+}
+
+// List calls /tags/list for the given repository, returning the list of tags
+// in the "tags" property.
+func List(repo name.Repository, options ...Option) ([]string, error) {
+	o, err := makeOptions(options...)
+	if err != nil {
+		return nil, err
+	}
+	return newPuller(o).List(o.context, repo)
+}
+
+type Tags struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+	Next string   `json:"next,omitempty"`
+}
+
+func (f *fetcher) listPage(ctx context.Context, repo name.Repository, next string, pageSize int) (*Tags, error) {
+	if next == "" {
+		uri := &url.URL{
+			Scheme: repo.Scheme(),
+			Host:   repo.RegistryStr(),
+			Path:   fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()),
+		}
+		if pageSize > 0 {
+			uri.RawQuery = fmt.Sprintf("n=%d", pageSize)
+		}
+		next = uri.String()
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "GET", next, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := f.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := transport.CheckError(resp, http.StatusOK); err != nil {
+		return nil, err
+	}
+
+	parsed := Tags{}
+	if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
+		return nil, err
+	}
+
+	if err := resp.Body.Close(); err != nil {
+		return nil, err
+	}
+
+	uri, err := getNextPageURL(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	if uri != nil {
+		parsed.Next = uri.String()
+	}
+
+	return &parsed, nil
+}
+
+// getNextPageURL checks if there is a Link header in a http.Response which
+// contains a link to the next page. If yes it returns the url.URL of the next
+// page otherwise it returns nil.
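+//
+// An illustrative header (not taken from the source):
+//
+//	Link: </v2/example/tags/list?n=100&last=v1.9>; rel="next"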
+func getNextPageURL(resp *http.Response) (*url.URL, error) { + link := resp.Header.Get("Link") + if link == "" { + return nil, nil + } + + if link[0] != '<' { + return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link) + } + + end := strings.Index(link, ">") + if end == -1 { + return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link) + } + link = link[1:end] + + linkURL, err := url.Parse(link) + if err != nil { + return nil, err + } + if resp.Request == nil || resp.Request.URL == nil { + return nil, nil + } + linkURL = resp.Request.URL.ResolveReference(linkURL) + return linkURL, nil +} + +type Lister struct { + f *fetcher + repo name.Repository + pageSize int + + page *Tags + err error + + needMore bool +} + +func (l *Lister) Next(ctx context.Context) (*Tags, error) { + if l.needMore { + l.page, l.err = l.f.listPage(ctx, l.repo, l.page.Next, l.pageSize) + } else { + l.needMore = true + } + return l.page, l.err +} + +func (l *Lister) HasNext() bool { + return l.page != nil && (!l.needMore || l.page.Next != "") +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go new file mode 100644 index 0000000000..36d088567d --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" +) + +// MountableLayer wraps a v1.Layer in a shim that enables the layer to be +// "mounted" when published to another registry. +type MountableLayer struct { + v1.Layer + + Reference name.Reference +} + +// Descriptor retains the original descriptor from an image manifest. +// See partial.Descriptor. +func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) { + return partial.Descriptor(ml.Layer) +} + +// Exists is a hack. See partial.Exists. +func (ml *MountableLayer) Exists() (bool, error) { + return partial.Exists(ml.Layer) +} + +// mountableImage wraps the v1.Layer references returned by the embedded v1.Image +// in MountableLayer's so that remote.Write might attempt to mount them from their +// source repository. 
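+// Cross-repo mounting lets the registry copy a blob server-side via
+// POST /v2/<repo>/blobs/uploads/?mount=<digest>&from=<source-repo> instead of
+// the client re-uploading it.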
+type mountableImage struct { + v1.Image + + Reference name.Reference +} + +// Layers implements v1.Image +func (mi *mountableImage) Layers() ([]v1.Layer, error) { + ls, err := mi.Image.Layers() + if err != nil { + return nil, err + } + mls := make([]v1.Layer, 0, len(ls)) + for _, l := range ls { + mls = append(mls, &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }) + } + return mls, nil +} + +// LayerByDigest implements v1.Image +func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) { + l, err := mi.Image.LayerByDigest(d) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} + +// LayerByDiffID implements v1.Image +func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { + l, err := mi.Image.LayerByDiffID(d) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) { + return partial.Descriptor(mi.Image) +} + +// ConfigLayer retains the original reference so that it can be mounted. +// See partial.ConfigLayer. +func (mi *mountableImage) ConfigLayer() (v1.Layer, error) { + l, err := partial.ConfigLayer(mi.Image) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go new file mode 100644 index 0000000000..a6705de895 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go @@ -0,0 +1,46 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "github.com/google/go-containerregistry/pkg/name" + "golang.org/x/sync/errgroup" +) + +// MultiWrite writes the given Images or ImageIndexes to the given refs, as +// efficiently as possible, by deduping shared layer blobs while uploading them +// in parallel. +func MultiWrite(todo map[name.Reference]Taggable, options ...Option) (rerr error) { + o, err := makeOptions(options...) 
+ if err != nil { + return err + } + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() + } + p := newPusher(o) + + g, ctx := errgroup.WithContext(o.context) + g.SetLimit(o.jobs) + + for ref, t := range todo { + ref, t := ref, t + g.Go(func() error { + return p.Push(ctx, ref, t) + }) + } + + return g.Wait() +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go new file mode 100644 index 0000000000..15b7da1e41 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -0,0 +1,354 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "errors" + "io" + "net" + "net/http" + "syscall" + "time" + + "github.com/google/go-containerregistry/internal/retry" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// Option is a functional option for remote operations. +type Option func(*options) error + +type options struct { + auth authn.Authenticator + keychain authn.Keychain + transport http.RoundTripper + context context.Context + jobs int + userAgent string + allowNondistributableArtifacts bool + progress *progress + retryBackoff Backoff + retryPredicate retry.Predicate + retryStatusCodes []int + + // Only these options can overwrite Reuse()d options. + platform v1.Platform + pageSize int + filter map[string]string + + // Set by Reuse, we currently store one or the other. + puller *Puller + pusher *Pusher +} + +var defaultPlatform = v1.Platform{ + Architecture: "amd64", + OS: "linux", +} + +// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib +type Backoff = retry.Backoff + +var defaultRetryPredicate retry.Predicate = func(err error) bool { + // Various failure modes here, as we're often reading from and writing to + // the network. + if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, net.ErrClosed) { + logs.Warn.Printf("retrying %v", err) + return true + } + return false +} + +// Try this three times, waiting 1s after first failure, 3s after second. 
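+// (Duration is the first wait; each subsequent wait is scaled by Factor, with
+// up to 10% Jitter added.)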
+var defaultRetryBackoff = Backoff{ + Duration: 1.0 * time.Second, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, +} + +// Useful for tests +var fastBackoff = Backoff{ + Duration: 1.0 * time.Millisecond, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, +} + +var defaultRetryStatusCodes = []int{ + http.StatusRequestTimeout, + http.StatusInternalServerError, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout, + 499, // nginx-specific, client closed request + 522, // Cloudflare-specific, connection timeout +} + +const ( + defaultJobs = 4 + + // ECR returns an error if n > 1000: + // https://github.com/google/go-containerregistry/issues/1091 + defaultPageSize = 1000 +) + +// DefaultTransport is based on http.DefaultTransport with modifications +// documented inline below. +var DefaultTransport http.RoundTripper = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + // We usually are dealing with 2 hosts (at most), split MaxIdleConns between them. + MaxIdleConnsPerHost: 50, +} + +func makeOptions(opts ...Option) (*options, error) { + o := &options{ + transport: DefaultTransport, + platform: defaultPlatform, + context: context.Background(), + jobs: defaultJobs, + pageSize: defaultPageSize, + retryPredicate: defaultRetryPredicate, + retryBackoff: defaultRetryBackoff, + retryStatusCodes: defaultRetryStatusCodes, + } + + for _, option := range opts { + if err := option(o); err != nil { + return nil, err + } + } + + switch { + case o.auth != nil && o.keychain != nil: + // It is a better experience to explicitly tell a caller their auth is misconfigured + // than potentially fail silently when the correct auth is overridden by option misuse. + return nil, errors.New("provide an option for either authn.Authenticator or authn.Keychain, not both") + case o.auth == nil: + o.auth = authn.Anonymous + } + + // transport.Wrapper is a signal that consumers are opt-ing into providing their own transport without any additional wrapping. + // This is to allow consumers full control over the transports logic, such as providing retry logic. + if _, ok := o.transport.(*transport.Wrapper); !ok { + // Wrap the transport in something that logs requests and responses. + // It's expensive to generate the dumps, so skip it if we're writing + // to nothing. + if logs.Enabled(logs.Debug) { + o.transport = transport.NewLogger(o.transport) + } + + // Using customized retry predicate if provided, and fallback to default if not. + predicate := o.retryPredicate + if predicate == nil { + predicate = defaultRetryPredicate + } + + // Wrap the transport in something that can retry network flakes. + o.transport = transport.NewRetry(o.transport, transport.WithRetryBackoff(o.retryBackoff), transport.WithRetryPredicate(predicate), transport.WithRetryStatusCodes(o.retryStatusCodes...)) + // Wrap this last to prevent transport.New from double-wrapping. + if o.userAgent != "" { + o.transport = transport.NewUserAgent(o.transport, o.userAgent) + } + } + + return o, nil +} + +// WithTransport is a functional option for overriding the default transport +// for remote operations. +// If transport.Wrapper is provided, this signals that the consumer does *not* want any further wrapping to occur. +// i.e. 
logging, retry and useragent +// +// The default transport is DefaultTransport. +func WithTransport(t http.RoundTripper) Option { + return func(o *options) error { + o.transport = t + return nil + } +} + +// WithAuth is a functional option for overriding the default authenticator +// for remote operations. +// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set. +// +// The default authenticator is authn.Anonymous. +func WithAuth(auth authn.Authenticator) Option { + return func(o *options) error { + o.auth = auth + return nil + } +} + +// WithAuthFromKeychain is a functional option for overriding the default +// authenticator for remote operations, using an authn.Keychain to find +// credentials. +// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set. +// +// The default authenticator is authn.Anonymous. +func WithAuthFromKeychain(keys authn.Keychain) Option { + return func(o *options) error { + o.keychain = keys + return nil + } +} + +// WithPlatform is a functional option for overriding the default platform +// that Image and Descriptor.Image use for resolving an index to an image. +// +// The default platform is amd64/linux. +func WithPlatform(p v1.Platform) Option { + return func(o *options) error { + o.platform = p + return nil + } +} + +// WithContext is a functional option for setting the context in http requests +// performed by a given function. Note that this context is used for _all_ +// http requests, not just the initial volley. E.g., for remote.Image, the +// context will be set on http requests generated by subsequent calls to +// RawConfigFile() and even methods on layers returned by Layers(). +// +// The default context is context.Background(). +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.context = ctx + return nil + } +} + +// WithJobs is a functional option for setting the parallelism of remote +// operations performed by a given function. Note that not all remote +// operations support parallelism. +// +// The default value is 4. +func WithJobs(jobs int) Option { + return func(o *options) error { + if jobs <= 0 { + return errors.New("jobs must be greater than zero") + } + o.jobs = jobs + return nil + } +} + +// WithUserAgent adds the given string to the User-Agent header for any HTTP +// requests. This header will also include "go-containerregistry/${version}". +// +// If you want to completely overwrite the User-Agent header, use WithTransport. +func WithUserAgent(ua string) Option { + return func(o *options) error { + o.userAgent = ua + return nil + } +} + +// WithNondistributable includes non-distributable (foreign) layers +// when writing images, see: +// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers +// +// The default behaviour is to skip these layers +func WithNondistributable(o *options) error { + o.allowNondistributableArtifacts = true + return nil +} + +// WithProgress takes a channel that will receive progress updates as bytes are written. +// +// Sending updates to an unbuffered channel will block writes, so callers +// should provide a buffered channel to avoid potential deadlocks. +func WithProgress(updates chan<- v1.Update) Option { + return func(o *options) error { + o.progress = &progress{updates: updates} + o.progress.lastUpdate = &v1.Update{} + return nil + } +} + +// WithPageSize sets the given size as the value of parameter 'n' in the request. 
+//
+// To omit the `n` parameter entirely, use WithPageSize(0).
+// The default value is 1000.
+func WithPageSize(size int) Option {
+	return func(o *options) error {
+		o.pageSize = size
+		return nil
+	}
+}
+
+// WithRetryBackoff sets the httpBackoff for retry HTTP operations.
+func WithRetryBackoff(backoff Backoff) Option {
+	return func(o *options) error {
+		o.retryBackoff = backoff
+		return nil
+	}
+}
+
+// WithRetryPredicate sets the predicate for retry HTTP operations.
+func WithRetryPredicate(predicate retry.Predicate) Option {
+	return func(o *options) error {
+		o.retryPredicate = predicate
+		return nil
+	}
+}
+
+// WithRetryStatusCodes sets which http response codes will be retried.
+func WithRetryStatusCodes(codes ...int) Option {
+	return func(o *options) error {
+		o.retryStatusCodes = codes
+		return nil
+	}
+}
+
+// WithFilter sets the filter querystring for HTTP operations.
+func WithFilter(key string, value string) Option {
+	return func(o *options) error {
+		if o.filter == nil {
+			o.filter = map[string]string{}
+		}
+		o.filter[key] = value
+		return nil
+	}
+}
+
+// Reuse takes a Puller or Pusher and reuses it for remote interactions
+// rather than starting from a clean slate. For example, it will reuse token exchanges
+// when possible and avoid sending redundant HEAD requests.
+//
+// Reuse will take precedence over other options passed to most remote functions because
+// most options deal with setting up auth and transports, which Reuse intentionally skips.
+func Reuse[I *Puller | *Pusher](i I) Option {
+	return func(o *options) error {
+		if puller, ok := any(i).(*Puller); ok {
+			o.puller = puller
+		} else if pusher, ok := any(i).(*Pusher); ok {
+			o.pusher = pusher
+		}
+		return nil
+	}
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go
new file mode 100644
index 0000000000..fe60c8c353
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go
@@ -0,0 +1,76 @@
+// Copyright 2022 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package remote + +import ( + "io" + "sync" + "sync/atomic" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +type progress struct { + sync.Mutex + updates chan<- v1.Update + lastUpdate *v1.Update +} + +func (p *progress) total(delta int64) { + p.Lock() + defer p.Unlock() + atomic.AddInt64(&p.lastUpdate.Total, delta) +} + +func (p *progress) complete(delta int64) { + p.Lock() + defer p.Unlock() + p.updates <- v1.Update{ + Total: p.lastUpdate.Total, + Complete: atomic.AddInt64(&p.lastUpdate.Complete, delta), + } +} + +func (p *progress) err(err error) error { + if err != nil && p.updates != nil { + p.updates <- v1.Update{Error: err} + } + return err +} + +func (p *progress) Close(err error) { + _ = p.err(err) + close(p.updates) +} + +type progressReader struct { + rc io.ReadCloser + + count *int64 // number of bytes this reader has read, to support resetting on retry. + progress *progress +} + +func (r *progressReader) Read(b []byte) (int, error) { + n, err := r.rc.Read(b) + if err != nil { + return n, err + } + atomic.AddInt64(r.count, int64(n)) + // TODO: warn/debug log if sending takes too long, or if sending is blocked while context is canceled. + r.progress.complete(int64(n)) + return n, nil +} + +func (r *progressReader) Close() error { return r.rc.Close() } diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go new file mode 100644 index 0000000000..7da8017eec --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go @@ -0,0 +1,222 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "sync" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type Puller struct { + o *options + + // map[resource]*reader + readers sync.Map +} + +func NewPuller(options ...Option) (*Puller, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + + return newPuller(o), nil +} + +func newPuller(o *options) *Puller { + if o.puller != nil { + return o.puller + } + return &Puller{ + o: o, + } +} + +type reader struct { + // in + target resource + o *options + + // f() + once sync.Once + + // out + f *fetcher + err error +} + +// this will run once per reader instance +func (r *reader) init(ctx context.Context) error { + r.once.Do(func() { + r.f, r.err = makeFetcher(ctx, r.target, r.o) + }) + return r.err +} + +func (p *Puller) fetcher(ctx context.Context, target resource) (*fetcher, error) { + v, _ := p.readers.LoadOrStore(target, &reader{ + target: target, + o: p.o, + }) + rr := v.(*reader) + return rr.f, rr.init(ctx) +} + +// Head is like remote.Head, but avoids re-authenticating when possible. 
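+// A fetcher is created lazily, once per target registry or repository, and
+// cached in p.readers, which is how the methods below avoid repeating auth handshakes.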
+func (p *Puller) Head(ctx context.Context, ref name.Reference) (*v1.Descriptor, error) { + f, err := p.fetcher(ctx, ref.Context()) + if err != nil { + return nil, err + } + + return f.headManifest(ctx, ref, allManifestMediaTypes) +} + +// Get is like remote.Get, but avoids re-authenticating when possible. +func (p *Puller) Get(ctx context.Context, ref name.Reference) (*Descriptor, error) { + return p.get(ctx, ref, allManifestMediaTypes, p.o.platform) +} + +func (p *Puller) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) { + f, err := p.fetcher(ctx, ref.Context()) + if err != nil { + return nil, err + } + return f.get(ctx, ref, acceptable, platform) +} + +// Layer is like remote.Layer, but avoids re-authenticating when possible. +func (p *Puller) Layer(ctx context.Context, ref name.Digest) (v1.Layer, error) { + f, err := p.fetcher(ctx, ref.Context()) + if err != nil { + return nil, err + } + + h, err := v1.NewHash(ref.Identifier()) + if err != nil { + return nil, err + } + l, err := partial.CompressedToLayer(&remoteLayer{ + fetcher: *f, + ctx: ctx, + digest: h, + }) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: ref, + }, nil +} + +// List lists tags in a repo and handles pagination, returning the full list of tags. +func (p *Puller) List(ctx context.Context, repo name.Repository) ([]string, error) { + lister, err := p.Lister(ctx, repo) + if err != nil { + return nil, err + } + + tagList := []string{} + for lister.HasNext() { + tags, err := lister.Next(ctx) + if err != nil { + return nil, err + } + tagList = append(tagList, tags.Tags...) + } + + return tagList, nil +} + +// Lister lists tags in a repo and returns a Lister for paginating through the results. +func (p *Puller) Lister(ctx context.Context, repo name.Repository) (*Lister, error) { + return p.lister(ctx, repo, p.o.pageSize) +} + +func (p *Puller) lister(ctx context.Context, repo name.Repository, pageSize int) (*Lister, error) { + f, err := p.fetcher(ctx, repo) + if err != nil { + return nil, err + } + page, err := f.listPage(ctx, repo, "", pageSize) + if err != nil { + return nil, err + } + return &Lister{ + f: f, + repo: repo, + pageSize: pageSize, + page: page, + err: err, + }, nil +} + +// Catalog lists repos in a registry and handles pagination, returning the full list of repos. +func (p *Puller) Catalog(ctx context.Context, reg name.Registry) ([]string, error) { + return p.catalog(ctx, reg, p.o.pageSize) +} + +func (p *Puller) catalog(ctx context.Context, reg name.Registry, pageSize int) ([]string, error) { + catalogger, err := p.catalogger(ctx, reg, pageSize) + if err != nil { + return nil, err + } + repoList := []string{} + for catalogger.HasNext() { + repos, err := catalogger.Next(ctx) + if err != nil { + return nil, err + } + repoList = append(repoList, repos.Repos...) + } + return repoList, nil +} + +// Catalogger lists repos in a registry and returns a Catalogger for paginating through the results. 
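+// Use this rather than Catalog when the full repository list could be too
+// large to buffer in memory.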
+func (p *Puller) Catalogger(ctx context.Context, reg name.Registry) (*Catalogger, error) { + return p.catalogger(ctx, reg, p.o.pageSize) +} + +func (p *Puller) catalogger(ctx context.Context, reg name.Registry, pageSize int) (*Catalogger, error) { + f, err := p.fetcher(ctx, reg) + if err != nil { + return nil, err + } + page, err := f.catalogPage(ctx, reg, "", pageSize) + if err != nil { + return nil, err + } + return &Catalogger{ + f: f, + reg: reg, + pageSize: pageSize, + page: page, + err: err, + }, nil +} + +func (p *Puller) referrers(ctx context.Context, d name.Digest, filter map[string]string) (v1.ImageIndex, error) { + f, err := p.fetcher(ctx, d.Context()) + if err != nil { + return nil, err + } + return f.fetchReferrers(ctx, filter, d) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go new file mode 100644 index 0000000000..47e3b806ee --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go @@ -0,0 +1,573 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/http" + "net/url" + "sync" + + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +type manifest interface { + Taggable + partial.Describable +} + +// key is either v1.Hash or v1.Layer (for stream.Layer) +type workers struct { + // map[v1.Hash|v1.Layer]*sync.Once + onces sync.Map + + // map[v1.Hash|v1.Layer]error + errors sync.Map +} + +func nop() error { + return nil +} + +func (w *workers) err(digest v1.Hash) error { + v, ok := w.errors.Load(digest) + if !ok || v == nil { + return nil + } + return v.(error) +} + +func (w *workers) Do(digest v1.Hash, f func() error) error { + // We don't care if it was loaded or not because the sync.Once will do it for us. + once, _ := w.onces.LoadOrStore(digest, &sync.Once{}) + + once.(*sync.Once).Do(func() { + w.errors.Store(digest, f()) + }) + + err := w.err(digest) + if err != nil { + // Allow this to be retried by another caller. + w.onces.Delete(digest) + } + return err +} + +func (w *workers) Stream(layer v1.Layer, f func() error) error { + // We don't care if it was loaded or not because the sync.Once will do it for us. 
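+	// Streaming layers are keyed by the v1.Layer value itself rather than by
+	// digest, because a stream.Layer's digest is unknown until it has been consumed.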
+ once, _ := w.onces.LoadOrStore(layer, &sync.Once{}) + + once.(*sync.Once).Do(func() { + w.errors.Store(layer, f()) + }) + + v, ok := w.errors.Load(layer) + if !ok || v == nil { + return nil + } + + return v.(error) +} + +type Pusher struct { + o *options + + // map[name.Repository]*repoWriter + writers sync.Map +} + +func NewPusher(options ...Option) (*Pusher, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + + return newPusher(o), nil +} + +func newPusher(o *options) *Pusher { + if o.pusher != nil { + return o.pusher + } + return &Pusher{ + o: o, + } +} + +func (p *Pusher) writer(ctx context.Context, repo name.Repository, o *options) (*repoWriter, error) { + v, _ := p.writers.LoadOrStore(repo, &repoWriter{ + repo: repo, + o: o, + }) + rw := v.(*repoWriter) + return rw, rw.init(ctx) +} + +func (p *Pusher) Put(ctx context.Context, ref name.Reference, t Taggable) error { + w, err := p.writer(ctx, ref.Context(), p.o) + if err != nil { + return err + } + + m, err := taggableToManifest(t) + if err != nil { + return err + } + + return w.commitManifest(ctx, ref, m) +} + +func (p *Pusher) Push(ctx context.Context, ref name.Reference, t Taggable) error { + w, err := p.writer(ctx, ref.Context(), p.o) + if err != nil { + return err + } + return w.writeManifest(ctx, ref, t) +} + +func (p *Pusher) Upload(ctx context.Context, repo name.Repository, l v1.Layer) error { + w, err := p.writer(ctx, repo, p.o) + if err != nil { + return err + } + return w.writeLayer(ctx, l) +} + +func (p *Pusher) Delete(ctx context.Context, ref name.Reference) error { + w, err := p.writer(ctx, ref.Context(), p.o) + if err != nil { + return err + } + + u := url.URL{ + Scheme: ref.Context().Scheme(), + Host: ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), + } + + req, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return err + } + + resp, err := w.w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) + + // TODO(jason): If the manifest had a `subject`, and if the registry + // doesn't support Referrers, update the index pointed to by the + // subject's fallback tag to remove the descriptor for this manifest. +} + +type repoWriter struct { + repo name.Repository + o *options + once sync.Once + + w *writer + err error + + work *workers +} + +// this will run once per repoWriter instance +func (rw *repoWriter) init(ctx context.Context) error { + rw.once.Do(func() { + rw.work = &workers{} + rw.w, rw.err = makeWriter(ctx, rw.repo, nil, rw.o) + }) + return rw.err +} + +func (rw *repoWriter) writeDeps(ctx context.Context, m manifest) error { + if img, ok := m.(v1.Image); ok { + return rw.writeLayers(ctx, img) + } + + if idx, ok := m.(v1.ImageIndex); ok { + return rw.writeChildren(ctx, idx) + } + + // This has no deps, not an error (e.g. something you want to just PUT). 
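+	// (For example, a raw manifest wrapped in tagManifest by taggableToManifest.)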
+ return nil +} + +type describable struct { + desc v1.Descriptor +} + +func (d describable) Digest() (v1.Hash, error) { + return d.desc.Digest, nil +} + +func (d describable) Size() (int64, error) { + return d.desc.Size, nil +} + +func (d describable) MediaType() (types.MediaType, error) { + return d.desc.MediaType, nil +} + +type tagManifest struct { + Taggable + partial.Describable +} + +func taggableToManifest(t Taggable) (manifest, error) { + if m, ok := t.(manifest); ok { + return m, nil + } + + if d, ok := t.(*Descriptor); ok { + if d.MediaType.IsIndex() { + return d.ImageIndex() + } + + if d.MediaType.IsImage() { + return d.Image() + } + + if d.MediaType.IsSchema1() { + return d.Schema1() + } + + return tagManifest{t, describable{d.toDesc()}}, nil + } + + desc := v1.Descriptor{ + // A reasonable default if Taggable doesn't implement MediaType. + MediaType: types.DockerManifestSchema2, + } + + b, err := t.RawManifest() + if err != nil { + return nil, err + } + + if wmt, ok := t.(withMediaType); ok { + desc.MediaType, err = wmt.MediaType() + if err != nil { + return nil, err + } + } + + desc.Digest, desc.Size, err = v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + return tagManifest{t, describable{desc}}, nil +} + +func (rw *repoWriter) writeManifest(ctx context.Context, ref name.Reference, t Taggable) error { + m, err := taggableToManifest(t) + if err != nil { + return err + } + + needDeps := true + + digest, err := m.Digest() + if errors.Is(err, stream.ErrNotComputed) { + if err := rw.writeDeps(ctx, m); err != nil { + return err + } + + needDeps = false + + digest, err = m.Digest() + if err != nil { + return err + } + } else if err != nil { + return err + } + + // This may be a lazy child where we have no ref until digest is computed. + if ref == nil { + ref = rw.repo.Digest(digest.String()) + } + + // For tags, we want to do this check outside of our Work.Do closure because + // we don't want to dedupe based on the manifest digest. + _, byTag := ref.(name.Tag) + if byTag { + if exists, err := rw.manifestExists(ctx, ref, t); err != nil { + return err + } else if exists { + return nil + } + } + + // The following work.Do will get deduped by digest, so it won't happen unless + // this tag happens to be the first commitManifest to run for that digest. + needPut := byTag + + if err := rw.work.Do(digest, func() error { + if !byTag { + if exists, err := rw.manifestExists(ctx, ref, t); err != nil { + return err + } else if exists { + return nil + } + } + + if needDeps { + if err := rw.writeDeps(ctx, m); err != nil { + return err + } + } + + needPut = false + return rw.commitManifest(ctx, ref, m) + }); err != nil { + return err + } + + if !needPut { + return nil + } + + // Only runs for tags that got deduped by digest. + return rw.commitManifest(ctx, ref, m) +} + +func (rw *repoWriter) writeChildren(ctx context.Context, idx v1.ImageIndex) error { + children, err := partial.Manifests(idx) + if err != nil { + return err + } + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(rw.o.jobs) + + for _, child := range children { + child := child + if err := rw.writeChild(ctx, child, g); err != nil { + return err + } + } + + return g.Wait() +} + +func (rw *repoWriter) writeChild(ctx context.Context, child partial.Describable, g *errgroup.Group) error { + switch child := child.(type) { + case v1.ImageIndex: + // For recursive index, we want to do a depth-first launching of goroutines + // to avoid deadlocking. 
+ // + // Note that this is rare, so the impact of this should be really small. + return rw.writeManifest(ctx, nil, child) + case v1.Image: + g.Go(func() error { + return rw.writeManifest(ctx, nil, child) + }) + case v1.Layer: + g.Go(func() error { + return rw.writeLayer(ctx, child) + }) + default: + // This can't happen. + return fmt.Errorf("encountered unknown child: %T", child) + } + return nil +} + +// TODO: Consider caching some representation of the tags/digests in the destination +// repository as a hint to avoid this optimistic check in cases where we will most +// likely have to do a PUT anyway, e.g. if we are overwriting a tag we just wrote. +func (rw *repoWriter) manifestExists(ctx context.Context, ref name.Reference, t Taggable) (bool, error) { + f := &fetcher{ + target: ref.Context(), + client: rw.w.client, + } + + m, err := taggableToManifest(t) + if err != nil { + return false, err + } + + digest, err := m.Digest() + if err != nil { + // Possibly due to streaming layers. + return false, nil + } + got, err := f.headManifest(ctx, ref, allManifestMediaTypes) + if err != nil { + var terr *transport.Error + if errors.As(err, &terr) { + if terr.StatusCode == http.StatusNotFound { + return false, nil + } + + // We treat a 403 here as non-fatal because this existence check is an optimization and + // some registries will return a 403 instead of a 404 in certain situations. + // E.g. https://jfrog.atlassian.net/browse/RTFACT-13797 + if terr.StatusCode == http.StatusForbidden { + logs.Debug.Printf("manifestExists unexpected 403: %v", err) + return false, nil + } + } + + return false, err + } + + if digest != got.Digest { + // Mark that we saw this digest in the registry so we don't have to check it again. + rw.work.Do(got.Digest, nop) + + return false, nil + } + + if tag, ok := ref.(name.Tag); ok { + logs.Progress.Printf("existing manifest: %s@%s", tag.Identifier(), got.Digest) + } else { + logs.Progress.Print("existing manifest: ", got.Digest) + } + + return true, nil +} + +func (rw *repoWriter) commitManifest(ctx context.Context, ref name.Reference, m manifest) error { + if rw.o.progress != nil { + size, err := m.Size() + if err != nil { + return err + } + rw.o.progress.total(size) + } + + return rw.w.commitManifest(ctx, m, ref) +} + +func (rw *repoWriter) writeLayers(pctx context.Context, img v1.Image) error { + ls, err := img.Layers() + if err != nil { + return err + } + + g, ctx := errgroup.WithContext(pctx) + g.SetLimit(rw.o.jobs) + + for _, l := range ls { + l := l + + g.Go(func() error { + return rw.writeLayer(ctx, l) + }) + } + + mt, err := img.MediaType() + if err != nil { + return err + } + + if mt.IsSchema1() { + return g.Wait() + } + + cl, err := partial.ConfigLayer(img) + if errors.Is(err, stream.ErrNotComputed) { + if err := g.Wait(); err != nil { + return err + } + + cl, err := partial.ConfigLayer(img) + if err != nil { + return err + } + + return rw.writeLayer(pctx, cl) + } else if err != nil { + return err + } + + g.Go(func() error { + return rw.writeLayer(ctx, cl) + }) + + return g.Wait() +} + +func (rw *repoWriter) writeLayer(ctx context.Context, l v1.Layer) error { + // Skip any non-distributable things. 
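+	// (They are only uploaded when WithNondistributable is set.)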
+ mt, err := l.MediaType() + if err != nil { + return err + } + if !mt.IsDistributable() && !rw.o.allowNondistributableArtifacts { + return nil + } + + digest, err := l.Digest() + if err != nil { + if errors.Is(err, stream.ErrNotComputed) { + return rw.lazyWriteLayer(ctx, l) + } + return err + } + + return rw.work.Do(digest, func() error { + if rw.o.progress != nil { + size, err := l.Size() + if err != nil { + return err + } + rw.o.progress.total(size) + } + return rw.w.uploadOne(ctx, l) + }) +} + +func (rw *repoWriter) lazyWriteLayer(ctx context.Context, l v1.Layer) error { + return rw.work.Stream(l, func() error { + if err := rw.w.uploadOne(ctx, l); err != nil { + return err + } + + // Mark this upload completed. + digest, err := l.Digest() + if err != nil { + return err + } + + rw.work.Do(digest, nop) + + if rw.o.progress != nil { + size, err := l.Size() + if err != nil { + return err + } + rw.o.progress.total(size) + } + + return nil + }) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go new file mode 100644 index 0000000000..4bc6f70a85 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go @@ -0,0 +1,117 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Referrers returns a list of descriptors that refer to the given manifest digest. +// +// The subject manifest doesn't have to exist in the registry for there to be descriptors that refer to it. +func Referrers(d name.Digest, options ...Option) (v1.ImageIndex, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + return newPuller(o).referrers(o.context, d, o.filter) +} + +// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema +func fallbackTag(d name.Digest) name.Tag { + return d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1)) +} + +func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, d name.Digest) (v1.ImageIndex, error) { + // Check the Referrers API endpoint first. 
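+	// If the registry does not support it, we fall back to the tag schema below.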
+ u := f.url("referrers", d.DigestStr()) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", string(types.OCIImageIndex)) + + resp, err := f.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest, http.StatusNotAcceptable); err != nil { + return nil, err + } + + var b []byte + if resp.StatusCode == http.StatusOK && resp.Header.Get("Content-Type") == string(types.OCIImageIndex) { + b, err = io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + } else { + // The registry doesn't support the Referrers API endpoint, so we'll use the fallback tag scheme. + b, _, err = f.fetchManifest(ctx, fallbackTag(d), []types.MediaType{types.OCIImageIndex}) + var terr *transport.Error + if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound { + // Not found just means there are no attachments yet. Start with an empty manifest. + return empty.Index, nil + } else if err != nil { + return nil, err + } + } + + h, sz, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + idx := &remoteIndex{ + fetcher: *f, + ctx: ctx, + manifest: b, + mediaType: types.OCIImageIndex, + descriptor: &v1.Descriptor{ + Digest: h, + MediaType: types.OCIImageIndex, + Size: sz, + }, + } + return filterReferrersResponse(filter, idx), nil +} + +// If filter applied, filter out by artifactType. +// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers +func filterReferrersResponse(filter map[string]string, in v1.ImageIndex) v1.ImageIndex { + if filter == nil { + return in + } + v, ok := filter["artifactType"] + if !ok { + return in + } + return mutate.RemoveManifests(in, func(desc v1.Descriptor) bool { + return desc.ArtifactType != v + }) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go new file mode 100644 index 0000000000..4bc1c4c457 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go @@ -0,0 +1,118 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type schema1 struct { + ref name.Reference + ctx context.Context + fetcher fetcher + manifest []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +func (s *schema1) Layers() ([]v1.Layer, error) { + m := schema1Manifest{} + if err := json.NewDecoder(bytes.NewReader(s.manifest)).Decode(&m); err != nil { + return nil, err + } + + layers := []v1.Layer{} + for i := len(m.FSLayers) - 1; i >= 0; i-- { + fsl := m.FSLayers[i] + + h, err := v1.NewHash(fsl.BlobSum) + if err != nil { + return nil, err + } + l, err := s.LayerByDigest(h) + if err != nil { + return nil, err + } + layers = append(layers, l) + } + + return layers, nil +} + +func (s *schema1) MediaType() (types.MediaType, error) { + return s.mediaType, nil +} + +func (s *schema1) Size() (int64, error) { + return s.descriptor.Size, nil +} + +func (s *schema1) ConfigName() (v1.Hash, error) { + return partial.ConfigName(s) +} + +func (s *schema1) ConfigFile() (*v1.ConfigFile, error) { + return nil, newErrSchema1(s.mediaType) +} + +func (s *schema1) RawConfigFile() ([]byte, error) { + return []byte("{}"), nil +} + +func (s *schema1) Digest() (v1.Hash, error) { + return s.descriptor.Digest, nil +} + +func (s *schema1) Manifest() (*v1.Manifest, error) { + return nil, newErrSchema1(s.mediaType) +} + +func (s *schema1) RawManifest() ([]byte, error) { + return s.manifest, nil +} + +func (s *schema1) LayerByDigest(h v1.Hash) (v1.Layer, error) { + l, err := partial.CompressedToLayer(&remoteLayer{ + fetcher: s.fetcher, + ctx: s.ctx, + digest: h, + }) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: s.ref.Context().Digest(h.String()), + }, nil +} + +func (s *schema1) LayerByDiffID(v1.Hash) (v1.Layer, error) { + return nil, newErrSchema1(s.mediaType) +} + +type fslayer struct { + BlobSum string `json:"blobSum"` +} + +type schema1Manifest struct { + FSLayers []fslayer `json:"fsLayers"` +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md new file mode 100644 index 0000000000..bd4d957b0e --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md @@ -0,0 +1,129 @@ +# `transport` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport) + +The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**. + +This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper) +that transparently performs: +* [Token +Authentication](https://docs.docker.com/registry/spec/auth/token/) and +* [OAuth2 +Authentication](https://docs.docker.com/registry/spec/auth/oauth/) + +for registry clients. + +## Raison d'être + +> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client? + +Great question! 
Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang).
+
+As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large.
+
+![docker/distribution](../../../../images/docker.dot.svg)
+
+> Why does it matter if you depend on prometheus? Who cares?
+
+It's generally polite to your downstream to reduce the number of dependencies your package requires:
+
+* Downloading your package is faster, which helps our Australian friends and people on airplanes.
+* There is less code to compile, which speeds up builds and saves the planet from global warming.
+* You reduce the likelihood of inflicting dependency hell upon your consumers.
+* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy.
+
+> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)?
+
+Similar reasons! That ends up pulling in grpc, protobuf, and logrus.
+
+![containerd/containerd](../../../../images/containerd.dot.svg)
+
+> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)?
+
+That just uses the `docker/distribution` client... and more!
+
+![containers/image](../../../../images/containers.dot.svg)
+
+> Wow, what about this package?
+
+Of course, this package isn't perfect either. `transport` depends on `authn`,
+which in turn depends on docker's config file parsing and handling package,
+which you don't strictly need but almost certainly want if you're going to be
+interacting with a registry.
+
+![google/go-containerregistry](../../../../images/ggcr.dot.svg)
+
+*These graphs were generated by
+[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).*
+
+## Usage
+
+This is heavily used by the
+[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
+package, which implements higher level image-centric functionality, but this
+package is useful if you want to interact directly with the registry to do
+something that `remote` doesn't support, e.g. [to handle schema 1
+images](https://github.com/google/go-containerregistry/pull/509).
+
+This package also includes some [error
+handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors)
+facilities in the form of
+[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError),
+which will parse the response body into a structured error for unexpected http
+status codes.
+
+Here's a "simple" program that writes the result of
+[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags)
+for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause)
+to stdout.
+ +```go +package main + +import ( + "io" + "net/http" + "os" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +func main() { + repo, err := name.NewRepository("gcr.io/google-containers/pause") + if err != nil { + panic(err) + } + + // Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG. + auth, err := authn.DefaultKeychain.Resolve(repo.Registry) + if err != nil { + panic(err) + } + + // Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause. + scopes := []string{repo.Scope(transport.PullScope)} + t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes) + if err != nil { + panic(err) + } + client := &http.Client{Transport: t} + + // Make the actual request. + resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list") + if err != nil { + panic(err) + } + + // Assert that we get a 200, otherwise attempt to parse body as a structured error. + if err := transport.CheckError(resp, http.StatusOK); err != nil { + panic(err) + } + + // Write the response to stdout. + if _, err := io.Copy(os.Stdout, resp.Body); err != nil { + panic(err) + } +} +``` diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go new file mode 100644 index 0000000000..f2452469d2 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go @@ -0,0 +1,62 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "encoding/base64" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" +) + +type basicTransport struct { + inner http.RoundTripper + auth authn.Authenticator + target string +} + +var _ http.RoundTripper = (*basicTransport)(nil) + +// RoundTrip implements http.RoundTripper +func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { + if bt.auth != authn.Anonymous { + auth, err := authn.Authorization(in.Context(), bt.auth) + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the host with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. 
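+	// (bearerTransport applies the same check via matchesHost.)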
+ if in.Host == bt.target || in.URL.Host == bt.target { + if bearer := auth.RegistryToken; bearer != "" { + hdr := fmt.Sprintf("Bearer %s", bearer) + in.Header.Set("Authorization", hdr) + } else if user, pass := auth.Username, auth.Password; user != "" && pass != "" { + delimited := fmt.Sprintf("%s:%s", user, pass) + encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) + hdr := fmt.Sprintf("Basic %s", encoded) + in.Header.Set("Authorization", hdr) + } else if token := auth.Auth; token != "" { + hdr := fmt.Sprintf("Basic %s", token) + in.Header.Set("Authorization", hdr) + } + } + } + return bt.inner.RoundTrip(in) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go new file mode 100644 index 0000000000..ea652d4ae8 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -0,0 +1,407 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" + "sync" + + authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" +) + +type Token struct { + Token string `json:"token"` + AccessToken string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` +} + +// Exchange requests a registry Token with the given scopes. +func Exchange(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string, pr *Challenge) (*Token, error) { + if strings.ToLower(pr.Scheme) != "bearer" { + // TODO: Pretend token for basic? + return nil, fmt.Errorf("challenge scheme %q is not bearer", pr.Scheme) + } + bt, err := fromChallenge(reg, auth, t, pr, scopes...) + if err != nil { + return nil, err + } + authcfg, err := authn.Authorization(ctx, auth) + if err != nil { + return nil, err + } + tok, err := bt.Refresh(ctx, authcfg) + if err != nil { + return nil, err + } + return tok, nil +} + +// FromToken returns a transport given a Challenge + Token. 
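+// This lets a caller perform the token exchange themselves (e.g. via Exchange,
+// above) and build a transport from the result.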
+func FromToken(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, pr *Challenge, tok *Token) (http.RoundTripper, error) { + if strings.ToLower(pr.Scheme) != "bearer" { + return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil + } + bt, err := fromChallenge(reg, auth, t, pr) + if err != nil { + return nil, err + } + if tok.Token != "" { + bt.bearer.RegistryToken = tok.Token + } + return &Wrapper{bt}, nil +} + +func fromChallenge(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, pr *Challenge, scopes ...string) (*bearerTransport, error) { + // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. + realm, ok := pr.Parameters["realm"] + if !ok { + return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.Parameters) + } + service := pr.Parameters["service"] + scheme := "https" + if pr.Insecure { + scheme = "http" + } + return &bearerTransport{ + inner: t, + basic: auth, + realm: realm, + registry: reg, + service: service, + scopes: scopes, + scheme: scheme, + }, nil +} + +type bearerTransport struct { + mx sync.RWMutex + // Wrapped by bearerTransport. + inner http.RoundTripper + // Basic credentials that we exchange for bearer tokens. + basic authn.Authenticator + // Holds the bearer response from the token service. + bearer authn.AuthConfig + // Registry to which we send bearer tokens. + registry name.Registry + // See https://tools.ietf.org/html/rfc6750#section-3 + realm string + // See https://docs.docker.com/registry/spec/auth/token/ + service string + scopes []string + // Scheme we should use, determined by ping response. + scheme string +} + +var _ http.RoundTripper = (*bearerTransport)(nil) + +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +func stringSet(ss []string) map[string]struct{} { + set := make(map[string]struct{}) + for _, s := range ss { + set[s] = struct{}{} + } + return set +} + +// RoundTrip implements http.RoundTripper +func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { + sendRequest := func() (*http.Response, error) { + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the registry with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. + if matchesHost(bt.registry.RegistryStr(), in, bt.scheme) { + bt.mx.RLock() + localToken := bt.bearer.RegistryToken + bt.mx.RUnlock() + hdr := fmt.Sprintf("Bearer %s", localToken) + in.Header.Set("Authorization", hdr) + } + return bt.inner.RoundTrip(in) + } + + res, err := sendRequest() + if err != nil { + return nil, err + } + + // If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope. + if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 { + // close out old response, since we will not return it. + res.Body.Close() + + newScopes := []string{} + bt.mx.Lock() + got := stringSet(bt.scopes) + for _, wac := range challenges { + // TODO(jonjohnsonjr): Should we also update "realm" or "service"? + if want, ok := wac.Parameters["scope"]; ok { + // Add any scopes that we don't already request. + if _, ok := got[want]; !ok { + newScopes = append(newScopes, want) + } + } + } + + // Some registries seem to only look at the first scope parameter during a token exchange. 
+		// If a request fails because it's missing a scope, we should put those at the beginning,
+		// otherwise the registry might just ignore it :/
+		newScopes = append(newScopes, bt.scopes...)
+		bt.scopes = newScopes
+		bt.mx.Unlock()
+
+		// TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.
+
+		// Retry the request to attempt to get a valid token.
+		if err = bt.refresh(in.Context()); err != nil {
+			return nil, err
+		}
+		return sendRequest()
+	}
+
+	return res, err
+}
+
+// It's unclear which authentication flow to use based purely on the protocol,
+// so we rely on heuristics and fallbacks to support as many registries as possible.
+// The basic token exchange is attempted first, falling back to the oauth flow.
+// If the IdentityToken is set, this indicates that we should start with the oauth flow.
+func (bt *bearerTransport) refresh(ctx context.Context) error {
+	auth, err := authn.Authorization(ctx, bt.basic)
+	if err != nil {
+		return err
+	}
+
+	if auth.RegistryToken != "" {
+		bt.mx.Lock()
+		bt.bearer.RegistryToken = auth.RegistryToken
+		bt.mx.Unlock()
+		return nil
+	}
+
+	response, err := bt.Refresh(ctx, auth)
+	if err != nil {
+		return err
+	}
+
+	// Some registries set access_token instead of token. See #54.
+	if response.AccessToken != "" {
+		response.Token = response.AccessToken
+	}
+
+	// Find a token to turn into a Bearer authenticator
+	if response.Token != "" {
+		bt.mx.Lock()
+		bt.bearer.RegistryToken = response.Token
+		bt.mx.Unlock()
+	}
+
+	// If we obtained a refresh token from the oauth flow, use that for refresh() now.
+	if response.RefreshToken != "" {
+		bt.basic = authn.FromConfig(authn.AuthConfig{
+			IdentityToken: response.RefreshToken,
+		})
+	}
+
+	return nil
+}
+
+func (bt *bearerTransport) Refresh(ctx context.Context, auth *authn.AuthConfig) (*Token, error) {
+	var (
+		content []byte
+		err     error
+	)
+	if auth.IdentityToken != "" {
+		// If the secret being stored is an identity token,
+		// the Username should be set to <token>, which indicates
+		// we are using an oauth flow.
+		content, err = bt.refreshOauth(ctx)
+		var terr *Error
+		if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound {
+			// Note: Not all token servers implement oauth2.
+			// If the request to the endpoint returns 404 using the HTTP POST method,
+			// refer to Token Documentation for using the HTTP GET method supported by all token servers.
+ content, err = bt.refreshBasic(ctx) + } + } else { + content, err = bt.refreshBasic(ctx) + } + if err != nil { + return nil, err + } + + var response Token + if err := json.Unmarshal(content, &response); err != nil { + return nil, err + } + + if response.Token == "" && response.AccessToken == "" { + return &response, fmt.Errorf("no token in bearer response:\n%s", content) + } + + return &response, nil +} + +func matchesHost(host string, in *http.Request, scheme string) bool { + canonicalHeaderHost := canonicalAddress(in.Host, scheme) + canonicalURLHost := canonicalAddress(in.URL.Host, scheme) + canonicalRegistryHost := canonicalAddress(host, scheme) + return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost +} + +func canonicalAddress(host, scheme string) (address string) { + // The host may be any one of: + // - hostname + // - hostname:port + // - ipv4 + // - ipv4:port + // - ipv6 + // - [ipv6]:port + // As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt + // to call it when we know that the address contains a port + if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) { + hostname, port, err := net.SplitHostPort(host) + if err != nil { + return host + } + if port == "" { + port = portMap[scheme] + } + + return net.JoinHostPort(hostname, port) + } + + return net.JoinHostPort(host, portMap[scheme]) +} + +// https://docs.docker.com/registry/spec/auth/oauth/ +func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { + auth, err := authn.Authorization(ctx, bt.basic) + if err != nil { + return nil, err + } + + u, err := url.Parse(bt.realm) + if err != nil { + return nil, err + } + + v := url.Values{} + bt.mx.RLock() + v.Set("scope", strings.Join(bt.scopes, " ")) + bt.mx.RUnlock() + if bt.service != "" { + v.Set("service", bt.service) + } + v.Set("client_id", defaultUserAgent) + if auth.IdentityToken != "" { + v.Set("grant_type", "refresh_token") + v.Set("refresh_token", auth.IdentityToken) + } else if auth.Username != "" && auth.Password != "" { + // TODO(#629): This is unreachable. + v.Set("grant_type", "password") + v.Set("username", auth.Username) + v.Set("password", auth.Password) + v.Set("access_type", "offline") + } + + client := http.Client{Transport: bt.inner} + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + // We don't want to log credentials. 
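+	// (redact.NewContext marks the context so the debug logger omits bodies.)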
+ ctx = redact.NewContext(ctx, "oauth token response contains credentials") + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + if bt.basic == authn.Anonymous { + logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + } + return nil, err + } + + return io.ReadAll(resp.Body) +} + +// https://docs.docker.com/registry/spec/auth/token/ +func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { + u, err := url.Parse(bt.realm) + if err != nil { + return nil, err + } + b := &basicTransport{ + inner: bt.inner, + auth: bt.basic, + target: u.Host, + } + client := http.Client{Transport: b} + + v := u.Query() + bt.mx.RLock() + v["scope"] = bt.scopes + bt.mx.RUnlock() + v.Set("service", bt.service) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + // We don't want to log credentials. + ctx = redact.NewContext(ctx, "basic token response contains credentials") + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + if bt.basic == authn.Anonymous { + logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + } + return nil, err + } + + return io.ReadAll(resp.Body) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go new file mode 100644 index 0000000000..ff7025b5c0 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides facilities for setting up an authenticated +// http.RoundTripper given an Authenticator and base RoundTripper. See +// transport.New for more information. +package transport diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go new file mode 100644 index 0000000000..d38e676249 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -0,0 +1,196 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "github.com/google/go-containerregistry/internal/redact" +) + +// Error implements error to support the following error specification: +// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors +type Error struct { + Errors []Diagnostic `json:"errors,omitempty"` + // The http status code returned. + StatusCode int + // The request that failed. + Request *http.Request + // The raw body if we couldn't understand it. + rawBody string + + // Bit of a hack to make it easier to force a retry. + temporary bool +} + +// Check that Error implements error +var _ error = (*Error)(nil) + +// Error implements error +func (e *Error) Error() string { + prefix := "" + if e.Request != nil { + prefix = fmt.Sprintf("%s %s: ", e.Request.Method, redact.URL(e.Request.URL)) + } + return prefix + e.responseErr() +} + +func (e *Error) responseErr() string { + switch len(e.Errors) { + case 0: + if len(e.rawBody) == 0 { + if e.Request != nil && e.Request.Method == http.MethodHead { + return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode)) + } + return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode)) + } + return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody) + case 1: + return e.Errors[0].String() + default: + var errors []string + for _, d := range e.Errors { + errors = append(errors, d.String()) + } + return fmt.Sprintf("multiple errors returned: %s", + strings.Join(errors, "; ")) + } +} + +// Temporary returns whether the request that preceded the error is temporary. +func (e *Error) Temporary() bool { + if e.temporary { + return true + } + + if len(e.Errors) == 0 { + _, ok := temporaryStatusCodes[e.StatusCode] + return ok + } + for _, d := range e.Errors { + if _, ok := temporaryErrorCodes[d.Code]; !ok { + return false + } + } + return true +} + +// Diagnostic represents a single error returned by a Docker registry interaction. +type Diagnostic struct { + Code ErrorCode `json:"code"` + Message string `json:"message,omitempty"` + Detail any `json:"detail,omitempty"` +} + +// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] +func (d Diagnostic) String() string { + msg := fmt.Sprintf("%s: %s", d.Code, d.Message) + if d.Detail != nil { + msg = fmt.Sprintf("%s; %v", msg, d.Detail) + } + return msg +} + +// ErrorCode is an enumeration of supported error codes. 
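+// Each Diagnostic reports one of these codes in its "code" field.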
+type ErrorCode string + +// The set of error conditions a registry may return: +// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors-2 +const ( + BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" + BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" + BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" + DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" + ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" + ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" + ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" + ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" + NameInvalidErrorCode ErrorCode = "NAME_INVALID" + NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" + SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" + TagInvalidErrorCode ErrorCode = "TAG_INVALID" + UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" + DeniedErrorCode ErrorCode = "DENIED" + UnsupportedErrorCode ErrorCode = "UNSUPPORTED" + TooManyRequestsErrorCode ErrorCode = "TOOMANYREQUESTS" + UnknownErrorCode ErrorCode = "UNKNOWN" + + // This isn't defined by either docker or OCI spec, but is defined by docker/distribution: + // https://github.com/distribution/distribution/blob/6a977a5a754baa213041443f841705888107362a/registry/api/errcode/register.go#L60 + UnavailableErrorCode ErrorCode = "UNAVAILABLE" +) + +// TODO: Include other error types. +var temporaryErrorCodes = map[ErrorCode]struct{}{ + BlobUploadInvalidErrorCode: {}, + TooManyRequestsErrorCode: {}, + UnknownErrorCode: {}, + UnavailableErrorCode: {}, +} + +var temporaryStatusCodes = map[int]struct{}{ + http.StatusRequestTimeout: {}, + http.StatusInternalServerError: {}, + http.StatusBadGateway: {}, + http.StatusServiceUnavailable: {}, + http.StatusGatewayTimeout: {}, +} + +// CheckError returns a structured error if the response status is not in codes. +func CheckError(resp *http.Response, codes ...int) error { + for _, code := range codes { + if resp.StatusCode == code { + // This is one of the supported status codes. + return nil + } + } + + b, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return makeError(resp, b) +} + +func makeError(resp *http.Response, body []byte) *Error { + // https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors + structuredError := &Error{} + + // This can fail if e.g. the response body is not valid JSON. That's fine, + // we'll construct an appropriate error string from the body and status code. + _ = json.Unmarshal(body, structuredError) + + structuredError.rawBody = string(body) + structuredError.StatusCode = resp.StatusCode + structuredError.Request = resp.Request + + return structuredError +} + +func retryError(resp *http.Response) error { + b, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + rerr := makeError(resp, b) + rerr.temporary = true + return rerr +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go new file mode 100644 index 0000000000..c341f844e6 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go @@ -0,0 +1,91 @@ +// Copyright 2020 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "net/http" + "net/http/httputil" + "time" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/pkg/logs" +) + +type logTransport struct { + inner http.RoundTripper +} + +// NewLogger returns a transport that logs requests and responses to +// github.com/google/go-containerregistry/pkg/logs.Debug. +func NewLogger(inner http.RoundTripper) http.RoundTripper { + return &logTransport{inner} +} + +func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { + // Inspired by: github.com/motemen/go-loghttp + + // We redact token responses and binary blobs in response/request. + omitBody, reason := redact.FromContext(in.Context()) + if omitBody { + logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason) + } else { + logs.Debug.Printf("--> %s %s", in.Method, in.URL) + } + + // Save these headers so we can redact Authorization. + savedHeaders := in.Header.Clone() + if in.Header != nil && in.Header.Get("authorization") != "" { + in.Header.Set("authorization", "") + } + + b, err := httputil.DumpRequestOut(in, !omitBody) + if err == nil { + logs.Debug.Println(string(b)) + } else { + logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err) + } + + // Restore the non-redacted headers. + in.Header = savedHeaders + + start := time.Now() + out, err = t.inner.RoundTrip(in) + duration := time.Since(start) + if err != nil { + logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration) + } + if out != nil { + msg := fmt.Sprintf("<-- %d", out.StatusCode) + if out.Request != nil { + msg = fmt.Sprintf("%s %s", msg, out.Request.URL) + } + msg = fmt.Sprintf("%s (%s)", msg, duration) + + if omitBody { + msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason) + } + + logs.Debug.Print(msg) + + b, err := httputil.DumpResponse(out, !omitBody) + if err == nil { + logs.Debug.Println(string(b)) + } else { + logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err) + } + } + return +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go new file mode 100644 index 0000000000..799c7ea08b --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -0,0 +1,217 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" +) + +// 300ms is the default fallback period for go's DNS dialer but we could make this configurable. +var fallbackDelay = 300 * time.Millisecond + +type Challenge struct { + Scheme string + + // Following the challenge there are often key/value pairs + // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" + Parameters map[string]string + + // Whether we had to use http to complete the Ping. + Insecure bool +} + +// Ping does a GET /v2/ against the registry and returns the response. +func Ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*Challenge, error) { + // This first attempts to use "https" for every request, falling back to http + // if the registry matches our localhost heuristic or if it is intentionally + // set to insecure via name.NewInsecureRegistry. + schemes := []string{"https"} + if reg.Scheme() == "http" { + schemes = append(schemes, "http") + } + if len(schemes) == 1 { + return pingSingle(ctx, reg, t, schemes[0]) + } + return pingParallel(ctx, reg, t, schemes) +} + +func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*Challenge, error) { + client := http.Client{Transport: t} + url := fmt.Sprintf("%s://%s/v2/", scheme, reg.RegistryStr()) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer func() { + // By draining the body, make sure to reuse the connection made by + // the ping for the following access to the registry + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() + + insecure := scheme == "http" + + switch resp.StatusCode { + case http.StatusOK: + // If we get a 200, then no authentication is needed. + return &Challenge{ + Insecure: insecure, + }, nil + case http.StatusUnauthorized: + if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { + // If we hit more than one, let's try to find one that we know how to handle. + wac := pickFromMultipleChallenges(challenges) + return &Challenge{ + Scheme: wac.Scheme, + Parameters: wac.Parameters, + Insecure: insecure, + }, nil + } + // Otherwise, just return the challenge without parameters. + return &Challenge{ + Scheme: resp.Header.Get("WWW-Authenticate"), + Insecure: insecure, + }, nil + default: + return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) + } +} + +// Based on the golang happy eyeballs dialParallel impl in net/dial.go. 
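+// In short (an explanatory note, not upstream's comment): the "https" ping
+// starts immediately; the "http" ping starts only once fallbackDelay (300ms)
+// elapses, or immediately if the https attempt has already failed. The first
+// scheme to answer successfully wins; if both fail, their errors are combined
+// into a multierrs value.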
+func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*Challenge, error) { + returned := make(chan struct{}) + defer close(returned) + + type pingResult struct { + *Challenge + error + primary bool + done bool + } + + results := make(chan pingResult) + + startRacer := func(ctx context.Context, scheme string) { + pr, err := pingSingle(ctx, reg, t, scheme) + select { + case results <- pingResult{Challenge: pr, error: err, primary: scheme == "https", done: true}: + case <-returned: + if pr != nil { + logs.Debug.Printf("%s lost race", scheme) + } + } + } + + var primary, fallback pingResult + + primaryCtx, primaryCancel := context.WithCancel(ctx) + defer primaryCancel() + go startRacer(primaryCtx, schemes[0]) + + fallbackTimer := time.NewTimer(fallbackDelay) + defer fallbackTimer.Stop() + + for { + select { + case <-fallbackTimer.C: + fallbackCtx, fallbackCancel := context.WithCancel(ctx) + defer fallbackCancel() + go startRacer(fallbackCtx, schemes[1]) + + case res := <-results: + if res.error == nil { + return res.Challenge, nil + } + if res.primary { + primary = res + } else { + fallback = res + } + if primary.done && fallback.done { + return nil, multierrs{primary.error, fallback.error} + } + if res.primary && fallbackTimer.Stop() { + // Primary failed and we haven't started the fallback, + // reset time to start fallback immediately. + fallbackTimer.Reset(0) + } + } + } +} + +func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge { + // It might happen there are multiple www-authenticate headers, e.g. `Negotiate` and `Basic`. + // Picking simply the first one could result eventually in `unrecognized challenge` error, + // that's why we're looping through the challenges in search for one that can be handled. + allowedSchemes := []string{"basic", "bearer"} + + for _, wac := range challenges { + currentScheme := strings.ToLower(wac.Scheme) + for _, allowed := range allowedSchemes { + if allowed == currentScheme { + return wac + } + } + } + + return challenges[0] +} + +type multierrs []error + +func (m multierrs) Error() string { + var b strings.Builder + hasWritten := false + for _, err := range m { + if hasWritten { + b.WriteString("; ") + } + hasWritten = true + b.WriteString(err.Error()) + } + return b.String() +} + +func (m multierrs) As(target any) bool { + for _, err := range m { + if errors.As(err, target) { + return true + } + } + return false +} + +func (m multierrs) Is(target error) bool { + for _, err := range m { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go new file mode 100644 index 0000000000..093f55d02f --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go @@ -0,0 +1,111 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + "time" + + "github.com/google/go-containerregistry/internal/retry" +) + +// Sleep for 0.1 then 0.3 seconds. This should cover networking blips. +var defaultBackoff = retry.Backoff{ + Duration: 100 * time.Millisecond, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, +} + +var _ http.RoundTripper = (*retryTransport)(nil) + +// retryTransport wraps a RoundTripper and retries temporary network errors. +type retryTransport struct { + inner http.RoundTripper + backoff retry.Backoff + predicate retry.Predicate + codes []int +} + +// Option is a functional option for retryTransport. +type Option func(*options) + +type options struct { + backoff retry.Backoff + predicate retry.Predicate + codes []int +} + +// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib +type Backoff = retry.Backoff + +// WithRetryBackoff sets the backoff for retry operations. +func WithRetryBackoff(backoff Backoff) Option { + return func(o *options) { + o.backoff = backoff + } +} + +// WithRetryPredicate sets the predicate for retry operations. +func WithRetryPredicate(predicate func(error) bool) Option { + return func(o *options) { + o.predicate = predicate + } +} + +// WithRetryStatusCodes sets which http response codes will be retried. +func WithRetryStatusCodes(codes ...int) Option { + return func(o *options) { + o.codes = codes + } +} + +// NewRetry returns a transport that retries errors. +func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { + o := &options{ + backoff: defaultBackoff, + predicate: retry.IsTemporary, + } + + for _, opt := range opts { + opt(o) + } + + return &retryTransport{ + inner: inner, + backoff: o.backoff, + predicate: o.predicate, + codes: o.codes, + } +} + +func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { + roundtrip := func() error { + out, err = t.inner.RoundTrip(in) + if !retry.Ever(in.Context()) { + return nil + } + if out != nil { + for _, code := range t.codes { + if out.StatusCode == code { + return retryError(out) + } + } + } + return err + } + retry.Retry(roundtrip, t.predicate, t.backoff) + return +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go new file mode 100644 index 0000000000..05844db136 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go @@ -0,0 +1,44 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + + "github.com/google/go-containerregistry/pkg/name" +) + +type schemeTransport struct { + // Scheme we should use, determined by ping response. + scheme string + + // Registry we're talking to. 
+ registry name.Registry + + // Wrapped by schemeTransport. + inner http.RoundTripper +} + +// RoundTrip implements http.RoundTripper +func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) { + // When we ping() the registry, we determine whether to use http or https + // based on which scheme was successful. That is only valid for the + // registry server and not e.g. a separate token server or blob storage, + // so we should only override the scheme if the host is the registry. + if matchesHost(st.registry.String(), in, st.scheme) { + in.URL.Scheme = st.scheme + } + return st.inner.RoundTrip(in) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go new file mode 100644 index 0000000000..c3b56f7a41 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +// Scopes suitable to qualify each Repository +const ( + PullScope string = "pull" + PushScope string = "push,pull" + // For now DELETE is PUSH, which is the read/write ACL. + DeleteScope string = PushScope + CatalogScope string = "catalog" +) diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go new file mode 100644 index 0000000000..bd539b44fb --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go @@ -0,0 +1,109 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "net/http" + "strings" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" +) + +// New returns a new RoundTripper based on the provided RoundTripper that has been +// setup to authenticate with the remote registry "reg", in the capacity +// laid out by the specified scopes. +// +// Deprecated: Use NewWithContext. 
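+//
+// A minimal, illustrative call (this example is not part of the upstream
+// docs), assuming a parsed repository name:
+//
+//	repo, _ := name.NewRepository("example.com/my/repo")
+//	rt, err := New(repo.Registry, authn.Anonymous, http.DefaultTransport,
+//		[]string{repo.Scope(PullScope)})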
+func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+	return NewWithContext(context.Background(), reg, auth, t, scopes)
+}
+
+// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
+// set up to authenticate with the remote registry "reg", in the capacity
+// laid out by the specified scopes.
+// If the RoundTripper is already of type Wrapper, it assumes
+// authentication was already done prior to this call, so it just returns
+// the provided RoundTripper without further action.
+func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+	// When the transport provided is of the type Wrapper this function assumes that the caller already
+	// executed the necessary login and check.
+	switch t.(type) {
+	case *Wrapper:
+		return t, nil
+	}
+	// The handshake:
+	//  1. Use "t" to ping() the registry for the authentication challenge.
+	//
+	//  2a. If we get back a 200, then simply use "t".
+	//
+	//  2b. If we get back a 401 with a Basic challenge, then use a transport
+	//      that just attaches auth to each roundtrip.
+	//
+	//  2c. If we get back a 401 with a Bearer challenge, then use a transport
+	//      that attaches a bearer token to each request, and refreshes it on 401s.
+	//      Perform an initial refresh to seed the bearer token.
+
+	// First we ping the registry to determine the parameters of the authentication handshake
+	// (if one is even necessary).
+	pr, err := Ping(ctx, reg, t)
+	if err != nil {
+		return nil, err
+	}
+
+	// Wrap t with a useragent transport unless we already have one.
+	if _, ok := t.(*userAgentTransport); !ok {
+		t = NewUserAgent(t, "")
+	}
+
+	scheme := "https"
+	if pr.Insecure {
+		scheme = "http"
+	}
+
+	// Wrap t in a transport that selects the appropriate scheme based on the ping response.
+	t = &schemeTransport{
+		scheme:   scheme,
+		registry: reg,
+		inner:    t,
+	}
+
+	if strings.ToLower(pr.Scheme) != "bearer" {
+		return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil
+	}
+
+	bt, err := fromChallenge(reg, auth, t, pr)
+	if err != nil {
+		return nil, err
+	}
+	bt.scopes = scopes
+
+	if err := bt.refresh(ctx); err != nil {
+		return nil, err
+	}
+	return &Wrapper{bt}, nil
+}
+
+// Wrapper results in *not* wrapping the supplied transport with additional logic
+// such as retries, useragent, and debug logging.
+// Consumers are opting in to providing their own transport without any additional wrapping.
+type Wrapper struct {
+	inner http.RoundTripper
+}
+
+// RoundTrip delegates to the inner RoundTripper
+func (w *Wrapper) RoundTrip(in *http.Request) (*http.Response, error) {
+	return w.inner.RoundTrip(in)
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
new file mode 100644
index 0000000000..74a9e71bdf
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
@@ -0,0 +1,94 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "net/http" + "runtime/debug" +) + +var ( + // Version can be set via: + // -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'" + Version string + + ggcrVersion = defaultUserAgent +) + +const ( + defaultUserAgent = "go-containerregistry" + moduleName = "github.com/google/go-containerregistry" +) + +type userAgentTransport struct { + inner http.RoundTripper + ua string +} + +func init() { + if v := version(); v != "" { + ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v) + } +} + +func version() string { + if Version != "" { + // Version was set via ldflags, just return it. + return Version + } + + info, ok := debug.ReadBuildInfo() + if !ok { + return "" + } + + // Happens for crane and gcrane. + if info.Main.Path == moduleName { + return info.Main.Version + } + + // Anything else. + for _, dep := range info.Deps { + if dep.Path == moduleName { + return dep.Version + } + } + + return "" +} + +// NewUserAgent returns an http.Roundtripper that sets the user agent to +// The provided string plus additional go-containerregistry information, +// e.g. if provided "crane/v0.1.4" and this modules was built at v0.1.4: +// +// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4 +func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper { + if ua == "" { + ua = ggcrVersion + } else { + ua = fmt.Sprintf("%s %s", ua, ggcrVersion) + } + return &userAgentTransport{ + inner: inner, + ua: ua, + } +} + +// RoundTrip implements http.RoundTripper +func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) { + in.Header.Set("User-Agent", ut.ua) + return ut.inner.RoundTrip(in) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go new file mode 100644 index 0000000000..94d207de1a --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -0,0 +1,711 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "sync" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/internal/retry" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Taggable is an interface that enables a manifest PUT (e.g. for tagging). +type Taggable interface { + RawManifest() ([]byte, error) +} + +// Write pushes the provided img to the specified image reference. +func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) { + return Push(ref, img, options...) +} + +// writer writes the elements of an image to a remote image reference. +type writer struct { + repo name.Repository + auth authn.Authenticator + transport http.RoundTripper + + client *http.Client + + progress *progress + backoff Backoff + predicate retry.Predicate + + scopeLock sync.Mutex + // Keep track of scopes that we have already requested. + scopeSet map[string]struct{} + scopes []string +} + +func makeWriter(ctx context.Context, repo name.Repository, ls []v1.Layer, o *options) (*writer, error) { + auth := o.auth + if o.keychain != nil { + kauth, err := authn.Resolve(ctx, o.keychain, repo) + if err != nil { + return nil, err + } + auth = kauth + } + scopes := scopesForUploadingImage(repo, ls) + tr, err := transport.NewWithContext(ctx, repo.Registry, auth, o.transport, scopes) + if err != nil { + return nil, err + } + + scopeSet := map[string]struct{}{} + for _, scope := range scopes { + scopeSet[scope] = struct{}{} + } + return &writer{ + repo: repo, + client: &http.Client{Transport: tr}, + auth: auth, + transport: o.transport, + progress: o.progress, + backoff: o.retryBackoff, + predicate: o.retryPredicate, + scopes: scopes, + scopeSet: scopeSet, + }, nil +} + +// url returns a url.Url for the specified path in the context of this remote image reference. +func (w *writer) url(path string) url.URL { + return url.URL{ + Scheme: w.repo.Scheme(), + Host: w.repo.RegistryStr(), + Path: path, + } +} + +func (w *writer) maybeUpdateScopes(ctx context.Context, ml *MountableLayer) error { + if ml.Reference.Context().String() == w.repo.String() { + return nil + } + if ml.Reference.Context().Registry.String() != w.repo.Registry.String() { + return nil + } + + scope := ml.Reference.Scope(transport.PullScope) + + w.scopeLock.Lock() + defer w.scopeLock.Unlock() + + if _, ok := w.scopeSet[scope]; !ok { + w.scopeSet[scope] = struct{}{} + w.scopes = append(w.scopes, scope) + + logs.Debug.Printf("Refreshing token to add scope %q", scope) + wt, err := transport.NewWithContext(ctx, w.repo.Registry, w.auth, w.transport, w.scopes) + if err != nil { + return err + } + w.client = &http.Client{Transport: wt} + } + + return nil +} + +// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. 
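+// For example (an illustrative note): if the upload was initiated at
+// https://reg.example/v2/foo/blobs/uploads/ and the response carries
+// "Location: /v2/foo/blobs/uploads/abc?_state=xyz", the next request goes to
+// "https://reg.example/v2/foo/blobs/uploads/abc?_state=xyz".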
+func (w *writer) nextLocation(resp *http.Response) (string, error) { + loc := resp.Header.Get("Location") + if len(loc) == 0 { + return "", errors.New("missing Location header") + } + u, err := url.Parse(loc) + if err != nil { + return "", err + } + + // If the location header returned is just a url path, then fully qualify it. + // We cannot simply call w.url, since there might be an embedded query string. + return resp.Request.URL.ResolveReference(u).String(), nil +} + +// checkExistingBlob checks if a blob exists already in the repository by making a +// HEAD request to the blob store API. GCR performs an existence check on the +// initiation if "mount" is specified, even if no "from" sources are specified. +// However, this is not broadly applicable to all registries, e.g. ECR. +func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String())) + + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} + +// initiateUpload initiates the blob upload, which starts with a POST that can +// optionally include the hash of the layer and a list of repositories from +// which that layer might be read. On failure, an error is returned. +// On success, the layer was either mounted (nothing more to do) or a blob +// upload was initiated and the body of that blob should be sent to the returned +// location. +func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) (location string, mounted bool, err error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr())) + uv := url.Values{} + if mount != "" && from != "" { + // Quay will fail if we specify a "mount" without a "from". + uv.Set("mount", mount) + uv.Set("from", from) + if origin != "" { + uv.Set("origin", origin) + } + } + u.RawQuery = uv.Encode() + + // Make the request to initiate the blob upload. + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return "", false, err + } + req.Header.Set("Content-Type", "application/json") + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + if from != "" { + // https://github.com/google/go-containerregistry/issues/1679 + logs.Warn.Printf("retrying without mount: %v", err) + return w.initiateUpload(ctx, "", "", "") + } + return "", false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + if from != "" { + // https://github.com/google/go-containerregistry/issues/1404 + logs.Warn.Printf("retrying without mount: %v", err) + return w.initiateUpload(ctx, "", "", "") + } + return "", false, err + } + + // Check the response code to determine the result. + switch resp.StatusCode { + case http.StatusCreated: + // We're done, we were able to fast-path. + return "", true, nil + case http.StatusAccepted: + // Proceed to PATCH, upload has begun. + loc, err := w.nextLocation(resp) + return loc, false, err + default: + panic("Unreachable: initiateUpload") + } +} + +// streamBlob streams the contents of the blob to the specified location. +// On failure, this will return an error. 
On success, this will return the location +// header indicating how to commit the streamed blob. +func (w *writer) streamBlob(ctx context.Context, layer v1.Layer, streamLocation string) (commitLocation string, rerr error) { + reset := func() {} + defer func() { + if rerr != nil { + reset() + } + }() + blob, err := layer.Compressed() + if err != nil { + return "", err + } + + getBody := layer.Compressed + if w.progress != nil { + var count int64 + blob = &progressReader{rc: blob, progress: w.progress, count: &count} + getBody = func() (io.ReadCloser, error) { + blob, err := layer.Compressed() + if err != nil { + return nil, err + } + return &progressReader{rc: blob, progress: w.progress, count: &count}, nil + } + reset = func() { + w.progress.complete(-count) + } + } + + req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) + if err != nil { + return "", err + } + if _, ok := layer.(*stream.Layer); !ok { + // We can't retry streaming layers. + req.GetBody = getBody + + // If we know the size, set it. + if size, err := layer.Size(); err == nil { + req.ContentLength = size + } + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { + return "", err + } + + // The blob has been uploaded, return the location header indicating + // how to commit this layer. + return w.nextLocation(resp) +} + +// commitBlob commits this blob by sending a PUT to the location returned from +// streaming the blob. +func (w *writer) commitBlob(ctx context.Context, location, digest string) error { + u, err := url.Parse(location) + if err != nil { + return err + } + v := u.Query() + v.Set("digest", digest) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPut, u.String(), nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + return transport.CheckError(resp, http.StatusCreated) +} + +// incrProgress increments and sends a progress update, if WithProgress is used. +func (w *writer) incrProgress(written int64) { + if w.progress == nil { + return + } + w.progress.complete(written) +} + +// uploadOne performs a complete upload of a single layer. +func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { + tryUpload := func() error { + ctx := retry.Never(ctx) + var from, mount, origin string + if h, err := l.Digest(); err == nil { + // If we know the digest, this isn't a streaming layer. Do an existence + // check so we can skip uploading the layer if possible. + existing, err := w.checkExistingBlob(ctx, h) + if err != nil { + return err + } + if existing { + size, err := l.Size() + if err != nil { + return err + } + w.incrProgress(size) + logs.Progress.Printf("existing blob: %v", h) + return nil + } + + mount = h.String() + } + if ml, ok := l.(*MountableLayer); ok { + if err := w.maybeUpdateScopes(ctx, ml); err != nil { + return err + } + + from = ml.Reference.Context().RepositoryStr() + origin = ml.Reference.Context().RegistryStr() + + // This keeps breaking with DockerHub. 
+ // https://github.com/google/go-containerregistry/issues/1741 + if w.repo.RegistryStr() == name.DefaultRegistry && origin != w.repo.RegistryStr() { + from = "" + origin = "" + } + } + + location, mounted, err := w.initiateUpload(ctx, from, mount, origin) + if err != nil { + return err + } else if mounted { + size, err := l.Size() + if err != nil { + return err + } + w.incrProgress(size) + h, err := l.Digest() + if err != nil { + return err + } + logs.Progress.Printf("mounted blob: %s", h.String()) + return nil + } + + // Only log layers with +json or +yaml. We can let through other stuff if it becomes popular. + // TODO(opencontainers/image-spec#791): Would be great to have an actual parser. + mt, err := l.MediaType() + if err != nil { + return err + } + smt := string(mt) + if !strings.HasSuffix(smt, "+json") && !strings.HasSuffix(smt, "+yaml") { + ctx = redact.NewContext(ctx, "omitting binary blobs from logs") + } + + location, err = w.streamBlob(ctx, l, location) + if err != nil { + return err + } + + h, err := l.Digest() + if err != nil { + return err + } + digest := h.String() + + if err := w.commitBlob(ctx, location, digest); err != nil { + return err + } + logs.Progress.Printf("pushed blob: %s", digest) + return nil + } + + return retry.Retry(tryUpload, w.predicate, w.backoff) +} + +type withMediaType interface { + MediaType() (types.MediaType, error) +} + +// This is really silly, but go interfaces don't let me satisfy remote.Taggable +// with remote.Descriptor because of name collisions between method names and +// struct fields. +// +// Use reflection to either pull the v1.Descriptor out of remote.Descriptor or +// create a descriptor based on the RawManifest and (optionally) MediaType. +func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) { + if d, ok := t.(*Descriptor); ok { + return d.Manifest, &d.Descriptor, nil + } + b, err := t.RawManifest() + if err != nil { + return nil, nil, err + } + + // A reasonable default if Taggable doesn't implement MediaType. + mt := types.DockerManifestSchema2 + + if wmt, ok := t.(withMediaType); ok { + m, err := wmt.MediaType() + if err != nil { + return nil, nil, err + } + mt = m + } + + h, sz, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + + return b, &v1.Descriptor{ + MediaType: mt, + Size: sz, + Digest: h, + }, nil +} + +// commitSubjectReferrers is responsible for updating the fallback tag manifest to track descriptors referring to a subject for registries that don't yet support the Referrers API. +// TODO: use conditional requests to avoid race conditions +func (w *writer) commitSubjectReferrers(ctx context.Context, sub name.Digest, add v1.Descriptor) error { + // Check if the registry supports Referrers API. + // TODO: This should be done once per registry, not once per subject. + u := w.url(fmt.Sprintf("/v2/%s/referrers/%s", w.repo.RepositoryStr(), sub.DigestStr())) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return err + } + req.Header.Set("Accept", string(types.OCIImageIndex)) + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil { + return err + } + if resp.StatusCode == http.StatusOK { + // The registry supports Referrers API. The registry is responsible for updating the referrers list. 
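+		// (Explanatory note, not upstream's comment: per the OCI 1.1
+		// distribution spec, a 200 from the referrers endpoint means the
+		// registry maintains the referrers index itself, so pushing the
+		// referring manifest is all the client needs to do.)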
+ return nil + } + + // The registry doesn't support Referrers API, we need to update the manifest tagged with the fallback tag. + // Make the request to GET the current manifest. + t := fallbackTag(sub) + u = w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), t.Identifier())) + req, err = http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return err + } + req.Header.Set("Accept", string(types.OCIImageIndex)) + resp, err = w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + var im v1.IndexManifest + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return err + } else if resp.StatusCode == http.StatusNotFound { + // Not found just means there are no attachments. Start with an empty index. + im = v1.IndexManifest{ + SchemaVersion: 2, + MediaType: types.OCIImageIndex, + Manifests: []v1.Descriptor{add}, + } + } else { + if err := json.NewDecoder(resp.Body).Decode(&im); err != nil { + return err + } + if im.SchemaVersion != 2 { + return fmt.Errorf("fallback tag manifest is not a schema version 2: %d", im.SchemaVersion) + } + if im.MediaType != types.OCIImageIndex { + return fmt.Errorf("fallback tag manifest is not an OCI image index: %s", im.MediaType) + } + for _, desc := range im.Manifests { + if desc.Digest == add.Digest { + // The digest is already attached, nothing to do. + logs.Progress.Printf("fallback tag %s already had referrer", t.Identifier()) + return nil + } + } + // Append the new descriptor to the index. + im.Manifests = append(im.Manifests, add) + } + + // Sort the manifests for reproducibility. + sort.Slice(im.Manifests, func(i, j int) bool { + return im.Manifests[i].Digest.String() < im.Manifests[j].Digest.String() + }) + logs.Progress.Printf("updating fallback tag %s with new referrer", t.Identifier()) + return w.commitManifest(ctx, fallbackTaggable{im}, t) +} + +type fallbackTaggable struct { + im v1.IndexManifest +} + +func (f fallbackTaggable) RawManifest() ([]byte, error) { return json.Marshal(f.im) } +func (f fallbackTaggable) MediaType() (types.MediaType, error) { return types.OCIImageIndex, nil } + +// commitManifest does a PUT of the image's manifest. +func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error { + // If the manifest refers to a subject, we need to check whether we need to update the fallback tag manifest. 
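+	// (Illustrative note, not upstream's comment: the fallback tag is derived
+	// from the subject's digest -- "sha256:abc..." becomes the tag
+	// "sha256-abc..." -- so clients that cannot use the Referrers API can
+	// still discover referrers by pulling that tag.)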
+ raw, err := t.RawManifest() + if err != nil { + return err + } + var mf struct { + MediaType types.MediaType `json:"mediaType"` + Subject *v1.Descriptor `json:"subject,omitempty"` + Config struct { + MediaType types.MediaType `json:"mediaType"` + } `json:"config"` + } + if err := json.Unmarshal(raw, &mf); err != nil { + return err + } + + tryUpload := func() error { + ctx := retry.Never(ctx) + raw, desc, err := unpackTaggable(t) + if err != nil { + return err + } + + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier())) + + // Make the request to PUT the serialized manifest + req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw)) + if err != nil { + return err + } + req.Header.Set("Content-Type", string(desc.MediaType)) + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { + return err + } + + // If the manifest referred to a subject, we may need to update the fallback tag manifest. + // TODO: If this fails, we'll retry the whole upload. We should retry just this part. + if mf.Subject != nil { + h, size, err := v1.SHA256(bytes.NewReader(raw)) + if err != nil { + return err + } + desc := v1.Descriptor{ + ArtifactType: string(mf.Config.MediaType), + MediaType: mf.MediaType, + Digest: h, + Size: size, + } + if err := w.commitSubjectReferrers(ctx, + ref.Context().Digest(mf.Subject.Digest.String()), + desc); err != nil { + return err + } + } + + // The image was successfully pushed! + logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size) + w.incrProgress(int64(len(raw))) + return nil + } + + return retry.Retry(tryUpload, w.predicate, w.backoff) +} + +func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { + // use a map as set to remove duplicates scope strings + scopeSet := map[string]struct{}{} + + for _, l := range layers { + if ml, ok := l.(*MountableLayer); ok { + // we will add push scope for ref.Context() after the loop. + // for now we ask pull scope for references of the same registry + if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() { + scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{} + } + } + } + + scopes := make([]string, 0) + // Push scope should be the first element because a few registries just look at the first scope to determine access. + scopes = append(scopes, repo.Scope(transport.PushScope)) + + for scope := range scopeSet { + scopes = append(scopes, scope) + } + + return scopes +} + +// WriteIndex pushes the provided ImageIndex to the specified image reference. +// WriteIndex will attempt to push all of the referenced manifests before +// attempting to push the ImageIndex, to retain referential integrity. +func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) { + return Push(ref, ii, options...) +} + +// WriteLayer uploads the provided Layer to the specified repo. +func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) { + o, err := makeOptions(options...) 
+ if err != nil { + return err + } + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() + } + return newPusher(o).Upload(o.context, repo, layer) +} + +// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/ +// +// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and +// remote.Descriptor. +// +// If t implements MediaType, we will use that for the Content-Type, otherwise +// we will default to types.DockerManifestSchema2. +// +// Tag does not attempt to write anything other than the manifest, so callers +// should ensure that all blobs or manifests that are referenced by t exist +// in the target registry. +func Tag(tag name.Tag, t Taggable, options ...Option) error { + return Put(tag, t, options...) +} + +// Put adds a manifest from the given Taggable via PUT /v1/.../manifest/ +// +// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and +// remote.Descriptor. +// +// If t implements MediaType, we will use that for the Content-Type, otherwise +// we will default to types.DockerManifestSchema2. +// +// Put does not attempt to write anything other than the manifest, so callers +// should ensure that all blobs or manifests that are referenced by t exist +// in the target registry. +func Put(ref name.Reference, t Taggable, options ...Option) error { + o, err := makeOptions(options...) + if err != nil { + return err + } + return newPusher(o).Put(o.context, ref, t) +} + +// Push uploads the given Taggable to the specified reference. +func Push(ref name.Reference, t Taggable, options ...Option) (rerr error) { + o, err := makeOptions(options...) + if err != nil { + return err + } + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() + } + return newPusher(o).Push(o.context, ref, t) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md new file mode 100644 index 0000000000..da0dda48d9 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md @@ -0,0 +1,68 @@ +# `stream` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream) + +The `stream` package contains an implementation of +[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer) +that supports _streaming_ access, i.e. the layer contents are read once and not +buffered. + +## Usage + +```go +package main + +import ( + "os" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/stream" +) + +// upload the contents of stdin as a layer to a local registry +func main() { + repo, err := name.NewRepository("localhost:5000/stream") + if err != nil { + panic(err) + } + + layer := stream.NewLayer(os.Stdin) + + if err := remote.WriteLayer(repo, layer); err != nil { + panic(err) + } +} +``` + +## Structure + +This implements the layer portion of an [image +upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that +is responsible for hashing the uncompressed contents to compute the `DiffID`, +gzipping them to produce the `Compressed` contents, and hashing/counting the +bytes to produce the `Digest`/`Size`. 
This goroutine writes to an +`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from +the corresponding `io.PipeReader`. + +
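+To make that flow concrete, here is a minimal standalone sketch of the same
+single-pass pattern (simplified and hypothetical: no pipe, no buffering, and
+no size bookkeeping, unlike the real implementation in `layer.go`):
+
+```go
+package main
+
+import (
+	"compress/gzip"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+)
+
+func main() {
+	rawHash := sha256.New() // hashes the uncompressed bytes -> DiffID
+	zipHash := sha256.New() // hashes the compressed bytes   -> Digest
+
+	// Everything the gzip writer emits goes to both the destination
+	// (io.Discard stands in for the real output) and zipHash.
+	zw := gzip.NewWriter(io.MultiWriter(io.Discard, zipHash))
+
+	// TeeReader feeds rawHash while the uncompressed stream is consumed
+	// exactly once.
+	if _, err := io.Copy(zw, io.TeeReader(os.Stdin, rawHash)); err != nil {
+		panic(err)
+	}
+	// Close flushes the gzip trailer, so it must happen before reading zipHash.
+	if err := zw.Close(); err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("diffid: sha256:%x\n", rawHash.Sum(nil))
+	fmt.Printf("digest: sha256:%x\n", zipHash.Sum(nil))
+}
+```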
+ +## Caveats + +This assumes that you have an uncompressed layer (i.e. a tarball) and would like +to compress it. Calling `Uncompressed` is always an error. Likewise, other +methods are invalid until the contents of `Compressed` have been completely +consumed and `Close`d. + +Using a `stream.Layer` will likely not work without careful consideration. For +example, in the `mutate` package, we defer computing the manifest and config +file until they are actually called. This allows you to `mutate.Append` a +streaming layer to an image without accidentally consuming it. Similarly, in +`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the +layer anyway, understanding that we may be dealing with a `stream.Layer` whose +contents need to be uploaded before we can upload the config file. + +Given the [structure](#structure) of how this is implemented, forgetting to +`Close` a `stream.Layer` will leak a goroutine. diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go new file mode 100644 index 0000000000..2b03544795 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go @@ -0,0 +1,275 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stream implements a single-pass streaming v1.Layer. +package stream + +import ( + "bufio" + "compress/gzip" + "crypto" + "encoding/hex" + "errors" + "hash" + "io" + "os" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var ( + // ErrNotComputed is returned when the requested value is not yet + // computed because the stream has not been consumed yet. + ErrNotComputed = errors.New("value not computed until stream is consumed") + + // ErrConsumed is returned by Compressed when the underlying stream has + // already been consumed and closed. + ErrConsumed = errors.New("stream was already consumed") +) + +// Layer is a streaming implementation of v1.Layer. +type Layer struct { + blob io.ReadCloser + consumed bool + compression int + + mu sync.Mutex + digest, diffID *v1.Hash + size int64 + mediaType types.MediaType +} + +var _ v1.Layer = (*Layer)(nil) + +// LayerOption applies options to layer +type LayerOption func(*Layer) + +// WithCompressionLevel sets the gzip compression. See `gzip.NewWriterLevel` for possible values. +func WithCompressionLevel(level int) LayerOption { + return func(l *Layer) { + l.compression = level + } +} + +// WithMediaType is a functional option for overriding the layer's media type. +func WithMediaType(mt types.MediaType) LayerOption { + return func(l *Layer) { + l.mediaType = mt + } +} + +// NewLayer creates a Layer from an io.ReadCloser. 
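+//
+// For example (illustrative, not upstream's doc):
+//
+//	layer := NewLayer(os.Stdin, WithCompressionLevel(gzip.BestCompression))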
+func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer {
+	layer := &Layer{
+		blob:        rc,
+		compression: gzip.BestSpeed,
+		// We use DockerLayer for now as uncompressed layers
+		// are unimplemented.
+		mediaType: types.DockerLayer,
+	}
+
+	for _, opt := range opts {
+		opt(layer)
+	}
+
+	return layer
+}
+
+// Digest implements v1.Layer.
+func (l *Layer) Digest() (v1.Hash, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.digest == nil {
+		return v1.Hash{}, ErrNotComputed
+	}
+	return *l.digest, nil
+}
+
+// DiffID implements v1.Layer.
+func (l *Layer) DiffID() (v1.Hash, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.diffID == nil {
+		return v1.Hash{}, ErrNotComputed
+	}
+	return *l.diffID, nil
+}
+
+// Size implements v1.Layer.
+func (l *Layer) Size() (int64, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.size == 0 {
+		return 0, ErrNotComputed
+	}
+	return l.size, nil
+}
+
+// MediaType implements v1.Layer.
+func (l *Layer) MediaType() (types.MediaType, error) {
+	return l.mediaType, nil
+}
+
+// Uncompressed implements v1.Layer.
+func (l *Layer) Uncompressed() (io.ReadCloser, error) {
+	return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented")
+}
+
+// Compressed implements v1.Layer.
+func (l *Layer) Compressed() (io.ReadCloser, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.consumed {
+		return nil, ErrConsumed
+	}
+	return newCompressedReader(l)
+}
+
+// finalize sets the layer to consumed and computes all hash and size values.
+func (l *Layer) finalize(uncompressed, compressed hash.Hash, size int64) error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(uncompressed.Sum(nil)))
+	if err != nil {
+		return err
+	}
+	l.diffID = &diffID
+
+	digest, err := v1.NewHash("sha256:" + hex.EncodeToString(compressed.Sum(nil)))
+	if err != nil {
+		return err
+	}
+	l.digest = &digest
+
+	l.size = size
+	l.consumed = true
+	return nil
+}
+
+type compressedReader struct {
+	pr     io.Reader
+	closer func() error
+}
+
+func newCompressedReader(l *Layer) (*compressedReader, error) {
+	// Collect digests of compressed and uncompressed stream and size of
+	// compressed stream.
+	h := crypto.SHA256.New()
+	zh := crypto.SHA256.New()
+	count := &countWriter{}
+
+	// The gzip.Writer writes through the pipe to the output stream, through
+	// a hasher to capture the compressed digest, and through a countWriter
+	// to capture the compressed size.
+	pr, pw := io.Pipe()
+
+	// Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count.
+	mw := io.MultiWriter(pw, zh, count)
+
+	// Buffer the output of the gzip writer so we don't have to wait on pr to keep writing.
+	// 128K (2<<16 bytes) ought to be small enough for anybody.
+	bw := bufio.NewWriterSize(mw, 2<<16)
+	zw, err := gzip.NewWriterLevel(bw, l.compression)
+	if err != nil {
+		return nil, err
+	}
+
+	doneDigesting := make(chan struct{})
+
+	cr := &compressedReader{
+		pr: pr,
+		closer: func() error {
+			// Immediately close pw without error. There are three ways to get
+			// here.
+			//
+			// 1. There was a copy error due to the underlying reader, in which
+			//    case the error will not be overwritten.
+			// 2. Copying from the underlying reader completed successfully.
+			// 3. Close has been called before the underlying reader has been
+			//    fully consumed. In this case pw must be closed in order to
+			//    keep the flush of bw from blocking indefinitely.
+			//
+			// NOTE: pw.Close never returns an error. The signature is only to
+			// implement io.Closer.
+			_ = pw.Close()
+
+			// Close the inner ReadCloser.
+			//
+			// NOTE: net/http will call close on success, so if we've already
+			// closed the inner rc, it's not an error.
+			if err := l.blob.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
+				return err
+			}
+
+			// Finalize layer with its digest and size values.
+			<-doneDigesting
+			return l.finalize(h, zh, count.n)
+		},
+	}
+	go func() {
+		// Copy the blob into the gzip writer (which also hashes and counts
+		// the size of the compressed output) and into the hasher of the raw
+		// contents.
+		_, copyErr := io.Copy(io.MultiWriter(h, zw), l.blob)
+
+		// Close the gzip writer once copying is done. If this is done in the
+		// Close method of compressedReader instead, then it can cause a panic
+		// when the compressedReader is closed before the blob is fully
+		// consumed and io.Copy in this goroutine is still blocking.
+		closeErr := zw.Close()
+
+		// Check errors from writing and closing streams.
+		if copyErr != nil {
+			close(doneDigesting)
+			pw.CloseWithError(copyErr)
+			return
+		}
+		if closeErr != nil {
+			close(doneDigesting)
+			pw.CloseWithError(closeErr)
+			return
+		}
+
+		// Flush the buffer once all writes to the gzip writer are complete.
+		if err := bw.Flush(); err != nil {
+			close(doneDigesting)
+			pw.CloseWithError(err)
+			return
+		}
+
+		// Notify closer that digests are done being written.
+		close(doneDigesting)
+
+		// Close the compressed reader to calculate digest/diffID/size. This
+		// will cause pr to return EOF which will cause readers of the
+		// Compressed stream to finish reading.
+		pw.CloseWithError(cr.Close())
+	}()
+
+	return cr, nil
+}
+
+func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }
+
+func (cr *compressedReader) Close() error { return cr.closer() }
+
+// countWriter counts bytes written to it.
+type countWriter struct{ n int64 }
+
+func (c *countWriter) Write(p []byte) (int, error) {
+	c.n += int64(len(p))
+	return len(p), nil
+}
diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
new file mode 100644
index 0000000000..03f339b063
--- /dev/null
+++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
@@ -0,0 +1,280 @@
+# `tarball`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball)
+
+This package produces tarballs that can be consumed via `docker load`. Note
+that this is a _different_ format from the [`legacy`](/pkg/legacy/tarball)
+tarballs that are produced by `docker save`, but this package is still able to
+read the legacy tarballs produced by `docker save`.
+
+## Usage
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
+)
+
+func main() {
+	// Read a tarball from os.Args[1] that contains ubuntu.
+	tag, err := name.NewTag("ubuntu")
+	if err != nil {
+		panic(err)
+	}
+	img, err := tarball.ImageFromPath(os.Args[1], &tag)
+	if err != nil {
+		panic(err)
+	}
+
+	// Write that tarball to os.Args[2] with a different tag.
+	newTag, err := name.NewTag("ubuntu:newest")
+	if err != nil {
+		panic(err)
+	}
+	f, err := os.Create(os.Args[2])
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	if err := tarball.Write(newTag, img, f); err != nil {
+		panic(err)
+	}
+}
+```
+
+## Structure
+
+ +Let's look at what happens when we write out a tarball: + + +### `ubuntu:latest` + +``` +$ crane pull ubuntu ubuntu.tar && mkdir ubuntu && tar xf ubuntu.tar -C ubuntu && rm ubuntu.tar +$ tree ubuntu/ +ubuntu/ +├── 423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz +├── b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz +├── de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz +├── f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz +├── manifest.json +└── sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c + +0 directories, 6 files +``` + +There are a couple interesting files here. + +`manifest.json` is the entrypoint: a list of [`tarball.Descriptor`s](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball#Descriptor) +that describe the images contained in this tarball. + +For each image, this has the `RepoTags` (how it was pulled), a `Config` file +that points to the image's config file, a list of `Layers`, and (optionally) +`LayerSources`. + +``` +$ jq < ubuntu/manifest.json +[ + { + "Config": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c", + "RepoTags": [ + "ubuntu" + ], + "Layers": [ + "423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz", + "de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz", + "f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz", + "b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz" + ] + } +] +``` + +The config file and layers are exactly what you would expect, and match the +registry representations of the same artifacts. You'll notice that the +`manifest.json` contains similar information as the registry manifest, but isn't +quite the same: + +``` +$ crane manifest ubuntu@sha256:0925d086715714114c1988f7c947db94064fd385e171a63c07730f1fa014e6f9 +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 3408, + "digest": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 26692096, + "digest": "sha256:423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 35365, + "digest": "sha256:de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 852, + "digest": "sha256:f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 163, + "digest": "sha256:b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7" + } + ] +} +``` + +This makes it difficult to maintain image digests when roundtripping images +through the tarball format, so it's not a great format if you care about +provenance. + +The ubuntu example didn't have any `LayerSources` -- let's look at another image +that does. 
+
+### `hello-world:nanoserver`
+
+```
+$ crane pull hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b nanoserver.tar
+$ mkdir nanoserver && tar xf nanoserver.tar -C nanoserver && rm nanoserver.tar
+$ tree nanoserver/
+nanoserver/
+├── 10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz
+├── a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz
+├── be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz
+├── manifest.json
+└── sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6
+
+0 directories, 5 files
+
+$ jq < nanoserver/manifest.json
+[
+  {
+    "Config": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6",
+    "RepoTags": [
+      "index.docker.io/library/hello-world:i-was-a-digest"
+    ],
+    "Layers": [
+      "a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz",
+      "be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz",
+      "10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz"
+    ],
+    "LayerSources": {
+      "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": {
+        "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+        "size": 101145811,
+        "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053",
+        "urls": [
+          "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053"
+        ]
+      }
+    }
+  }
+]
+```
+
+A couple of things to note about this `manifest.json` versus the other:
+* The `RepoTags` field is a bit weird here. `hello-world` is a multi-platform
+  image, so we had to pull this image by digest, since we're on amd64/linux
+  and wanted to grab a Windows image. Since the tarball format expects a tag
+  under `RepoTags`, and we didn't pull by tag, we replace the digest with a
+  sentinel `i-was-a-digest` "tag" to appease docker.
+* The `LayerSources` map has enough information to reconstruct the foreign
+  layer pointers when pushing/pulling from the registry. For legal reasons,
+  Microsoft doesn't want anyone else to serve Windows base images, so the
+  mediaType here indicates a "foreign" or "non-distributable" layer with a URL
+  for where you can download it from Microsoft (see the [OCI
+  image-spec](https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers)).
+
+We can look at what's in the registry to explain both of these things:
+```
+$ crane manifest hello-world:nanoserver | jq .
+{
+  "manifests": [
+    {
+      "digest": "sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b",
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+      "platform": {
+        "architecture": "amd64",
+        "os": "windows",
+        "os.version": "10.0.17763.1040"
+      },
+      "size": 1124
+    }
+  ],
+  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+  "schemaVersion": 2
+}
+
+
+# Note the media type and "urls" field.
+$ crane manifest hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b | jq .
+{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1721, + "digest": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + "size": 101145811, + "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", + "urls": [ + "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" + ] + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 1669, + "digest": "sha256:be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 949, + "digest": "sha256:10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0" + } + ] +} +``` + +The `LayerSources` map is keyed by the diffid. Note that `sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e` matches the first layer in the config file: +``` +$ jq '.[0].LayerSources' < nanoserver/manifest.json +{ + "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": { + "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + "size": 101145811, + "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", + "urls": [ + "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" + ] + } +} + +$ jq < nanoserver/sha256\:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 | jq .rootfs +{ + "type": "layers", + "diff_ids": [ + "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e", + "sha256:601cf7d78c62e4b4d32a7bbf96a17606a9cea5bd9d22ffa6f34aa431d056b0e8", + "sha256:a1e1a3bf6529adcce4d91dce2cad86c2604a66b507ccbc4d2239f3da0ec5aab9" + ] +} +``` diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go new file mode 100644 index 0000000000..4eb79bb4e5 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tarball provides facilities for reading/writing v1.Images from/to +// a tarball on-disk. +package tarball diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go new file mode 100644 index 0000000000..aba609deac --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -0,0 +1,440 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tarball
+
+import (
+	"archive/tar"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"sync"
+
+	comp "github.com/google/go-containerregistry/internal/compression"
+	"github.com/google/go-containerregistry/pkg/compression"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+type image struct {
+	opener        Opener
+	manifest      *Manifest
+	config        []byte
+	imgDescriptor *Descriptor
+
+	tag *name.Tag
+}
+
+type uncompressedImage struct {
+	*image
+}
+
+type compressedImage struct {
+	*image
+	manifestLock sync.Mutex // Protects manifest
+	manifest     *v1.Manifest
+}
+
+var _ partial.UncompressedImageCore = (*uncompressedImage)(nil)
+var _ partial.CompressedImageCore = (*compressedImage)(nil)
+
+// Opener is a thunk for opening a tar file.
+type Opener func() (io.ReadCloser, error)
+
+func pathOpener(path string) Opener {
+	return func() (io.ReadCloser, error) {
+		return os.Open(path)
+	}
+}
+
+// ImageFromPath returns a v1.Image from a tarball located on path.
+func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) {
+	return Image(pathOpener(path), tag)
+}
+
+// LoadManifest loads the manifest.json from the tarball provided by the Opener.
+func LoadManifest(opener Opener) (Manifest, error) {
+	m, err := extractFileFromTar(opener, "manifest.json")
+	if err != nil {
+		return nil, err
+	}
+	defer m.Close()
+
+	var manifest Manifest
+
+	if err := json.NewDecoder(m).Decode(&manifest); err != nil {
+		return nil, err
+	}
+	return manifest, nil
+}
+
+// Image exposes an image from the tarball opened by the provided Opener.
+func Image(opener Opener, tag *name.Tag) (v1.Image, error) {
+	img := &image{
+		opener: opener,
+		tag:    tag,
+	}
+	if err := img.loadTarDescriptorAndConfig(); err != nil {
+		return nil, err
+	}
+
+	// Peek at the first layer and see if it's compressed.
+	if len(img.imgDescriptor.Layers) > 0 {
+		compressed, err := img.areLayersCompressed()
+		if err != nil {
+			return nil, err
+		}
+		if compressed {
+			c := compressedImage{
+				image: img,
+			}
+			return partial.CompressedToImage(&c)
+		}
+	}
+
+	uc := uncompressedImage{
+		image: img,
+	}
+	return partial.UncompressedToImage(&uc)
+}
+
+func (i *image) MediaType() (types.MediaType, error) {
+	return types.DockerManifestSchema2, nil
+}
+
+// Descriptor stores the manifest data for a single image inside a `docker save` tarball.
+type Descriptor struct {
+	Config   string
+	RepoTags []string
+	Layers   []string
+
+	// Tracks foreign layer info. Key is DiffID.
+	LayerSources map[v1.Hash]v1.Descriptor `json:",omitempty"`
+}
+
+// Manifest represents the manifest.json file in a `docker save` tarball, describing every image it contains.
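+//
+// A short usage sketch (assuming an image tarball at img.tar):
+//
+//	m, err := LoadManifest(func() (io.ReadCloser, error) { return os.Open("img.tar") })
+//	if err == nil && len(m) > 0 {
+//		fmt.Println(m[0].RepoTags)
+//	}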
+type Manifest []Descriptor + +func (m Manifest) findDescriptor(tag *name.Tag) (*Descriptor, error) { + if tag == nil { + if len(m) != 1 { + return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") + } + return &(m)[0], nil + } + for _, img := range m { + for _, tagStr := range img.RepoTags { + repoTag, err := name.NewTag(tagStr) + if err != nil { + return nil, err + } + + // Compare the resolved names, since there are several ways to specify the same tag. + if repoTag.Name() == tag.Name() { + return &img, nil + } + } + } + return nil, fmt.Errorf("tag %s not found in tarball", tag) +} + +func (i *image) areLayersCompressed() (bool, error) { + if len(i.imgDescriptor.Layers) == 0 { + return false, errors.New("0 layers found in image") + } + layer := i.imgDescriptor.Layers[0] + blob, err := extractFileFromTar(i.opener, layer) + if err != nil { + return false, err + } + defer blob.Close() + + cp, _, err := comp.PeekCompression(blob) + if err != nil { + return false, err + } + + return cp != compression.None, nil +} + +func (i *image) loadTarDescriptorAndConfig() error { + m, err := extractFileFromTar(i.opener, "manifest.json") + if err != nil { + return err + } + defer m.Close() + + if err := json.NewDecoder(m).Decode(&i.manifest); err != nil { + return err + } + + if i.manifest == nil { + return errors.New("no valid manifest.json in tarball") + } + + i.imgDescriptor, err = i.manifest.findDescriptor(i.tag) + if err != nil { + return err + } + + cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) + if err != nil { + return err + } + defer cfg.Close() + + i.config, err = io.ReadAll(cfg) + if err != nil { + return err + } + return nil +} + +func (i *image) RawConfigFile() ([]byte, error) { + return i.config, nil +} + +// tarFile represents a single file inside a tar. Closing it closes the tar itself. +type tarFile struct { + io.Reader + io.Closer +} + +func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { + f, err := opener() + if err != nil { + return nil, err + } + needClose := true + defer func() { + if needClose { + f.Close() + } + }() + + tf := tar.NewReader(f) + for { + hdr, err := tf.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + if hdr.Name == filePath { + if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink { + currentDir := filepath.Dir(filePath) + return extractFileFromTar(opener, path.Join(currentDir, path.Clean(hdr.Linkname))) + } + needClose = false + return tarFile{ + Reader: tf, + Closer: f, + }, nil + } + } + return nil, fmt.Errorf("file %s not found in tar", filePath) +} + +// uncompressedLayerFromTarball implements partial.UncompressedLayer +type uncompressedLayerFromTarball struct { + diffID v1.Hash + mediaType types.MediaType + opener Opener + filePath string +} + +// foreignUncompressedLayer implements partial.UncompressedLayer but returns +// a custom descriptor. This allows the foreign layer URLs to be included in +// the generated image manifest for uncompressed layers. 
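+// A typical case is a Windows base layer whose descriptor carries
+// mcr.microsoft.com URLs (see the hello-world:nanoserver walkthrough in this
+// package's README).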
+type foreignUncompressedLayer struct { + uncompressedLayerFromTarball + desc v1.Descriptor +} + +func (fl *foreignUncompressedLayer) Descriptor() (*v1.Descriptor, error) { + return &fl.desc, nil +} + +// DiffID implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) { + return ulft.diffID, nil +} + +// Uncompressed implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) { + return extractFileFromTar(ulft.opener, ulft.filePath) +} + +func (ulft *uncompressedLayerFromTarball) MediaType() (types.MediaType, error) { + return ulft.mediaType, nil +} + +func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + cfg, err := partial.ConfigFile(i) + if err != nil { + return nil, err + } + for idx, diffID := range cfg.RootFS.DiffIDs { + if diffID == h { + // Technically the media type should be 'application/tar' but given that our + // v1.Layer doesn't force consumers to care about whether the layer is compressed + // we should be fine returning the DockerLayer media type + mt := types.DockerLayer + bd, ok := i.imgDescriptor.LayerSources[h] + if ok { + // This is janky, but we don't want to implement Descriptor for + // uncompressed layers because it breaks a bunch of assumptions in partial. + // See https://github.com/google/go-containerregistry/issues/1870 + docker25workaround := bd.MediaType == types.DockerUncompressedLayer || bd.MediaType == types.OCIUncompressedLayer + + if !docker25workaround { + // Overwrite the mediaType for foreign layers. + return &foreignUncompressedLayer{ + uncompressedLayerFromTarball: uncompressedLayerFromTarball{ + diffID: diffID, + mediaType: bd.MediaType, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, + desc: bd, + }, nil + } + + // Intentional fall through. + } + + return &uncompressedLayerFromTarball{ + diffID: diffID, + mediaType: mt, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, nil + } + } + return nil, fmt.Errorf("diff id %q not found", h) +} + +func (c *compressedImage) Manifest() (*v1.Manifest, error) { + c.manifestLock.Lock() + defer c.manifestLock.Unlock() + if c.manifest != nil { + return c.manifest, nil + } + + b, err := c.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + c.manifest = &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + for i, p := range c.imgDescriptor.Layers { + cfg, err := partial.ConfigFile(c) + if err != nil { + return nil, err + } + diffid := cfg.RootFS.DiffIDs[i] + if d, ok := c.imgDescriptor.LayerSources[diffid]; ok { + // If it's a foreign layer, just append the descriptor so we can avoid + // reading the entire file. 
+ c.manifest.Layers = append(c.manifest.Layers, d) + } else { + l, err := extractFileFromTar(c.opener, p) + if err != nil { + return nil, err + } + defer l.Close() + sha, size, err := v1.SHA256(l) + if err != nil { + return nil, err + } + c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ + MediaType: types.DockerLayer, + Size: size, + Digest: sha, + }) + } + } + return c.manifest, nil +} + +func (c *compressedImage) RawManifest() ([]byte, error) { + return partial.RawManifest(c) +} + +// compressedLayerFromTarball implements partial.CompressedLayer +type compressedLayerFromTarball struct { + desc v1.Descriptor + opener Opener + filePath string +} + +// Digest implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) { + return clft.desc.Digest, nil +} + +// Compressed implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) { + return extractFileFromTar(clft.opener, clft.filePath) +} + +// MediaType implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) MediaType() (types.MediaType, error) { + return clft.desc.MediaType, nil +} + +// Size implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Size() (int64, error) { + return clft.desc.Size, nil +} + +func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + m, err := c.Manifest() + if err != nil { + return nil, err + } + for i, l := range m.Layers { + if l.Digest == h { + fp := c.imgDescriptor.Layers[i] + return &compressedLayerFromTarball{ + desc: l, + opener: c.opener, + filePath: fp, + }, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go new file mode 100644 index 0000000000..8a26309618 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go @@ -0,0 +1,354 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tarball + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "os" + "sync" + + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/google/go-containerregistry/internal/and" + comp "github.com/google/go-containerregistry/internal/compression" + gestargz "github.com/google/go-containerregistry/internal/estargz" + ggzip "github.com/google/go-containerregistry/internal/gzip" + "github.com/google/go-containerregistry/internal/zstd" + "github.com/google/go-containerregistry/pkg/compression" + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type layer struct { + digest v1.Hash + diffID v1.Hash + size int64 + compressedopener Opener + uncompressedopener Opener + compression compression.Compression + compressionLevel int + annotations map[string]string + estgzopts []estargz.Option + mediaType types.MediaType +} + +// Descriptor implements partial.withDescriptor. +func (l *layer) Descriptor() (*v1.Descriptor, error) { + digest, err := l.Digest() + if err != nil { + return nil, err + } + return &v1.Descriptor{ + Size: l.size, + Digest: digest, + Annotations: l.annotations, + MediaType: l.mediaType, + }, nil +} + +// Digest implements v1.Layer +func (l *layer) Digest() (v1.Hash, error) { + return l.digest, nil +} + +// DiffID implements v1.Layer +func (l *layer) DiffID() (v1.Hash, error) { + return l.diffID, nil +} + +// Compressed implements v1.Layer +func (l *layer) Compressed() (io.ReadCloser, error) { + return l.compressedopener() +} + +// Uncompressed implements v1.Layer +func (l *layer) Uncompressed() (io.ReadCloser, error) { + return l.uncompressedopener() +} + +// Size implements v1.Layer +func (l *layer) Size() (int64, error) { + return l.size, nil +} + +// MediaType implements v1.Layer +func (l *layer) MediaType() (types.MediaType, error) { + return l.mediaType, nil +} + +// LayerOption applies options to layer +type LayerOption func(*layer) + +// WithCompression is a functional option for overriding the default +// compression algorithm used for compressing uncompressed tarballs. +// Please note that WithCompression(compression.ZStd) should be used +// in conjunction with WithMediaType(types.OCILayerZStd) +func WithCompression(comp compression.Compression) LayerOption { + return func(l *layer) { + switch comp { + case compression.ZStd: + l.compression = compression.ZStd + case compression.GZip: + l.compression = compression.GZip + case compression.None: + logs.Warn.Printf("Compression type 'none' is not supported for tarball layers; using gzip compression.") + l.compression = compression.GZip + default: + logs.Warn.Printf("Unexpected compression type for WithCompression(): %s; using gzip compression instead.", comp) + l.compression = compression.GZip + } + } +} + +// WithCompressionLevel is a functional option for overriding the default +// compression level used for compressing uncompressed tarballs. +func WithCompressionLevel(level int) LayerOption { + return func(l *layer) { + l.compressionLevel = level + } +} + +// WithMediaType is a functional option for overriding the layer's media type. +func WithMediaType(mt types.MediaType) LayerOption { + return func(l *layer) { + l.mediaType = mt + } +} + +// WithCompressedCaching is a functional option that overrides the +// logic for accessing the compressed bytes to memoize the result +// and avoid expensive repeated gzips. 
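+//
+// Since it matches the LayerOption signature, it can be passed directly as an
+// option, e.g. (a sketch): LayerFromFile("layer.tar", WithCompressedCaching).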
+func WithCompressedCaching(l *layer) {
+	var once sync.Once
+	var err error
+
+	buf := bytes.NewBuffer(nil)
+	og := l.compressedopener
+
+	l.compressedopener = func() (io.ReadCloser, error) {
+		once.Do(func() {
+			var rc io.ReadCloser
+			rc, err = og()
+			if err == nil {
+				defer rc.Close()
+				_, err = io.Copy(buf, rc)
+			}
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		return io.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
+	}
+}
+
+// WithEstargzOptions is a functional option that allows the caller to pass
+// through estargz.Options to the underlying compression layer. This is
+// only meaningful when estargz is enabled.
+//
+// Deprecated: WithEstargzOptions is deprecated, and will be removed in a future release.
+func WithEstargzOptions(opts ...estargz.Option) LayerOption {
+	return func(l *layer) {
+		l.estgzopts = opts
+	}
+}
+
+// WithEstargz is a functional option that explicitly enables estargz support.
+//
+// Deprecated: WithEstargz is deprecated, and will be removed in a future release.
+func WithEstargz(l *layer) {
+	oguncompressed := l.uncompressedopener
+	estargz := func() (io.ReadCloser, error) {
+		crc, err := oguncompressed()
+		if err != nil {
+			return nil, err
+		}
+		eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compressionLevel))
+		rc, h, err := gestargz.ReadCloser(crc, eopts...)
+		if err != nil {
+			return nil, err
+		}
+		l.annotations[estargz.TOCJSONDigestAnnotation] = h.String()
+		return &and.ReadCloser{
+			Reader: rc,
+			CloseFunc: func() error {
+				err := rc.Close()
+				if err != nil {
+					return err
+				}
+				// As an optimization, leverage the DiffID exposed by the estargz ReadCloser
+				l.diffID, err = v1.NewHash(rc.DiffID().String())
+				return err
+			},
+		}, nil
+	}
+	uncompressed := func() (io.ReadCloser, error) {
+		urc, err := estargz()
+		if err != nil {
+			return nil, err
+		}
+		return ggzip.UnzipReadCloser(urc)
+	}
+
+	l.compressedopener = estargz
+	l.uncompressedopener = uncompressed
+}
+
+// LayerFromFile returns a v1.Layer given a tarball.
+func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) {
+	opener := func() (io.ReadCloser, error) {
+		return os.Open(path)
+	}
+	return LayerFromOpener(opener, opts...)
+}
+
+// LayerFromOpener returns a v1.Layer given an Opener function.
+// The Opener may return either an uncompressed tarball (common),
+// or a compressed tarball (uncommon).
+//
+// When using this in conjunction with something like remote.Write
+// the uncompressed path may end up gzipping things multiple times:
+//  1. Compute the layer SHA256
+//  2. Upload the compressed layer.
+//
+// Since gzip can be expensive, we support an option to memoize the
+// compression that can be passed here: tarball.WithCompressedCaching
+func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
+	comp, err := comp.GetCompression(opener)
+	if err != nil {
+		return nil, err
+	}
+
+	layer := &layer{
+		compression:      compression.GZip,
+		compressionLevel: gzip.BestSpeed,
+		annotations:      make(map[string]string, 1),
+		mediaType:        types.DockerLayer,
+	}
+
+	if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" {
+		logs.Warn.Println("GGCR_EXPERIMENT_ESTARGZ is deprecated, and will be removed in a future release.")
+		opts = append([]LayerOption{WithEstargz}, opts...)
+ } + + switch comp { + case compression.GZip: + layer.compressedopener = opener + layer.uncompressedopener = func() (io.ReadCloser, error) { + urc, err := opener() + if err != nil { + return nil, err + } + return ggzip.UnzipReadCloser(urc) + } + case compression.ZStd: + layer.compressedopener = opener + layer.uncompressedopener = func() (io.ReadCloser, error) { + urc, err := opener() + if err != nil { + return nil, err + } + return zstd.UnzipReadCloser(urc) + } + default: + layer.uncompressedopener = opener + layer.compressedopener = func() (io.ReadCloser, error) { + crc, err := opener() + if err != nil { + return nil, err + } + + if layer.compression == compression.ZStd { + return zstd.ReadCloserLevel(crc, layer.compressionLevel), nil + } + + return ggzip.ReadCloserLevel(crc, layer.compressionLevel), nil + } + } + + for _, opt := range opts { + opt(layer) + } + + // Warn if media type does not match compression + var mediaTypeMismatch = false + switch layer.compression { + case compression.GZip: + mediaTypeMismatch = + layer.mediaType != types.OCILayer && + layer.mediaType != types.OCIRestrictedLayer && + layer.mediaType != types.DockerLayer + + case compression.ZStd: + mediaTypeMismatch = layer.mediaType != types.OCILayerZStd + } + + if mediaTypeMismatch { + logs.Warn.Printf("Unexpected mediaType (%s) for selected compression in %s in LayerFromOpener().", layer.mediaType, layer.compression) + } + + if layer.digest, layer.size, err = computeDigest(layer.compressedopener); err != nil { + return nil, err + } + + empty := v1.Hash{} + if layer.diffID == empty { + if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil { + return nil, err + } + } + + return layer, nil +} + +// LayerFromReader returns a v1.Layer given a io.Reader. +// +// The reader's contents are read and buffered to a temp file in the process. +// +// Deprecated: Use LayerFromOpener or stream.NewLayer instead, if possible. +func LayerFromReader(reader io.Reader, opts ...LayerOption) (v1.Layer, error) { + tmp, err := os.CreateTemp("", "") + if err != nil { + return nil, fmt.Errorf("creating temp file to buffer reader: %w", err) + } + if _, err := io.Copy(tmp, reader); err != nil { + return nil, fmt.Errorf("writing temp file to buffer reader: %w", err) + } + return LayerFromFile(tmp.Name(), opts...) +} + +func computeDigest(opener Opener) (v1.Hash, int64, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, 0, err + } + defer rc.Close() + + return v1.SHA256(rc) +} + +func computeDiffID(opener Opener) (v1.Hash, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, err + } + defer rc.Close() + + digest, _, err := v1.SHA256(rc) + return digest, err +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go new file mode 100644 index 0000000000..e607df164a --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -0,0 +1,457 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" +) + +// WriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.Write with a new file. +func WriteToFile(p string, ref name.Reference, img v1.Image, opts ...WriteOption) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return Write(ref, img, w, opts...) +} + +// MultiWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiWrite with a new file. +func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image, opts ...WriteOption) error { + refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWriteToFile(p, refToImage, opts...) +} + +// MultiRefWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file. +func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image, opts ...WriteOption) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return MultiRefWrite(refToImage, w, opts...) +} + +// Write is a wrapper to write a single image and tag to a tarball. +func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) error { + return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...) +} + +// MultiWrite writes the contents of each image to the provided writer, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. +func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer, opts ...WriteOption) error { + refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWrite(refToImage, w, opts...) +} + +// MultiRefWrite writes the contents of each image to the provided writer, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. 
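+//
+// A minimal sketch (assuming ref and img are already in hand):
+//
+//	f, err := os.Create("image.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, f)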
+func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer, opts ...WriteOption) error { + // process options + o := &writeOptions{ + updates: nil, + } + for _, option := range opts { + if err := option(o); err != nil { + return err + } + } + + imageToTags := dedupRefToImage(refToImage) + size, mBytes, err := getSizeAndManifest(imageToTags) + if err != nil { + return sendUpdateReturn(o, err) + } + + return writeImagesToTar(imageToTags, mBytes, size, w, o) +} + +// sendUpdateReturn return the passed in error message, also sending on update channel, if it exists +func sendUpdateReturn(o *writeOptions, err error) error { + if o != nil && o.updates != nil { + o.updates <- v1.Update{ + Error: err, + } + } + return err +} + +// sendProgressWriterReturn return the passed in error message, also sending on update channel, if it exists, along with downloaded information +func sendProgressWriterReturn(pw *progressWriter, err error) error { + if pw != nil { + return pw.Error(err) + } + return err +} + +// writeImagesToTar writes the images to the tarball +func writeImagesToTar(imageToTags map[v1.Image][]string, m []byte, size int64, w io.Writer, o *writeOptions) (err error) { + if w == nil { + return sendUpdateReturn(o, errors.New("must pass valid writer")) + } + + tw := w + var pw *progressWriter + + // we only calculate the sizes and use a progressWriter if we were provided + // an option with a progress channel + if o != nil && o.updates != nil { + pw = &progressWriter{ + w: w, + updates: o.updates, + size: size, + } + tw = pw + } + + tf := tar.NewWriter(tw) + defer tf.Close() + + seenLayerDigests := make(map[string]struct{}) + + for img := range imageToTags { + // Write the config. + cfgName, err := img.ConfigName() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + cfgBlob, err := img.RawConfigFile() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { + return sendProgressWriterReturn(pw, err) + } + + // Write the layers. + layers, err := img.Layers() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. 
"sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + if _, ok := seenLayerDigests[hex]; ok { + continue + } + seenLayerDigests[hex] = struct{}{} + + r, err := l.Compressed() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + blobSize, err := l.Size() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + + if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { + return sendProgressWriterReturn(pw, err) + } + } + } + if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(m), int64(len(m))); err != nil { + return sendProgressWriterReturn(pw, err) + } + + // be sure to close the tar writer so everything is flushed out before we send our EOF + if err := tf.Close(); err != nil { + return sendProgressWriterReturn(pw, err) + } + // send an EOF to indicate finished on the channel, but nil as our return error + _ = sendProgressWriterReturn(pw, io.EOF) + return nil +} + +// calculateManifest calculates the manifest and optionally the size of the tar file +func calculateManifest(imageToTags map[v1.Image][]string) (m Manifest, err error) { + if len(imageToTags) == 0 { + return nil, errors.New("set of images is empty") + } + + for img, tags := range imageToTags { + cfgName, err := img.ConfigName() + if err != nil { + return nil, err + } + + // Store foreign layer info. + layerSources := make(map[v1.Hash]v1.Descriptor) + + // Write the layers. + layers, err := img.Layers() + if err != nil { + return nil, err + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return nil, err + } + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. "sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + // Add to LayerSources if it's a foreign layer. + desc, err := partial.BlobDescriptor(img, d) + if err != nil { + return nil, err + } + if !desc.MediaType.IsDistributable() { + diffid, err := partial.BlobToDiffID(img, d) + if err != nil { + return nil, err + } + layerSources[diffid] = *desc + } + } + + // Generate the tar descriptor and write it. + m = append(m, Descriptor{ + Config: cfgName.String(), + RepoTags: tags, + Layers: layerFiles, + LayerSources: layerSources, + }) + } + // sort by name of the repotags so it is consistent. 
Alternatively, we could sort by hash of the + // descriptor, but that would make it hard for humans to process + sort.Slice(m, func(i, j int) bool { + return strings.Join(m[i].RepoTags, ",") < strings.Join(m[j].RepoTags, ",") + }) + + return m, nil +} + +// CalculateSize calculates the expected complete size of the output tar file +func CalculateSize(refToImage map[name.Reference]v1.Image) (size int64, err error) { + imageToTags := dedupRefToImage(refToImage) + size, _, err = getSizeAndManifest(imageToTags) + return size, err +} + +func getSizeAndManifest(imageToTags map[v1.Image][]string) (int64, []byte, error) { + m, err := calculateManifest(imageToTags) + if err != nil { + return 0, nil, fmt.Errorf("unable to calculate manifest: %w", err) + } + mBytes, err := json.Marshal(m) + if err != nil { + return 0, nil, fmt.Errorf("could not marshall manifest to bytes: %w", err) + } + + size, err := calculateTarballSize(imageToTags, mBytes) + if err != nil { + return 0, nil, fmt.Errorf("error calculating tarball size: %w", err) + } + return size, mBytes, nil +} + +// calculateTarballSize calculates the size of the tar file +func calculateTarballSize(imageToTags map[v1.Image][]string, mBytes []byte) (size int64, err error) { + seenLayerDigests := make(map[string]struct{}) + for img, name := range imageToTags { + manifest, err := img.Manifest() + if err != nil { + return size, fmt.Errorf("unable to get manifest for img %s: %w", name, err) + } + size += calculateSingleFileInTarSize(manifest.Config.Size) + for _, l := range manifest.Layers { + hex := l.Digest.Hex + if _, ok := seenLayerDigests[hex]; ok { + continue + } + seenLayerDigests[hex] = struct{}{} + size += calculateSingleFileInTarSize(l.Size) + } + } + // add the manifest + size += calculateSingleFileInTarSize(int64(len(mBytes))) + + // add the two padding blocks that indicate end of a tar file + size += 1024 + return size, nil +} + +func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string { + imageToTags := make(map[v1.Image][]string) + + for ref, img := range refToImage { + if tag, ok := ref.(name.Tag); ok { + if tags, ok := imageToTags[img]; !ok || tags == nil { + imageToTags[img] = []string{} + } + // Docker cannot load tarballs without an explicit tag: + // https://github.com/google/go-containerregistry/issues/890 + // + // We can't use the fully qualified tag.Name() because of rules_docker: + // https://github.com/google/go-containerregistry/issues/527 + // + // If the tag is "latest", but tag.String() doesn't end in ":latest", + // just append it. Kind of gross, but should work for now. 
+ ts := tag.String() + if tag.Identifier() == name.DefaultTag && !strings.HasSuffix(ts, ":"+name.DefaultTag) { + ts = fmt.Sprintf("%s:%s", ts, name.DefaultTag) + } + imageToTags[img] = append(imageToTags[img], ts) + } else if _, ok := imageToTags[img]; !ok { + imageToTags[img] = nil + } + } + + return imageToTags +} + +// writeTarEntry writes a file to the provided writer with a corresponding tar header +func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { + hdr := &tar.Header{ + Mode: 0644, + Typeflag: tar.TypeReg, + Size: size, + Name: path, + } + if err := tf.WriteHeader(hdr); err != nil { + return err + } + _, err := io.Copy(tf, r) + return err +} + +// ComputeManifest get the manifest.json that will be written to the tarball +// for multiple references +func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) { + imageToTags := dedupRefToImage(refToImage) + return calculateManifest(imageToTags) +} + +// WriteOption a function option to pass to Write() +type WriteOption func(*writeOptions) error +type writeOptions struct { + updates chan<- v1.Update +} + +// WithProgress create a WriteOption for passing to Write() that enables +// a channel to receive updates as they are downloaded and written to disk. +func WithProgress(updates chan<- v1.Update) WriteOption { + return func(o *writeOptions) error { + o.updates = updates + return nil + } +} + +// progressWriter is a writer which will send the download progress +type progressWriter struct { + w io.Writer + updates chan<- v1.Update + size, complete int64 +} + +func (pw *progressWriter) Write(p []byte) (int, error) { + n, err := pw.w.Write(p) + if err != nil { + return n, err + } + + pw.complete += int64(n) + + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + } + + return n, err +} + +func (pw *progressWriter) Error(err error) error { + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + Error: err, + } + return err +} + +func (pw *progressWriter) Close() error { + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + Error: io.EOF, + } + return io.EOF +} + +// calculateSingleFileInTarSize calculate the size a file will take up in a tar archive, +// given the input data. Provided by rounding up to nearest whole block (512) +// and adding header 512 +func calculateSingleFileInTarSize(in int64) (out int64) { + // doing this manually, because math.Round() works with float64 + out += in + if remainder := out % 512; remainder != 0 { + out += (512 - remainder) + } + out += 512 + return out +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go new file mode 100644 index 0000000000..c86657d7b8 --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go @@ -0,0 +1,98 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types holds common OCI media types. +package types + +// MediaType is an enumeration of the supported mime types that an element of an image might have. +type MediaType string + +// The collection of known MediaType values. +const ( + OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" + OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" + OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" + OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" + OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" + OCILayerZStd MediaType = "application/vnd.oci.image.layer.v1.tar+zstd" + OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" + OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" + DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" + DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" + DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" + DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" + + OCIVendorPrefix = "vnd.oci" + DockerVendorPrefix = "vnd.docker" +) + +// IsDistributable returns true if a layer is distributable, see: +// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers +func (m MediaType) IsDistributable() bool { + switch m { + case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return false + } + return true +} + +// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index. +func (m MediaType) IsImage() bool { + switch m { + case OCIManifestSchema1, DockerManifestSchema2: + return true + } + return false +} + +// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image. +func (m MediaType) IsIndex() bool { + switch m { + case OCIImageIndex, DockerManifestList: + return true + } + return false +} + +// IsConfig returns true if the mediaType represents a config, as opposed to something else, like an image. 
+func (m MediaType) IsConfig() bool { + switch m { + case OCIConfigJSON, DockerConfigJSON: + return true + } + return false +} + +func (m MediaType) IsSchema1() bool { + switch m { + case DockerManifestSchema1, DockerManifestSchema1Signed: + return true + } + return false +} + +func (m MediaType) IsLayer() bool { + switch m { + case DockerLayer, DockerUncompressedLayer, OCILayer, OCILayerZStd, OCIUncompressedLayer, DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return true + } + return false +} diff --git a/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go new file mode 100644 index 0000000000..a47b7475ed --- /dev/null +++ b/openshift/tools/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go @@ -0,0 +1,339 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = new(HealthConfig) + (*in).DeepCopyInto(*out) + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Shell != nil { + in, out := &in.Shell, &out.Shell + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]History, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.RootFS.DeepCopyInto(&out.RootFS) + in.Config.DeepCopyInto(&out.Config) + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. +func (in *ConfigFile) DeepCopy() *ConfigFile { + if in == nil { + return nil + } + out := new(ConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Descriptor) DeepCopyInto(out *Descriptor) { + *out = *in + out.Digest = in.Digest + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(Platform) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. +func (in *Descriptor) DeepCopy() *Descriptor { + if in == nil { + return nil + } + out := new(Descriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hash) DeepCopyInto(out *Hash) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. +func (in *Hash) DeepCopy() *Hash { + if in == nil { + return nil + } + out := new(Hash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { + *out = *in + if in.Test != nil { + in, out := &in.Test, &out.Test + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. +func (in *HealthConfig) DeepCopy() *HealthConfig { + if in == nil { + return nil + } + out := new(HealthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *History) DeepCopyInto(out *History) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. +func (in *History) DeepCopy() *History { + if in == nil { + return nil + } + out := new(History) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { + *out = *in + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. +func (in *IndexManifest) DeepCopy() *IndexManifest { + if in == nil { + return nil + } + out := new(IndexManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Manifest) DeepCopyInto(out *Manifest) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. +func (in *Manifest) DeepCopy() *Manifest { + if in == nil { + return nil + } + out := new(Manifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootFS) DeepCopyInto(out *RootFS) { + *out = *in + if in.DiffIDs != nil { + in, out := &in.DiffIDs, &out.DiffIDs + *out = make([]Hash, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. +func (in *RootFS) DeepCopy() *RootFS { + if in == nil { + return nil + } + out := new(RootFS) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. 
+func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/AUTHORS b/openshift/tools/vendor/github.com/google/go-github/v53/AUTHORS deleted file mode 100644 index 5e40cd1f38..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/AUTHORS +++ /dev/null @@ -1,433 +0,0 @@ -# This is the official list of go-github authors for copyright purposes. -# -# This does not necessarily list everyone who has contributed code, since in -# some cases, their employer may be the copyright holder. To see the full list -# of contributors, see the revision history in source control or -# https://github.com/google/go-github/graphs/contributors. -# -# Authors who wish to be recognized in this file should add themselves (or -# their employer, as appropriate). - -178inaba -2BFL -413x -Abed Kibbe -Abhinav Gupta -Abhishek Veeramalla -aboy -adrienzieba -afdesk -Ahmed Hagy -Aidan Steele -Ainsley Chong -ajz01 -Akeda Bagus -Akhil Mohan -Alec Thomas -Aleks Clark -Alex Bramley -Alex Orr -Alex Su -Alex Unger -Alexander Harkness -Alexis Gauthiez -Ali Farooq -Allan Guwatudde -Allen Sun -Amey Sakhadeo -Anders Janmyr -Andreas Garnæs -Andrew Ryabchun -Andrew Svoboda -Andy Grunwald -Andy Hume -Andy Lindeman -angie pinilla -anjanashenoy -Anshuman Bhartiya -Antoine -Antoine Pelisse -Anton Nguyen -Anubha Kushwaha -appilon -aprp -Aravind -Arda Kuyumcu -Arıl Bozoluk -Asier Marruedo -Austin Burdine -Austin Dizzy -Azuka Okuleye -Ben Batha -Benjamen Keroack -Beshr Kayali -Beyang Liu -Billy Keyes -Billy Lynch -Bjorn Neergaard -Björn Häuser -boljen -Bracken -Brad Harris -Brad Moylan -Bradley Falzon -Bradley McAllister -Brandon Butler -Brandon Cook -Brett Kuhlman -Brett Logan -Brian Egizi -Bryan Boreham -Bryan Peterson -Cami Diez -Carl Johnson -Carlos Alexandro Becker -Carlos Tadeu Panato Junior -ChandanChainani -chandresh-pancholi -Charles Fenwick Elliott -Charlie Yan -Chmouel Boudjnah -Chris King -Chris Mc -Chris Raborg -Chris Roche -Chris Schaefer -chrisforrette -Christian Bargmann -Christian Muehlhaeuser -Christoph Sassenberg -CI Monk -Colin Misare -Craig Gumbley -Craig Peterson -Cristian Maglie -Cyb3r Jak3 -Daehyeok Mun -Dalton Hubble -Daniel Lanner -Daniel Leavitt -Daniel Nilsson -Daoq -Dave Du Cros -Dave Henderson -Dave Perrett -Dave Protasowski -David Deng -David Gamba -David J. M. Karlsen -David Jannotta -David Ji -David Lopez Reyes -Davide Zipeto -Dennis Webb -Derek Jobst -DeviousLab -Dhi Aurrahman -Diego Lapiduz -Dmitri Shuralyov -dmnlk -Don Petersen -Doug Turner -Drew Fradette -Dustin Deus -Eivind -Eli Uriegas -Elliott Beach -Emerson Wood -Emil V -Eng Zer Jun -eperm -Erick Fejta -Erik Nobel -erwinvaneyk -Evan Elias -Fabian Holler -Fabrice -Fatema-Moaiyadi -Felix Geisendörfer -Filippo Valsorda -Florian Forster -Florian Wagner -Francesc Gil -Francis -Francisco Guimarães -François de Metz -Fredrik Jönsson -Gabriel -Garrett Squire -George Kontridze -Georgy Buranov -Glen Mailer -Gnahz -Google Inc. 
-Grachev Mikhail -griffin_stewie -Guillaume Jacquet -Guz Alexander -Guðmundur Bjarni Ólafsson -Hanno Hecker -Hari haran -Harikesh00 -haya14busa -haya14busa -Hiroki Ito -Hubot Jr -Huy Tr -huydx -i2bskn -Iain Steers -Ikko Ashimine -Ioannis Georgoulas -Isao Jonas -ishan upadhyay -isqua -Jacob Valdemar -Jake Krammer -Jake White -Jameel Haffejee -James Bowes -James Cockbain -James Loh -Jamie West -Jan Kosecki -Jan Švábík -Javier Campanini -Jef LeCompte -Jens Rantil -Jeremy Morris -Jesse Haka -Jesse Newland -Jihoon Chung -Jille Timmermans -Jimmi Dyson -Joan Saum -Joe Tsai -John Barton -John Engelman -John Jones -John Liu -Jordan Brockopp -Jordan Sussman -Joshua Bezaleel Abednego -João Cerqueira -JP Phillips -jpbelanger-mtl -Juan -Juan Basso -Julien Garcia Gonzalez -Julien Rostand -Junya Kono -Justin Abrahms -Justin Toh -Jusung Lee -jzhoucliqr -k1rnt -kadern0 -Katrina Owen -Kautilya Tripathi -Keita Urashima -Kevin Burke -Kevin Wang -Kevin Zhao -Kirill -Konrad Malawski -Kookheon Kwon -Krishna Indani -Krzysztof Kowalczyk -Kshitij Saraogi -Kumar Saurabh -Kyle Kurz -kyokomi -Laurent Verdoïa -leopoldwang -Liam Galvin -Lluis Campos -Lovro Mažgon -Luca Campese -Lucas Alcantara -Luis Davim -Luke Evers -Luke Kysow -Luke Roberts -Luke Young -lynn [they] -Maksim Zhylinski -Marc Binder -Marcelo Carlos -Mark Tareshawty -Martin Holman -Martin-Louis Bright -Martins Sipenko -Marwan Sulaiman -Masayuki Izumi -Mat Geist -Matija Horvat -Matin Rahmanian -Matt -Matt Brender -Matt Gaunt -Matt Landis -Matt Moore -Maxime Bury -Michael Meng -Michael Spiegel -Michael Tiller -Michał Glapa -Michelangelo Morrillo -Miguel Elias dos Santos -Mike Chen -Mohammed AlDujaili -Mukundan Senthil -Munia Balayil -Mustafa Abban -Nadav Kaner -Nathan VanBenschoten -Navaneeth Suresh -Neil O'Toole -Nick Miyake -Nick Platt -Nick Spragg -Nikhita Raghunath -Nilesh Singh -Noah Hanjun Lee -Noah Zoschke -ns-cweber -Ole Orhagen -Oleg Kovalov -Ondřej Kupka -Ori Talmor -Pablo Pérez Schröder -Palash Nigam -Panagiotis Moustafellos -Parham Alvani -pari-27 -Parker Moore -parkhyukjun89 -Pat Alwell -Patrick DeVivo -Patrick Marabeas -Pavel Dvoinos -Pavel Shtanko -Pete Wagner -Petr Shevtsov -Pierce McEntagart -Pierre Carrier -Piotr Zurek -Piyush Chugh -Pratik Mallya -Qais Patankar -Quang Le Hong -Quentin Leffray -Quinn Slack -Rackspace US, Inc. -Radek Simko -Radliński Ignacy -Rajat Jindal -Rajendra arora -Rajkumar -Ranbir Singh -Ravi Shekhar Jethani -RaviTeja Pothana -rc1140 -Red Hat, Inc. -Reetuparna Mukherjee -reeves122 -Reinier Timmer -Renjith R -Ricco Førgaard -Richard de Vries -Rob Figueiredo -Rohit Upadhyay -Rojan Dinc -Ronak Jain -Ronan Pelliard -Ross Gustafson -Ruben Vereecken -Russell Boley -Ryan Leung -Ryan Lower -Ryo Nakao -Saaarah -Safwan Olaimat -Sahil Dua -saisi -Sam Minnée -Sandeep Sukhani -Sander Knape -Sander van Harmelen -Sanket Payghan -Sarah Funkhouser -Sarasa Kisaragi -Sasha Melentyev -Sean Wang -Sebastian Mandrean -Sebastian Mæland Pedersen -Sergei Popinevskii -Sergey Romanov -Sergio Garcia -Seth Vargo -Sevki -Shagun Khemka -shakeelrao -Shawn Catanzarite -Shawn Smith -Shibasis Patel -Sho Okada -Shrikrishna Singh -Simon Davis -sona-tar -SoundCloud, Ltd. 
-Sridhar Mocherla -SriVignessh Pss -Stefan Sedich -Steve Teuber -Stian Eikeland -Suhaib Mujahid -sushmita wable -Szymon Kodrebski -Søren Hansen -Takashi Yoneuchi -Takayuki Watanabe -Taketoshi Fujiwara -Taketoshi Fujiwara -Takuma Kajikawa -Tasya Aditya Rukmana -Theo Henson -Theofilos Petsios -Thomas Aidan Curran -Thomas Bruyelle -Tim Rogers -Timothée Peignier -Tingluo Huang -tkhandel -Tobias Gesellchen -Tom Payne -Trey Tacon -tsbkw -ttacon -Vaibhav Singh -Varadarajan Aravamudhan -Victor Castell -Victor Vrantchan -vikkyomkar -Vlad Ungureanu -Wasim Thabraze -Weslei Juan Moser Pereira -Will Maier -Willem D'Haeseleer -William Bailey -William Cooke -Xabi -xibz -Yann Malet -Yannick Utard -Yicheng Qin -Yosuke Akatsuka -Yumikiyo Osanai -Yusef Mohamadi -Yusuke Kuoka -Zach Latta -zhouhaibing089 -六开箱 -缘生 diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/LICENSE b/openshift/tools/vendor/github.com/google/go-github/v53/LICENSE deleted file mode 100644 index 28b6486f0b..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 The go-github AUTHORS. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions.go deleted file mode 100644 index 8d552f2d0d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// ActionsService handles communication with the actions related -// methods of the GitHub API. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/ -type ActionsService service diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_artifacts.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_artifacts.go deleted file mode 100644 index 441a53910e..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_artifacts.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/http" - "net/url" -) - -// ArtifactWorkflowRun represents a GitHub artifact's workflow run. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts -type ArtifactWorkflowRun struct { - ID *int64 `json:"id,omitempty"` - RepositoryID *int64 `json:"repository_id,omitempty"` - HeadRepositoryID *int64 `json:"head_repository_id,omitempty"` - HeadBranch *string `json:"head_branch,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` -} - -// Artifact represents a GitHub artifact. Artifacts allow sharing -// data between jobs in a workflow and provide storage for data -// once a workflow is complete. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts -type Artifact struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - SizeInBytes *int64 `json:"size_in_bytes,omitempty"` - URL *string `json:"url,omitempty"` - ArchiveDownloadURL *string `json:"archive_download_url,omitempty"` - Expired *bool `json:"expired,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - ExpiresAt *Timestamp `json:"expires_at,omitempty"` - WorkflowRun *ArtifactWorkflowRun `json:"workflow_run,omitempty"` -} - -// ArtifactList represents a list of GitHub artifacts. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#artifacts -type ArtifactList struct { - TotalCount *int64 `json:"total_count,omitempty"` - Artifacts []*Artifact `json:"artifacts,omitempty"` -} - -// ListArtifacts lists all artifacts that belong to a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#list-artifacts-for-a-repository -func (s *ActionsService) ListArtifacts(ctx context.Context, owner, repo string, opts *ListOptions) (*ArtifactList, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/artifacts", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - artifactList := new(ArtifactList) - resp, err := s.client.Do(ctx, req, artifactList) - if err != nil { - return nil, resp, err - } - - return artifactList, resp, nil -} - -// ListWorkflowRunArtifacts lists all artifacts that belong to a workflow run. 
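ListArtifacts above shows the request pattern every method in this client follows: render the URL, build a typed request, decode into a typed struct. A minimal sketch of driving it end to end; the token and owner/repo values are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	// NewTokenClient wraps the default HTTP client with an oauth2 transport.
	client := github.NewTokenClient(ctx, "ghp_placeholder")

	artifacts, _, err := client.Actions.ListArtifacts(ctx, "my-org", "my-repo",
		&github.ListOptions{PerPage: 30})
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range artifacts.Artifacts {
		fmt.Printf("%s: %d bytes, expired=%t\n", a.GetName(), a.GetSizeInBytes(), a.GetExpired())
	}
}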
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#list-workflow-run-artifacts -func (s *ActionsService) ListWorkflowRunArtifacts(ctx context.Context, owner, repo string, runID int64, opts *ListOptions) (*ArtifactList, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/artifacts", owner, repo, runID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - artifactList := new(ArtifactList) - resp, err := s.client.Do(ctx, req, artifactList) - if err != nil { - return nil, resp, err - } - - return artifactList, resp, nil -} - -// GetArtifact gets a specific artifact for a workflow run. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#get-an-artifact -func (s *ActionsService) GetArtifact(ctx context.Context, owner, repo string, artifactID int64) (*Artifact, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v", owner, repo, artifactID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - artifact := new(Artifact) - resp, err := s.client.Do(ctx, req, artifact) - if err != nil { - return nil, resp, err - } - - return artifact, resp, nil -} - -// DownloadArtifact gets a redirect URL to download an archive for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#download-an-artifact -func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo string, artifactID int64, followRedirects bool) (*url.URL, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v/zip", owner, repo, artifactID) - - resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusFound { - return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) - } - - parsedURL, err := url.Parse(resp.Header.Get("Location")) - if err != nil { - return nil, newResponse(resp), err - } - - return parsedURL, newResponse(resp), nil -} - -// DeleteArtifact deletes a workflow run artifact. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#delete-an-artifact -func (s *ActionsService) DeleteArtifact(ctx context.Context, owner, repo string, artifactID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v", owner, repo, artifactID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_cache.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_cache.go deleted file mode 100644 index 9592d9ab62..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_cache.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ActionsCache represents a GitHub action cache. 
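DownloadArtifact deliberately stops at the redirect when followRedirects is false: it hands back the pre-signed Location URL instead of streaming the archive, leaving the transfer to the caller. A sketch of completing the download, assuming a client constructed as in the previous example:

package examples

import (
	"context"
	"io"
	"net/http"
	"os"

	"github.com/google/go-github/v53/github"
)

// FetchArtifactZip resolves the artifact's signed download URL and streams the
// zip archive to disk. The URL is pre-signed, so no Authorization header is needed.
func FetchArtifactZip(ctx context.Context, client *github.Client, owner, repo string, artifactID int64, dest string) error {
	u, _, err := client.Actions.DownloadArtifact(ctx, owner, repo, artifactID, false)
	if err != nil {
		return err
	}
	resp, err := http.Get(u.String())
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, resp.Body)
	return err
}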
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#about-the-cache-api -type ActionsCache struct { - ID *int64 `json:"id,omitempty" url:"-"` - Ref *string `json:"ref,omitempty" url:"ref"` - Key *string `json:"key,omitempty" url:"key"` - Version *string `json:"version,omitempty" url:"-"` - LastAccessedAt *Timestamp `json:"last_accessed_at,omitempty" url:"-"` - CreatedAt *Timestamp `json:"created_at,omitempty" url:"-"` - SizeInBytes *int64 `json:"size_in_bytes,omitempty" url:"-"` -} - -// ActionsCacheList represents a list of GitHub actions Cache. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-github-actions-caches-for-a-repository -type ActionsCacheList struct { - TotalCount int `json:"total_count"` - ActionsCaches []*ActionsCache `json:"actions_caches,omitempty"` -} - -// ActionsCacheUsage represents a GitHub Actions Cache Usage object. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-a-repository -type ActionsCacheUsage struct { - FullName string `json:"full_name"` - ActiveCachesSizeInBytes int64 `json:"active_caches_size_in_bytes"` - ActiveCachesCount int `json:"active_caches_count"` -} - -// ActionsCacheUsageList represents a list of repositories with GitHub Actions cache usage for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-a-repository -type ActionsCacheUsageList struct { - TotalCount int `json:"total_count"` - RepoCacheUsage []*ActionsCacheUsage `json:"repository_cache_usages,omitempty"` -} - -// TotalCacheUsage represents total GitHub actions cache usage of an organization or enterprise. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-an-enterprise -type TotalCacheUsage struct { - TotalActiveCachesUsageSizeInBytes int64 `json:"total_active_caches_size_in_bytes"` - TotalActiveCachesCount int `json:"total_active_caches_count"` -} - -// ActionsCacheListOptions represents a list of all possible optional Query parameters for ListCaches method. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-github-actions-caches-for-a-repository -type ActionsCacheListOptions struct { - ListOptions - // The Git reference for the results you want to list. - // The ref for a branch can be formatted either as refs/heads/ - // or simply . To reference a pull request use refs/pull//merge - Ref *string `url:"ref,omitempty"` - Key *string `url:"key,omitempty"` - // Can be one of: "created_at", "last_accessed_at", "size_in_bytes". Default: "last_accessed_at" - Sort *string `url:"sort,omitempty"` - // Can be one of: "asc", "desc" Default: desc - Direction *string `url:"direction,omitempty"` -} - -// ListCaches lists the GitHub Actions caches for a repository. -// You must authenticate using an access token with the repo scope to use this endpoint. -// -// Permissions: must have the actions:read permission to use this endpoint. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-github-actions-caches-for-a-repository -func (s *ActionsService) ListCaches(ctx context.Context, owner, repo string, opts *ActionsCacheListOptions) (*ActionsCacheList, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/caches", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - actionCacheList := new(ActionsCacheList) - resp, err := s.client.Do(ctx, req, actionCacheList) - if err != nil { - return nil, resp, err - } - - return actionCacheList, resp, nil -} - -// DeleteCachesByKey deletes one or more GitHub Actions caches for a repository, using a complete cache key. -// By default, all caches that match the provided key are deleted, but you can optionally provide -// a Git ref to restrict deletions to caches that match both the provided key and the Git ref. -// The ref for a branch can be formatted either as "refs/heads/" or simply "". -// To reference a pull request use "refs/pull//merge". If you don't want to use ref just pass nil in parameter. -// -// Permissions: You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have the actions:write permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-github-actions-caches-for-a-repository-using-a-cache-key -func (s *ActionsService) DeleteCachesByKey(ctx context.Context, owner, repo, key string, ref *string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/caches", owner, repo) - u, err := addOptions(u, ActionsCache{Key: &key, Ref: ref}) - if err != nil { - return nil, err - } - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeleteCachesByID deletes a GitHub Actions cache for a repository, using a cache ID. -// -// Permissions: You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have the actions:write permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-a-github-actions-cache-for-a-repository-using-a-cache-id -func (s *ActionsService) DeleteCachesByID(ctx context.Context, owner, repo string, cacheID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/caches/%v", owner, repo, cacheID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetCacheUsageForRepo gets GitHub Actions cache usage for a repository. The data fetched using this API is refreshed approximately every 5 minutes, -// so values returned from this endpoint may take at least 5 minutes to get updated. -// -// Permissions: Anyone with read access to the repository can use this endpoint. If the repository is private, you must use an -// access token with the repo scope. GitHub Apps must have the actions:read permission to use this endpoint. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-a-repository -func (s *ActionsService) GetCacheUsageForRepo(ctx context.Context, owner, repo string) (*ActionsCacheUsage, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/cache/usage", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - cacheUsage := new(ActionsCacheUsage) - res, err := s.client.Do(ctx, req, cacheUsage) - if err != nil { - return nil, res, err - } - - return cacheUsage, res, err -} - -// ListCacheUsageByRepoForOrg lists repositories and their GitHub Actions cache usage for an organization. The data fetched using this API is -// refreshed approximately every 5 minutes, so values returned from this endpoint may take at least 5 minutes to get updated. -// -// Permissions: You must authenticate using an access token with the read:org scope to use this endpoint. -// GitHub Apps must have the organization_admistration:read permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-repositories-with-github-actions-cache-usage-for-an-organization -func (s *ActionsService) ListCacheUsageByRepoForOrg(ctx context.Context, org string, opts *ListOptions) (*ActionsCacheUsageList, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/cache/usage-by-repository", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - cacheUsage := new(ActionsCacheUsageList) - res, err := s.client.Do(ctx, req, cacheUsage) - if err != nil { - return nil, res, err - } - - return cacheUsage, res, err -} - -// GetTotalCacheUsageForOrg gets the total GitHub Actions cache usage for an organization. The data fetched using this API is refreshed approximately every -// 5 minutes, so values returned from this endpoint may take at least 5 minutes to get updated. -// -// Permissions: You must authenticate using an access token with the read:org scope to use this endpoint. -// GitHub Apps must have the organization_admistration:read permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-an-organization -func (s *ActionsService) GetTotalCacheUsageForOrg(ctx context.Context, org string) (*TotalCacheUsage, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/cache/usage", org) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - cacheUsage := new(TotalCacheUsage) - res, err := s.client.Do(ctx, req, cacheUsage) - if err != nil { - return nil, res, err - } - - return cacheUsage, res, err -} - -// GetTotalCacheUsageForEnterprise gets the total GitHub Actions cache usage for an enterprise. The data fetched using this API is refreshed approximately every 5 minutes, -// so values returned from this endpoint may take at least 5 minutes to get updated. -// -// Permissions: You must authenticate using an access token with the "admin:enterprise" scope to use this endpoint. 
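Taken together, the cache methods support a simple eviction loop: inspect usage, list caches, delete by key. A sketch under the same client assumptions as above (the key and repo names are placeholders):

package examples

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

// PruneCacheKey reports a repository's cache pressure, then deletes every
// cache stored under key. Passing ref == nil removes matches on all refs,
// per the DeleteCachesByKey contract above.
func PruneCacheKey(ctx context.Context, client *github.Client, owner, repo, key string) error {
	usage, _, err := client.Actions.GetCacheUsageForRepo(ctx, owner, repo)
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d caches, %d bytes active\n",
		usage.FullName, usage.ActiveCachesCount, usage.ActiveCachesSizeInBytes)

	caches, _, err := client.Actions.ListCaches(ctx, owner, repo, &github.ActionsCacheListOptions{
		Sort:      github.String("size_in_bytes"),
		Direction: github.String("desc"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("pruning key %q out of %d caches\n", key, caches.TotalCount)
	_, err = client.Actions.DeleteCachesByKey(ctx, owner, repo, key, nil)
	return err
}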
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-an-enterprise -func (s *ActionsService) GetTotalCacheUsageForEnterprise(ctx context.Context, enterprise string) (*TotalCacheUsage, *Response, error) { - u := fmt.Sprintf("enterprises/%v/actions/cache/usage", enterprise) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - cacheUsage := new(TotalCacheUsage) - res, err := s.client.Do(ctx, req, cacheUsage) - if err != nil { - return nil, res, err - } - - return cacheUsage, res, err -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_oidc.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_oidc.go deleted file mode 100644 index b7f2d26ae9..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_oidc.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// OIDCSubjectClaimCustomTemplate represents an OIDC subject claim customization template. -type OIDCSubjectClaimCustomTemplate struct { - UseDefault *bool `json:"use_default,omitempty"` - IncludeClaimKeys []string `json:"include_claim_keys,omitempty"` -} - -// GetOrgOIDCSubjectClaimCustomTemplate gets the subject claim customization template for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#get-the-customization-template-for-an-oidc-subject-claim-for-an-organization -func (s *ActionsService) GetOrgOIDCSubjectClaimCustomTemplate(ctx context.Context, org string) (*OIDCSubjectClaimCustomTemplate, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/oidc/customization/sub", org) - return s.getOIDCSubjectClaimCustomTemplate(ctx, u) -} - -// GetRepoOIDCSubjectClaimCustomTemplate gets the subject claim customization template for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#get-the-customization-template-for-an-oidc-subject-claim-for-a-repository -func (s *ActionsService) GetRepoOIDCSubjectClaimCustomTemplate(ctx context.Context, owner, repo string) (*OIDCSubjectClaimCustomTemplate, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/oidc/customization/sub", owner, repo) - return s.getOIDCSubjectClaimCustomTemplate(ctx, u) -} - -func (s *ActionsService) getOIDCSubjectClaimCustomTemplate(ctx context.Context, url string) (*OIDCSubjectClaimCustomTemplate, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - tmpl := new(OIDCSubjectClaimCustomTemplate) - resp, err := s.client.Do(ctx, req, tmpl) - if err != nil { - return nil, resp, err - } - - return tmpl, resp, nil -} - -// SetOrgOIDCSubjectClaimCustomTemplate sets the subject claim customization for an organization. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#set-the-customization-template-for-an-oidc-subject-claim-for-an-organization -func (s *ActionsService) SetOrgOIDCSubjectClaimCustomTemplate(ctx context.Context, org string, template *OIDCSubjectClaimCustomTemplate) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/oidc/customization/sub", org) - return s.setOIDCSubjectClaimCustomTemplate(ctx, u, template) -} - -// SetRepoOIDCSubjectClaimCustomTemplate sets the subject claim customization for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#set-the-customization-template-for-an-oidc-subject-claim-for-a-repository -func (s *ActionsService) SetRepoOIDCSubjectClaimCustomTemplate(ctx context.Context, owner, repo string, template *OIDCSubjectClaimCustomTemplate) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/oidc/customization/sub", owner, repo) - return s.setOIDCSubjectClaimCustomTemplate(ctx, u, template) -} - -func (s *ActionsService) setOIDCSubjectClaimCustomTemplate(ctx context.Context, url string, template *OIDCSubjectClaimCustomTemplate) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, template) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go deleted file mode 100644 index 3566eb9d20..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// OrgRequiredWorkflow represents a required workflow object at the org level. -type OrgRequiredWorkflow struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - Scope *string `json:"scope,omitempty"` - Ref *string `json:"ref,omitempty"` - State *string `json:"state,omitempty"` - SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Repository *Repository `json:"repository,omitempty"` -} - -// OrgRequiredWorkflows represents the required workflows for the org. -type OrgRequiredWorkflows struct { - TotalCount *int `json:"total_count,omitempty"` - RequiredWorkflows []*OrgRequiredWorkflow `json:"required_workflows,omitempty"` -} - -// CreateUpdateRequiredWorkflowOptions represents the input object used to create or update required workflows. -type CreateUpdateRequiredWorkflowOptions struct { - WorkflowFilePath *string `json:"workflow_file_path,omitempty"` - RepositoryID *int64 `json:"repository_id,omitempty"` - Scope *string `json:"scope,omitempty"` - SelectedRepositoryIDs *SelectedRepoIDs `json:"selected_repository_ids,omitempty"` -} - -// RequiredWorkflowSelectedRepos represents the repos that a required workflow is applied to. -type RequiredWorkflowSelectedRepos struct { - TotalCount *int `json:"total_count,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` -} - -// RepoRequiredWorkflow represents a required workflow object at the repo level. 
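The OIDC getter/setter pair above is typically used to tighten cloud trust policies: tokens then carry predictable subject claims that a cloud-side condition can match exactly. A sketch of the repo-level variant (owner/repo are placeholders):

package examples

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// PinOIDCSubject opts the repository out of the org-wide default template and
// pins the subject claim to repository and ref.
func PinOIDCSubject(ctx context.Context, client *github.Client, owner, repo string) error {
	tmpl := &github.OIDCSubjectClaimCustomTemplate{
		UseDefault:       github.Bool(false),
		IncludeClaimKeys: []string{"repo", "ref"},
	}
	_, err := client.Actions.SetRepoOIDCSubjectClaimCustomTemplate(ctx, owner, repo, tmpl)
	return err
}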
-type RepoRequiredWorkflow struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - State *string `json:"state,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - BadgeURL *string `json:"badge_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - SourceRepository *Repository `json:"source_repository,omitempty"` -} - -// RepoRequiredWorkflows represents the required workflows for a repo. -type RepoRequiredWorkflows struct { - TotalCount *int `json:"total_count,omitempty"` - RequiredWorkflows []*RepoRequiredWorkflow `json:"required_workflows,omitempty"` -} - -// ListOrgRequiredWorkflows lists the RequiredWorkflows for an org. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-required-workflows -func (s *ActionsService) ListOrgRequiredWorkflows(ctx context.Context, org string, opts *ListOptions) (*OrgRequiredWorkflows, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/required_workflows", org) - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - requiredWorkflows := new(OrgRequiredWorkflows) - resp, err := s.client.Do(ctx, req, &requiredWorkflows) - if err != nil { - return nil, resp, err - } - - return requiredWorkflows, resp, nil -} - -// CreateRequiredWorkflow creates the required workflow in an org. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#create-a-required-workflow -func (s *ActionsService) CreateRequiredWorkflow(ctx context.Context, org string, createRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/required_workflows", org) - req, err := s.client.NewRequest("POST", url, createRequiredWorkflowOptions) - if err != nil { - return nil, nil, err - } - - orgRequiredWorkflow := new(OrgRequiredWorkflow) - resp, err := s.client.Do(ctx, req, orgRequiredWorkflow) - if err != nil { - return nil, resp, err - } - - return orgRequiredWorkflow, resp, nil -} - -// GetRequiredWorkflowByID get the RequiredWorkflows for an org by its ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-required-workflows -func (s *ActionsService) GetRequiredWorkflowByID(ctx context.Context, owner string, requiredWorkflowID int64) (*OrgRequiredWorkflow, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", owner, requiredWorkflowID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - requiredWorkflow := new(OrgRequiredWorkflow) - resp, err := s.client.Do(ctx, req, &requiredWorkflow) - if err != nil { - return nil, resp, err - } - - return requiredWorkflow, resp, nil -} - -// UpdateRequiredWorkflow updates a required workflow in an org. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#update-a-required-workflow -func (s *ActionsService) UpdateRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64, updateRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID) - req, err := s.client.NewRequest("PATCH", url, updateRequiredWorkflowOptions) - if err != nil { - return nil, nil, err - } - - orgRequiredWorkflow := new(OrgRequiredWorkflow) - resp, err := s.client.Do(ctx, req, orgRequiredWorkflow) - if err != nil { - return nil, resp, err - } - - return orgRequiredWorkflow, resp, nil -} - -// DeleteRequiredWorkflow deletes a required workflow in an org. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#update-a-required-workflow -func (s *ActionsService) DeleteRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID) - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// ListRequiredWorkflowSelectedRepos lists the Repositories selected for a workflow. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-selected-repositories-for-a-required-workflow -func (s *ActionsService) ListRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, opts *ListOptions) (*RequiredWorkflowSelectedRepos, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID) - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - requiredWorkflowRepos := new(RequiredWorkflowSelectedRepos) - resp, err := s.client.Do(ctx, req, &requiredWorkflowRepos) - if err != nil { - return nil, resp, err - } - - return requiredWorkflowRepos, resp, nil -} - -// SetRequiredWorkflowSelectedRepos sets the Repositories selected for a workflow. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#sets-repositories-for-a-required-workflow -func (s *ActionsService) SetRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, ids SelectedRepoIDs) (*Response, error) { - type repoIDs struct { - SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` - } - url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID) - req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// AddRepoToRequiredWorkflow adds the Repository to a required workflow. 
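The required-workflow CRUD surface above is driven by a single options struct for both create and update. A sketch of registering a workflow as required for a hand-picked set of repositories (the repository IDs are placeholders; the workflow file lives in the repository identified by RepositoryID):

package examples

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// RequireCIWorkflow makes .github/workflows/ci.yml (housed in sourceRepoID)
// required for two selected repositories in the organization.
func RequireCIWorkflow(ctx context.Context, client *github.Client, org string, sourceRepoID int64) (*github.OrgRequiredWorkflow, error) {
	selected := github.SelectedRepoIDs{1001, 1002} // placeholder repository IDs
	wf, _, err := client.Actions.CreateRequiredWorkflow(ctx, org, &github.CreateUpdateRequiredWorkflowOptions{
		WorkflowFilePath:      github.String(".github/workflows/ci.yml"),
		RepositoryID:          github.Int64(sourceRepoID),
		Scope:                 github.String("selected"),
		SelectedRepositoryIDs: &selected,
	})
	return wf, err
}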
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#add-a-repository-to-a-required-workflow -func (s *ActionsService) AddRepoToRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID) - req, err := s.client.NewRequest("PUT", url, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// RemoveRepoFromRequiredWorkflow removes the Repository from a required workflow. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#add-a-repository-to-a-required-workflow -func (s *ActionsService) RemoveRepoFromRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID) - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// ListRepoRequiredWorkflows lists the RequiredWorkflows for a repo. -// -// Github API docs:https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-repository-required-workflows -func (s *ActionsService) ListRepoRequiredWorkflows(ctx context.Context, owner, repo string, opts *ListOptions) (*RepoRequiredWorkflows, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/required_workflows", owner, repo) - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - requiredWorkflows := new(RepoRequiredWorkflows) - resp, err := s.client.Do(ctx, req, &requiredWorkflows) - if err != nil { - return nil, resp, err - } - - return requiredWorkflows, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_runner_groups.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_runner_groups.go deleted file mode 100644 index 00b9b6ce09..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_runner_groups.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RunnerGroup represents a self-hosted runner group configured in an organization. -type RunnerGroup struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Visibility *string `json:"visibility,omitempty"` - Default *bool `json:"default,omitempty"` - SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"` - RunnersURL *string `json:"runners_url,omitempty"` - Inherited *bool `json:"inherited,omitempty"` - AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` - RestrictedToWorkflows *bool `json:"restricted_to_workflows,omitempty"` - SelectedWorkflows []string `json:"selected_workflows,omitempty"` - WorkflowRestrictionsReadOnly *bool `json:"workflow_restrictions_read_only,omitempty"` -} - -// RunnerGroups represents a collection of self-hosted runner groups configured for an organization. 
-type RunnerGroups struct { - TotalCount int `json:"total_count"` - RunnerGroups []*RunnerGroup `json:"runner_groups"` -} - -// CreateRunnerGroupRequest represents a request to create a Runner group for an organization. -type CreateRunnerGroupRequest struct { - Name *string `json:"name,omitempty"` - Visibility *string `json:"visibility,omitempty"` - // List of repository IDs that can access the runner group. - SelectedRepositoryIDs []int64 `json:"selected_repository_ids,omitempty"` - // Runners represent a list of runner IDs to add to the runner group. - Runners []int64 `json:"runners,omitempty"` - // If set to True, public repos can use this runner group - AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` - // If true, the runner group will be restricted to running only the workflows specified in the SelectedWorkflows slice. - RestrictedToWorkflows *bool `json:"restricted_to_workflows,omitempty"` - // List of workflows the runner group should be allowed to run. This setting will be ignored unless RestrictedToWorkflows is set to true. - SelectedWorkflows []string `json:"selected_workflows,omitempty"` -} - -// UpdateRunnerGroupRequest represents a request to update a Runner group for an organization. -type UpdateRunnerGroupRequest struct { - Name *string `json:"name,omitempty"` - Visibility *string `json:"visibility,omitempty"` - AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` - RestrictedToWorkflows *bool `json:"restricted_to_workflows,omitempty"` - SelectedWorkflows []string `json:"selected_workflows,omitempty"` -} - -// SetRepoAccessRunnerGroupRequest represents a request to replace the list of repositories -// that can access a self-hosted runner group configured in an organization. -type SetRepoAccessRunnerGroupRequest struct { - // Updated list of repository IDs that should be given access to the runner group. - SelectedRepositoryIDs []int64 `json:"selected_repository_ids"` -} - -// SetRunnerGroupRunnersRequest represents a request to replace the list of -// self-hosted runners that are part of an organization runner group. -type SetRunnerGroupRunnersRequest struct { - // Updated list of runner IDs that should be given access to the runner group. - Runners []int64 `json:"runners"` -} - -// ListOrgRunnerGroupOptions extend ListOptions to have the optional parameters VisibleToRepository. -type ListOrgRunnerGroupOptions struct { - ListOptions - - // Only return runner groups that are allowed to be used by this repository. - VisibleToRepository string `url:"visible_to_repository,omitempty"` -} - -// ListOrganizationRunnerGroups lists all self-hosted runner groups configured in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#list-self-hosted-runner-groups-for-an-organization -func (s *ActionsService) ListOrganizationRunnerGroups(ctx context.Context, org string, opts *ListOrgRunnerGroupOptions) (*RunnerGroups, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - groups := &RunnerGroups{} - resp, err := s.client.Do(ctx, req, &groups) - if err != nil { - return nil, resp, err - } - - return groups, resp, nil -} - -// GetOrganizationRunnerGroup gets a specific self-hosted runner group for an organization using its RunnerGroup ID. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#get-a-self-hosted-runner-group-for-an-organization -func (s *ActionsService) GetOrganizationRunnerGroup(ctx context.Context, org string, groupID int64) (*RunnerGroup, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v", org, groupID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runnerGroup := new(RunnerGroup) - resp, err := s.client.Do(ctx, req, runnerGroup) - if err != nil { - return nil, resp, err - } - - return runnerGroup, resp, nil -} - -// DeleteOrganizationRunnerGroup deletes a self-hosted runner group from an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#delete-a-self-hosted-runner-group-from-an-organization -func (s *ActionsService) DeleteOrganizationRunnerGroup(ctx context.Context, org string, groupID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v", org, groupID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// CreateOrganizationRunnerGroup creates a new self-hosted runner group for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#create-a-self-hosted-runner-group-for-an-organization -func (s *ActionsService) CreateOrganizationRunnerGroup(ctx context.Context, org string, createReq CreateRunnerGroupRequest) (*RunnerGroup, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups", org) - req, err := s.client.NewRequest("POST", u, createReq) - if err != nil { - return nil, nil, err - } - - runnerGroup := new(RunnerGroup) - resp, err := s.client.Do(ctx, req, runnerGroup) - if err != nil { - return nil, resp, err - } - - return runnerGroup, resp, nil -} - -// UpdateOrganizationRunnerGroup updates a self-hosted runner group for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#update-a-self-hosted-runner-group-for-an-organization -func (s *ActionsService) UpdateOrganizationRunnerGroup(ctx context.Context, org string, groupID int64, updateReq UpdateRunnerGroupRequest) (*RunnerGroup, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v", org, groupID) - req, err := s.client.NewRequest("PATCH", u, updateReq) - if err != nil { - return nil, nil, err - } - - runnerGroup := new(RunnerGroup) - resp, err := s.client.Do(ctx, req, runnerGroup) - if err != nil { - return nil, resp, err - } - - return runnerGroup, resp, nil -} - -// ListRepositoryAccessRunnerGroup lists the repositories with access to a self-hosted runner group configured in an organization. 
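CreateOrganizationRunnerGroup takes its request struct by value, and the workflow restriction only takes effect when RestrictedToWorkflows is set, as noted on the struct above. A sketch of creating a locked-down group (names and IDs are placeholders):

package examples

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// NewDeployRunnerGroup creates a group visible only to selected repositories
// and restricted to a single pinned deploy workflow.
func NewDeployRunnerGroup(ctx context.Context, client *github.Client, org string) (*github.RunnerGroup, error) {
	group, _, err := client.Actions.CreateOrganizationRunnerGroup(ctx, org, github.CreateRunnerGroupRequest{
		Name:                  github.String("deploy-runners"),
		Visibility:            github.String("selected"),
		SelectedRepositoryIDs: []int64{1001, 1002}, // placeholder repository IDs
		RestrictedToWorkflows: github.Bool(true),
		SelectedWorkflows:     []string{"my-org/my-repo/.github/workflows/deploy.yml@refs/tags/v1"},
	})
	return group, err
}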
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#list-repository-access-to-a-self-hosted-runner-group-in-an-organization -func (s *ActionsService) ListRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID int64, opts *ListOptions) (*ListRepositories, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories", org, groupID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - repos := &ListRepositories{} - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// SetRepositoryAccessRunnerGroup replaces the list of repositories that have access to a self-hosted runner group configured in an organization -// with a new List of repositories. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#set-repository-access-for-a-self-hosted-runner-group-in-an-organization -func (s *ActionsService) SetRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID int64, ids SetRepoAccessRunnerGroupRequest) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories", org, groupID) - - req, err := s.client.NewRequest("PUT", u, ids) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// AddRepositoryAccessRunnerGroup adds a repository to the list of selected repositories that can access a self-hosted runner group. -// The runner group must have visibility set to 'selected'. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#add-repository-access-to-a-self-hosted-runner-group-in-an-organization -func (s *ActionsService) AddRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID, repoID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories/%v", org, groupID, repoID) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveRepositoryAccessRunnerGroup removes a repository from the list of selected repositories that can access a self-hosted runner group. -// The runner group must have visibility set to 'selected'. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#remove-repository-access-to-a-self-hosted-runner-group-in-an-organization -func (s *ActionsService) RemoveRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID, repoID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories/%v", org, groupID, repoID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListRunnerGroupRunners lists self-hosted runners that are in a specific organization group. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#list-self-hosted-runners-in-a-group-for-an-organization -func (s *ActionsService) ListRunnerGroupRunners(ctx context.Context, org string, groupID int64, opts *ListOptions) (*Runners, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners", org, groupID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runners := &Runners{} - resp, err := s.client.Do(ctx, req, &runners) - if err != nil { - return nil, resp, err - } - - return runners, resp, nil -} - -// SetRunnerGroupRunners replaces the list of self-hosted runners that are part of an organization runner group -// with a new list of runners. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#set-self-hosted-runners-in-a-group-for-an-organization -func (s *ActionsService) SetRunnerGroupRunners(ctx context.Context, org string, groupID int64, ids SetRunnerGroupRunnersRequest) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners", org, groupID) - - req, err := s.client.NewRequest("PUT", u, ids) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// AddRunnerGroupRunners adds a self-hosted runner to a runner group configured in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#add-a-self-hosted-runner-to-a-group-for-an-organization -func (s *ActionsService) AddRunnerGroupRunners(ctx context.Context, org string, groupID, runnerID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners/%v", org, groupID, runnerID) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveRunnerGroupRunners removes a self-hosted runner from a group configured in an organization. -// The runner is then returned to the default group. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#remove-a-self-hosted-runner-from-a-group-for-an-organization -func (s *ActionsService) RemoveRunnerGroupRunners(ctx context.Context, org string, groupID, runnerID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners/%v", org, groupID, runnerID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_runners.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_runners.go deleted file mode 100644 index 3990a5a90f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_runners.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RunnerApplicationDownload represents a binary for the self-hosted runner application that can be downloaded. 
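Membership management above is replace-or-adjust: SetRunnerGroupRunners swaps the whole list, while the Add/Remove variants touch one runner at a time, and a removed runner falls back to the default group. A minimal sketch of the wholesale variant:

package examples

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// ReplaceGroupRunners replaces a runner group's membership in one call;
// runners left off the list revert to the organization's default group.
func ReplaceGroupRunners(ctx context.Context, client *github.Client, org string, groupID int64, runnerIDs []int64) error {
	_, err := client.Actions.SetRunnerGroupRunners(ctx, org, groupID, github.SetRunnerGroupRunnersRequest{
		Runners: runnerIDs,
	})
	return err
}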
-type RunnerApplicationDownload struct { - OS *string `json:"os,omitempty"` - Architecture *string `json:"architecture,omitempty"` - DownloadURL *string `json:"download_url,omitempty"` - Filename *string `json:"filename,omitempty"` - TempDownloadToken *string `json:"temp_download_token,omitempty"` - SHA256Checksum *string `json:"sha256_checksum,omitempty"` -} - -// ActionsEnabledOnOrgRepos represents all the repositories in an organization for which Actions is enabled. -type ActionsEnabledOnOrgRepos struct { - TotalCount int `json:"total_count"` - Repositories []*Repository `json:"repositories"` -} - -// ListRunnerApplicationDownloads lists self-hosted runner application binaries that can be downloaded and run. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-runner-applications-for-a-repository -func (s *ActionsService) ListRunnerApplicationDownloads(ctx context.Context, owner, repo string) ([]*RunnerApplicationDownload, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runners/downloads", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rads []*RunnerApplicationDownload - resp, err := s.client.Do(ctx, req, &rads) - if err != nil { - return nil, resp, err - } - - return rads, resp, nil -} - -// GenerateJITConfigRequest specifies body parameters to GenerateRepoJITConfig. -type GenerateJITConfigRequest struct { - Name string `json:"name"` - RunnerGroupID int64 `json:"runner_group_id"` - WorkFolder *string `json:"work_folder,omitempty"` - - // Labels represents the names of the custom labels to add to the runner. - // Minimum items: 1. Maximum items: 100. - Labels []string `json:"labels"` -} - -// JITRunnerConfig represents encoded JIT configuration that can be used to bootstrap a self-hosted runner. -type JITRunnerConfig struct { - EncodedJITConfig *string `json:"encoded_jit_config,omitempty"` -} - -// GenerateOrgJITConfig generate a just-in-time configuration for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-configuration-for-a-just-in-time-runner-for-an-organization -func (s *ActionsService) GenerateOrgJITConfig(ctx context.Context, owner string, request *GenerateJITConfigRequest) (*JITRunnerConfig, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runners/generate-jitconfig", owner) - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - jitConfig := new(JITRunnerConfig) - resp, err := s.client.Do(ctx, req, jitConfig) - if err != nil { - return nil, resp, err - } - - return jitConfig, resp, nil -} - -// GenerateRepoJITConfig generates a just-in-time configuration for a repository. 
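The JIT endpoints mint a single-use runner configuration: the encoded blob is handed straight to the runner binary, which registers, runs one job, and unregisters. A sketch at the organization level (the name, group ID, and labels are placeholders; the org's default runner group commonly has ID 1, which is an assumption here):

package examples

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// EphemeralRunnerConfig returns the encoded JIT config for one ephemeral
// runner; the blob is consumed by the runner, e.g. `./run.sh --jitconfig <blob>`.
func EphemeralRunnerConfig(ctx context.Context, client *github.Client, org string) (string, error) {
	cfg, _, err := client.Actions.GenerateOrgJITConfig(ctx, org, &github.GenerateJITConfigRequest{
		Name:          "ephemeral-runner-1",
		RunnerGroupID: 1, // assumed: the org's default group
		Labels:        []string{"self-hosted", "linux", "x64"},
	})
	if err != nil {
		return "", err
	}
	return cfg.GetEncodedJITConfig(), nil
}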
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-configuration-for-a-just-in-time-runner-for-a-repository -func (s *ActionsService) GenerateRepoJITConfig(ctx context.Context, owner, repo string, request *GenerateJITConfigRequest) (*JITRunnerConfig, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runners/generate-jitconfig", owner, repo) - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - jitConfig := new(JITRunnerConfig) - resp, err := s.client.Do(ctx, req, jitConfig) - if err != nil { - return nil, resp, err - } - - return jitConfig, resp, nil -} - -// RegistrationToken represents a token that can be used to add a self-hosted runner to a repository. -type RegistrationToken struct { - Token *string `json:"token,omitempty"` - ExpiresAt *Timestamp `json:"expires_at,omitempty"` -} - -// CreateRegistrationToken creates a token that can be used to add a self-hosted runner. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-a-repository -func (s *ActionsService) CreateRegistrationToken(ctx context.Context, owner, repo string) (*RegistrationToken, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runners/registration-token", owner, repo) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - registrationToken := new(RegistrationToken) - resp, err := s.client.Do(ctx, req, registrationToken) - if err != nil { - return nil, resp, err - } - - return registrationToken, resp, nil -} - -// Runner represents a self-hosted runner registered with a repository. -type Runner struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - OS *string `json:"os,omitempty"` - Status *string `json:"status,omitempty"` - Busy *bool `json:"busy,omitempty"` - Labels []*RunnerLabels `json:"labels,omitempty"` -} - -// RunnerLabels represents a collection of labels attached to each runner. -type RunnerLabels struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` -} - -// Runners represents a collection of self-hosted runners for a repository. -type Runners struct { - TotalCount int `json:"total_count"` - Runners []*Runner `json:"runners"` -} - -// ListRunners lists all the self-hosted runners for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-self-hosted-runners-for-a-repository -func (s *ActionsService) ListRunners(ctx context.Context, owner, repo string, opts *ListOptions) (*Runners, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runners", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runners := &Runners{} - resp, err := s.client.Do(ctx, req, &runners) - if err != nil { - return nil, resp, err - } - - return runners, resp, nil -} - -// GetRunner gets a specific self-hosted runner for a repository using its runner ID. 
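Registration tokens are the classic, non-JIT bootstrap path: short-lived, and passed to the runner's config script rather than stored. A sketch of minting one for a repository:

package examples

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

// PrintRunnerSetup mints a short-lived registration token and prints the
// configure command a new self-hosted runner would run with it.
func PrintRunnerSetup(ctx context.Context, client *github.Client, owner, repo string) error {
	tok, _, err := client.Actions.CreateRegistrationToken(ctx, owner, repo)
	if err != nil {
		return err
	}
	fmt.Printf("./config.sh --url https://github.com/%s/%s --token %s\n", owner, repo, tok.GetToken())
	fmt.Println("token expires at:", tok.GetExpiresAt())
	return nil
}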
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#get-a-self-hosted-runner-for-a-repository -func (s *ActionsService) GetRunner(ctx context.Context, owner, repo string, runnerID int64) (*Runner, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runners/%v", owner, repo, runnerID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runner := new(Runner) - resp, err := s.client.Do(ctx, req, runner) - if err != nil { - return nil, resp, err - } - - return runner, resp, nil -} - -// RemoveToken represents a token that can be used to remove a self-hosted runner from a repository. -type RemoveToken struct { - Token *string `json:"token,omitempty"` - ExpiresAt *Timestamp `json:"expires_at,omitempty"` -} - -// CreateRemoveToken creates a token that can be used to remove a self-hosted runner from a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-remove-token-for-a-repository -func (s *ActionsService) CreateRemoveToken(ctx context.Context, owner, repo string) (*RemoveToken, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runners/remove-token", owner, repo) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - removeToken := new(RemoveToken) - resp, err := s.client.Do(ctx, req, removeToken) - if err != nil { - return nil, resp, err - } - - return removeToken, resp, nil -} - -// RemoveRunner forces the removal of a self-hosted runner in a repository using the runner id. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-a-repository -func (s *ActionsService) RemoveRunner(ctx context.Context, owner, repo string, runnerID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runners/%v", owner, repo, runnerID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListOrganizationRunnerApplicationDownloads lists self-hosted runner application binaries that can be downloaded and run. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-runner-applications-for-an-organization -func (s *ActionsService) ListOrganizationRunnerApplicationDownloads(ctx context.Context, owner string) ([]*RunnerApplicationDownload, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runners/downloads", owner) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rads []*RunnerApplicationDownload - resp, err := s.client.Do(ctx, req, &rads) - if err != nil { - return nil, resp, err - } - - return rads, resp, nil -} - -// CreateOrganizationRegistrationToken creates a token that can be used to add a self-hosted runner to an organization. 
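The repo-level registration-token and runner-listing calls deleted here composed like this. A sketch only; the `actionsdemo` package name, owner/repo arguments, and page size are illustrative:

```go
package actionsdemo

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func registerAndList(ctx context.Context, client *github.Client, owner, repo string) error {
	// The short-lived token is what `config.sh --url ... --token ...` expects on the runner host.
	tok, _, err := client.Actions.CreateRegistrationToken(ctx, owner, repo)
	if err != nil {
		return err
	}
	fmt.Println("token expires at", tok.GetExpiresAt())

	runners, _, err := client.Actions.ListRunners(ctx, owner, repo, &github.ListOptions{PerPage: 50})
	if err != nil {
		return err
	}
	for _, r := range runners.Runners {
		fmt.Printf("runner %d %q busy=%v\n", r.GetID(), r.GetName(), r.GetBusy())
	}
	return nil
}
```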
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-an-organization
-func (s *ActionsService) CreateOrganizationRegistrationToken(ctx context.Context, owner string) (*RegistrationToken, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/actions/runners/registration-token", owner)
-
-	req, err := s.client.NewRequest("POST", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	registrationToken := new(RegistrationToken)
-	resp, err := s.client.Do(ctx, req, registrationToken)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return registrationToken, resp, nil
-}
-
-// ListOrganizationRunners lists all the self-hosted runners for an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-self-hosted-runners-for-an-organization
-func (s *ActionsService) ListOrganizationRunners(ctx context.Context, owner string, opts *ListOptions) (*Runners, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/actions/runners", owner)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	runners := &Runners{}
-	resp, err := s.client.Do(ctx, req, &runners)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return runners, resp, nil
-}
-
-// ListEnabledReposInOrg lists the selected repositories that are enabled for GitHub Actions in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#list-selected-repositories-enabled-for-github-actions-in-an-organization
-func (s *ActionsService) ListEnabledReposInOrg(ctx context.Context, owner string, opts *ListOptions) (*ActionsEnabledOnOrgRepos, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/actions/permissions/repositories", owner)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	repos := &ActionsEnabledOnOrgRepos{}
-	resp, err := s.client.Do(ctx, req, repos)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return repos, resp, nil
-}
-
-// SetEnabledReposInOrg replaces the list of selected repositories that are enabled for GitHub Actions in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-selected-repositories-enabled-for-github-actions-in-an-organization
-func (s *ActionsService) SetEnabledReposInOrg(ctx context.Context, owner string, repositoryIDs []int64) (*Response, error) {
-	u := fmt.Sprintf("orgs/%v/actions/permissions/repositories", owner)
-
-	req, err := s.client.NewRequest("PUT", u, struct {
-		IDs []int64 `json:"selected_repository_ids"`
-	}{IDs: repositoryIDs})
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := s.client.Do(ctx, req, nil)
-	if err != nil {
-		return resp, err
-	}
-
-	return resp, nil
-}
-
-// AddEnabledReposInOrg adds a repository to the list of selected repositories that are enabled for GitHub Actions in an organization.
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#enable-a-selected-repository-for-github-actions-in-an-organization -func (s *ActionsService) AddEnabledReposInOrg(ctx context.Context, owner string, repositoryID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/permissions/repositories/%v", owner, repositoryID) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// RemoveEnabledRepoInOrg removes a single repository from the list of enabled repos for GitHub Actions in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#disable-a-selected-repository-for-github-actions-in-an-organization -func (s *ActionsService) RemoveEnabledRepoInOrg(ctx context.Context, owner string, repositoryID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/permissions/repositories/%v", owner, repositoryID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// GetOrganizationRunner gets a specific self-hosted runner for an organization using its runner ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#get-a-self-hosted-runner-for-an-organization -func (s *ActionsService) GetOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*Runner, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runners/%v", owner, runnerID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runner := new(Runner) - resp, err := s.client.Do(ctx, req, runner) - if err != nil { - return nil, resp, err - } - - return runner, resp, nil -} - -// CreateOrganizationRemoveToken creates a token that can be used to remove a self-hosted runner from an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-remove-token-for-an-organization -func (s *ActionsService) CreateOrganizationRemoveToken(ctx context.Context, owner string) (*RemoveToken, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runners/remove-token", owner) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - removeToken := new(RemoveToken) - resp, err := s.client.Do(ctx, req, removeToken) - if err != nil { - return nil, resp, err - } - - return removeToken, resp, nil -} - -// RemoveOrganizationRunner forces the removal of a self-hosted runner from an organization using the runner id. 
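The org-level Actions allow-list helpers removed in this hunk are driven by repository IDs. A sketch of the three operations together; the IDs are hypothetical:

```go
package actionsdemo

import (
	"context"

	"github.com/google/go-github/v53/github"
)

func manageAllowList(ctx context.Context, client *github.Client, org string) error {
	// Replace the set of repositories allowed to run Actions wholesale...
	if _, err := client.Actions.SetEnabledReposInOrg(ctx, org, []int64{111, 222}); err != nil {
		return err
	}
	// ...then toggle a single repository in and out by ID.
	if _, err := client.Actions.AddEnabledReposInOrg(ctx, org, 333); err != nil {
		return err
	}
	_, err := client.Actions.RemoveEnabledRepoInOrg(ctx, org, 222)
	return err
}
```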
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-an-organization -func (s *ActionsService) RemoveOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/actions/runners/%v", owner, runnerID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_secrets.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_secrets.go deleted file mode 100644 index 316badb70d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_secrets.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "fmt" - "strconv" -) - -// PublicKey represents the public key that should be used to encrypt secrets. -type PublicKey struct { - KeyID *string `json:"key_id"` - Key *string `json:"key"` -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// This ensures GitHub Enterprise versions which return a numeric key id -// do not error out when unmarshaling. -func (p *PublicKey) UnmarshalJSON(data []byte) error { - var pk struct { - KeyID interface{} `json:"key_id,string"` - Key *string `json:"key"` - } - - if err := json.Unmarshal(data, &pk); err != nil { - return err - } - - p.Key = pk.Key - - switch v := pk.KeyID.(type) { - case nil: - return nil - case string: - p.KeyID = &v - case float64: - p.KeyID = String(strconv.FormatFloat(v, 'f', -1, 64)) - default: - return fmt.Errorf("unable to unmarshal %T as a string", v) - } - - return nil -} - -func (s *ActionsService) getPublicKey(ctx context.Context, url string) (*PublicKey, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - pubKey := new(PublicKey) - resp, err := s.client.Do(ctx, req, pubKey) - if err != nil { - return nil, resp, err - } - - return pubKey, resp, nil -} - -// GetRepoPublicKey gets a public key that should be used for secret encryption. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-a-repository-public-key -func (s *ActionsService) GetRepoPublicKey(ctx context.Context, owner, repo string) (*PublicKey, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/secrets/public-key", owner, repo) - return s.getPublicKey(ctx, url) -} - -// GetOrgPublicKey gets a public key that should be used for secret encryption. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-organization-public-key -func (s *ActionsService) GetOrgPublicKey(ctx context.Context, org string) (*PublicKey, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/public-key", org) - return s.getPublicKey(ctx, url) -} - -// GetEnvPublicKey gets a public key that should be used for secret encryption. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-environment-public-key -func (s *ActionsService) GetEnvPublicKey(ctx context.Context, repoID int, env string) (*PublicKey, *Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/secrets/public-key", repoID, env) - return s.getPublicKey(ctx, url) -} - -// Secret represents a repository action secret. 
-type Secret struct { - Name string `json:"name"` - CreatedAt Timestamp `json:"created_at"` - UpdatedAt Timestamp `json:"updated_at"` - Visibility string `json:"visibility,omitempty"` - SelectedRepositoriesURL string `json:"selected_repositories_url,omitempty"` -} - -// Secrets represents one item from the ListSecrets response. -type Secrets struct { - TotalCount int `json:"total_count"` - Secrets []*Secret `json:"secrets"` -} - -func (s *ActionsService) listSecrets(ctx context.Context, url string, opts *ListOptions) (*Secrets, *Response, error) { - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - secrets := new(Secrets) - resp, err := s.client.Do(ctx, req, &secrets) - if err != nil { - return nil, resp, err - } - - return secrets, resp, nil -} - -// ListRepoSecrets lists all secrets available in a repository -// without revealing their encrypted values. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-repository-secrets -func (s *ActionsService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/secrets", owner, repo) - return s.listSecrets(ctx, url, opts) -} - -// ListOrgSecrets lists all secrets available in an organization -// without revealing their encrypted values. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-organization-secrets -func (s *ActionsService) ListOrgSecrets(ctx context.Context, org string, opts *ListOptions) (*Secrets, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets", org) - return s.listSecrets(ctx, url, opts) -} - -// ListEnvSecrets lists all secrets available in an environment. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-environment-secrets -func (s *ActionsService) ListEnvSecrets(ctx context.Context, repoID int, env string, opts *ListOptions) (*Secrets, *Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/secrets", repoID, env) - return s.listSecrets(ctx, url, opts) -} - -func (s *ActionsService) getSecret(ctx context.Context, url string) (*Secret, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - secret := new(Secret) - resp, err := s.client.Do(ctx, req, secret) - if err != nil { - return nil, resp, err - } - - return secret, resp, nil -} - -// GetRepoSecret gets a single repository secret without revealing its encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-a-repository-secret -func (s *ActionsService) GetRepoSecret(ctx context.Context, owner, repo, name string) (*Secret, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/secrets/%v", owner, repo, name) - return s.getSecret(ctx, url) -} - -// GetOrgSecret gets a single organization secret without revealing its encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-organization-secret -func (s *ActionsService) GetOrgSecret(ctx context.Context, org, name string) (*Secret, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/%v", org, name) - return s.getSecret(ctx, url) -} - -// GetEnvSecret gets a single environment secret without revealing its encrypted value. 
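The secret listing and fetching helpers above only ever return metadata, never decrypted values. A paging sketch, with the usual placeholder owner/repo arguments:

```go
package actionsdemo

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func listSecrets(ctx context.Context, client *github.Client, owner, repo string) error {
	opts := &github.ListOptions{PerPage: 100}
	for {
		secrets, resp, err := client.Actions.ListRepoSecrets(ctx, owner, repo, opts)
		if err != nil {
			return err
		}
		for _, s := range secrets.Secrets {
			// Names and timestamps only; encrypted values are never returned.
			fmt.Printf("%s updated %s\n", s.Name, s.UpdatedAt)
		}
		if resp.NextPage == 0 {
			return nil
		}
		opts.Page = resp.NextPage
	}
}
```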
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-environment-secret -func (s *ActionsService) GetEnvSecret(ctx context.Context, repoID int, env, secretName string) (*Secret, *Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/secrets/%v", repoID, env, secretName) - return s.getSecret(ctx, url) -} - -// SelectedRepoIDs are the repository IDs that have access to the actions secrets. -type SelectedRepoIDs []int64 - -// EncryptedSecret represents a secret that is encrypted using a public key. -// -// The value of EncryptedValue must be your secret, encrypted with -// LibSodium (see documentation here: https://libsodium.gitbook.io/doc/bindings_for_other_languages) -// using the public key retrieved using the GetPublicKey method. -type EncryptedSecret struct { - Name string `json:"-"` - KeyID string `json:"key_id"` - EncryptedValue string `json:"encrypted_value"` - Visibility string `json:"visibility,omitempty"` - SelectedRepositoryIDs SelectedRepoIDs `json:"selected_repository_ids,omitempty"` -} - -func (s *ActionsService) putSecret(ctx context.Context, url string, eSecret *EncryptedSecret) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, eSecret) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// CreateOrUpdateRepoSecret creates or updates a repository secret with an encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#create-or-update-a-repository-secret -func (s *ActionsService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *EncryptedSecret) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/secrets/%v", owner, repo, eSecret.Name) - return s.putSecret(ctx, url, eSecret) -} - -// CreateOrUpdateOrgSecret creates or updates an organization secret with an encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#create-or-update-an-organization-secret -func (s *ActionsService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *EncryptedSecret) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/%v", org, eSecret.Name) - return s.putSecret(ctx, url, eSecret) -} - -// CreateOrUpdateEnvSecret creates or updates a single environment secret with an encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#create-or-update-an-environment-secret -func (s *ActionsService) CreateOrUpdateEnvSecret(ctx context.Context, repoID int, env string, eSecret *EncryptedSecret) (*Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/secrets/%v", repoID, env, eSecret.Name) - return s.putSecret(ctx, url, eSecret) -} - -func (s *ActionsService) deleteSecret(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeleteRepoSecret deletes a secret in a repository using the secret name. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#delete-a-repository-secret -func (s *ActionsService) DeleteRepoSecret(ctx context.Context, owner, repo, name string) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/secrets/%v", owner, repo, name) - return s.deleteSecret(ctx, url) -} - -// DeleteOrgSecret deletes a secret in an organization using the secret name. 
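Writing a secret requires sealing it client-side with the repository public key before calling CreateOrUpdateRepoSecret. A sketch using golang.org/x/crypto/nacl/box for the libsodium sealed-box primitive; the helper name and its inputs are assumptions:

```go
package actionsdemo

import (
	"context"
	"crypto/rand"
	"encoding/base64"

	"github.com/google/go-github/v53/github"
	"golang.org/x/crypto/nacl/box"
)

func putRepoSecret(ctx context.Context, client *github.Client, owner, repo, name, plaintext string) error {
	key, _, err := client.Actions.GetRepoPublicKey(ctx, owner, repo)
	if err != nil {
		return err
	}
	raw, err := base64.StdEncoding.DecodeString(key.GetKey())
	if err != nil {
		return err
	}
	var recipient [32]byte
	copy(recipient[:], raw)

	// Anonymous sealed box, the encryption scheme the Actions secrets API expects.
	sealed, err := box.SealAnonymous(nil, []byte(plaintext), &recipient, rand.Reader)
	if err != nil {
		return err
	}

	_, err = client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, &github.EncryptedSecret{
		Name:           name,
		KeyID:          key.GetKeyID(),
		EncryptedValue: base64.StdEncoding.EncodeToString(sealed),
	})
	return err
}
```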
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#delete-an-organization-secret -func (s *ActionsService) DeleteOrgSecret(ctx context.Context, org, name string) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/%v", org, name) - return s.deleteSecret(ctx, url) -} - -// DeleteEnvSecret deletes a secret in an environment using the secret name. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#delete-an-environment-secret -func (s *ActionsService) DeleteEnvSecret(ctx context.Context, repoID int, env, secretName string) (*Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/secrets/%v", repoID, env, secretName) - return s.deleteSecret(ctx, url) -} - -// SelectedReposList represents the list of repositories selected for an organization secret. -type SelectedReposList struct { - TotalCount *int `json:"total_count,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` -} - -func (s *ActionsService) listSelectedReposForSecret(ctx context.Context, url string, opts *ListOptions) (*SelectedReposList, *Response, error) { - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - result := new(SelectedReposList) - resp, err := s.client.Do(ctx, req, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// ListSelectedReposForOrgSecret lists all repositories that have access to a secret. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-selected-repositories-for-an-organization-secret -func (s *ActionsService) ListSelectedReposForOrgSecret(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories", org, name) - return s.listSelectedReposForSecret(ctx, url, opts) -} - -func (s *ActionsService) setSelectedReposForSecret(ctx context.Context, url string, ids SelectedRepoIDs) (*Response, error) { - type repoIDs struct { - SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` - } - - req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// SetSelectedReposForOrgSecret sets the repositories that have access to a secret. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#set-selected-repositories-for-an-organization-secret -func (s *ActionsService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories", org, name) - return s.setSelectedReposForSecret(ctx, url, ids) -} - -func (s *ActionsService) addSelectedRepoToSecret(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// AddSelectedRepoToOrgSecret adds a repository to an organization secret. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#add-selected-repository-to-an-organization-secret -func (s *ActionsService) AddSelectedRepoToOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories/%v", org, name, *repo.ID) - return s.addSelectedRepoToSecret(ctx, url) -} - -func (s *ActionsService) removeSelectedRepoFromSecret(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveSelectedRepoFromOrgSecret removes a repository from an organization secret. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#remove-selected-repository-from-an-organization-secret -func (s *ActionsService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories/%v", org, name, *repo.ID) - return s.removeSelectedRepoFromSecret(ctx, url) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_variables.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_variables.go deleted file mode 100644 index 29445edd04..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_variables.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ActionsVariable represents a repository action variable. -type ActionsVariable struct { - Name string `json:"name"` - Value string `json:"value"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Visibility *string `json:"visibility,omitempty"` - // Used by ListOrgVariables and GetOrgVariables - SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"` - // Used by UpdateOrgVariable and CreateOrgVariable - SelectedRepositoryIDs *SelectedRepoIDs `json:"selected_repository_ids,omitempty"` -} - -// ActionsVariables represents one item from the ListVariables response. -type ActionsVariables struct { - TotalCount int `json:"total_count"` - Variables []*ActionsVariable `json:"variables"` -} - -func (s *ActionsService) listVariables(ctx context.Context, url string, opts *ListOptions) (*ActionsVariables, *Response, error) { - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - variables := new(ActionsVariables) - resp, err := s.client.Do(ctx, req, &variables) - if err != nil { - return nil, resp, err - } - - return variables, resp, nil -} - -// ListRepoVariables lists all variables available in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-repository-variables -func (s *ActionsService) ListRepoVariables(ctx context.Context, owner, repo string, opts *ListOptions) (*ActionsVariables, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/variables", owner, repo) - return s.listVariables(ctx, url, opts) -} - -// ListOrgVariables lists all variables available in an organization. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-organization-variables -func (s *ActionsService) ListOrgVariables(ctx context.Context, org string, opts *ListOptions) (*ActionsVariables, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/variables", org) - return s.listVariables(ctx, url, opts) -} - -// ListEnvVariables lists all variables available in an environment. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-environment-variables -func (s *ActionsService) ListEnvVariables(ctx context.Context, repoID int, env string, opts *ListOptions) (*ActionsVariables, *Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables", repoID, env) - return s.listVariables(ctx, url, opts) -} - -func (s *ActionsService) getVariable(ctx context.Context, url string) (*ActionsVariable, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - variable := new(ActionsVariable) - resp, err := s.client.Do(ctx, req, variable) - if err != nil { - return nil, resp, err - } - - return variable, resp, nil -} - -// GetRepoVariable gets a single repository variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#get-a-repository-variable -func (s *ActionsService) GetRepoVariable(ctx context.Context, owner, repo, name string) (*ActionsVariable, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/variables/%v", owner, repo, name) - return s.getVariable(ctx, url) -} - -// GetOrgVariable gets a single organization variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#get-an-organization-variable -func (s *ActionsService) GetOrgVariable(ctx context.Context, org, name string) (*ActionsVariable, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/variables/%v", org, name) - return s.getVariable(ctx, url) -} - -// GetEnvVariable gets a single environment variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#get-an-environment-variable -func (s *ActionsService) GetEnvVariable(ctx context.Context, repoID int, env, variableName string) (*ActionsVariable, *Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variableName) - return s.getVariable(ctx, url) -} - -func (s *ActionsService) postVariable(ctx context.Context, url string, variable *ActionsVariable) (*Response, error) { - req, err := s.client.NewRequest("POST", url, variable) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// CreateRepoVariable creates a repository variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#create-a-repository-variable -func (s *ActionsService) CreateRepoVariable(ctx context.Context, owner, repo string, variable *ActionsVariable) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/actions/variables", owner, repo) - return s.postVariable(ctx, url, variable) -} - -// CreateOrgVariable creates an organization variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#create-an-organization-variable -func (s *ActionsService) CreateOrgVariable(ctx context.Context, org string, variable *ActionsVariable) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/variables", org) - return s.postVariable(ctx, url, variable) -} - -// CreateEnvVariable creates an environment variable. 
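Variables, unlike secrets, are plain text, so the create/get helpers removed here need no encryption step. A round-trip sketch; the variable name and value are placeholders:

```go
package actionsdemo

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func roundTripVariable(ctx context.Context, client *github.Client, owner, repo string) error {
	_, err := client.Actions.CreateRepoVariable(ctx, owner, repo, &github.ActionsVariable{
		Name:  "DEPLOY_ENV",
		Value: "staging",
	})
	if err != nil {
		return err
	}
	v, _, err := client.Actions.GetRepoVariable(ctx, owner, repo, "DEPLOY_ENV")
	if err != nil {
		return err
	}
	fmt.Println(v.Name, "=", v.Value)
	return nil
}
```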
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/variables#create-an-environment-variable
-func (s *ActionsService) CreateEnvVariable(ctx context.Context, repoID int, env string, variable *ActionsVariable) (*Response, error) {
-	url := fmt.Sprintf("repositories/%v/environments/%v/variables", repoID, env)
-	return s.postVariable(ctx, url, variable)
-}
-
-func (s *ActionsService) patchVariable(ctx context.Context, url string, variable *ActionsVariable) (*Response, error) {
-	req, err := s.client.NewRequest("PATCH", url, variable)
-	if err != nil {
-		return nil, err
-	}
-	return s.client.Do(ctx, req, nil)
-}
-
-// UpdateRepoVariable updates a repository variable.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/variables#update-a-repository-variable
-func (s *ActionsService) UpdateRepoVariable(ctx context.Context, owner, repo string, variable *ActionsVariable) (*Response, error) {
-	url := fmt.Sprintf("repos/%v/%v/actions/variables/%v", owner, repo, variable.Name)
-	return s.patchVariable(ctx, url, variable)
-}
-
-// UpdateOrgVariable updates an organization variable.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/variables#update-an-organization-variable
-func (s *ActionsService) UpdateOrgVariable(ctx context.Context, org string, variable *ActionsVariable) (*Response, error) {
-	url := fmt.Sprintf("orgs/%v/actions/variables/%v", org, variable.Name)
-	return s.patchVariable(ctx, url, variable)
-}
-
-// UpdateEnvVariable updates an environment variable.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/variables#update-an-environment-variable
-func (s *ActionsService) UpdateEnvVariable(ctx context.Context, repoID int, env string, variable *ActionsVariable) (*Response, error) {
-	url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variable.Name)
-	return s.patchVariable(ctx, url, variable)
-}
-
-func (s *ActionsService) deleteVariable(ctx context.Context, url string) (*Response, error) {
-	req, err := s.client.NewRequest("DELETE", url, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// DeleteRepoVariable deletes a variable in a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/variables#delete-a-repository-variable
-func (s *ActionsService) DeleteRepoVariable(ctx context.Context, owner, repo, name string) (*Response, error) {
-	url := fmt.Sprintf("repos/%v/%v/actions/variables/%v", owner, repo, name)
-	return s.deleteVariable(ctx, url)
-}
-
-// DeleteOrgVariable deletes a variable in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/variables#delete-an-organization-variable
-func (s *ActionsService) DeleteOrgVariable(ctx context.Context, org, name string) (*Response, error) {
-	url := fmt.Sprintf("orgs/%v/actions/variables/%v", org, name)
-	return s.deleteVariable(ctx, url)
-}
-
-// DeleteEnvVariable deletes a variable in an environment.
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#delete-an-environment-variable -func (s *ActionsService) DeleteEnvVariable(ctx context.Context, repoID int, env, variableName string) (*Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variableName) - return s.deleteVariable(ctx, url) -} - -func (s *ActionsService) listSelectedReposForVariable(ctx context.Context, url string, opts *ListOptions) (*SelectedReposList, *Response, error) { - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - result := new(SelectedReposList) - resp, err := s.client.Do(ctx, req, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// ListSelectedReposForOrgVariable lists all repositories that have access to a variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-selected-repositories-for-an-organization-variable -func (s *ActionsService) ListSelectedReposForOrgVariable(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { - url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories", org, name) - return s.listSelectedReposForVariable(ctx, url, opts) -} - -func (s *ActionsService) setSelectedReposForVariable(ctx context.Context, url string, ids SelectedRepoIDs) (*Response, error) { - type repoIDs struct { - SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` - } - - req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// SetSelectedReposForOrgVariable sets the repositories that have access to a variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#set-selected-repositories-for-an-organization-variable -func (s *ActionsService) SetSelectedReposForOrgVariable(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories", org, name) - return s.setSelectedReposForVariable(ctx, url, ids) -} - -func (s *ActionsService) addSelectedRepoToVariable(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// AddSelectedRepoToOrgVariable adds a repository to an organization variable. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#add-selected-repository-to-an-organization-variable -func (s *ActionsService) AddSelectedRepoToOrgVariable(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories/%v", org, name, *repo.ID) - return s.addSelectedRepoToVariable(ctx, url) -} - -func (s *ActionsService) removeSelectedRepoFromVariable(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveSelectedRepoFromOrgVariable removes a repository from an organization variable. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/variables#remove-selected-repository-from-an-organization-variable -func (s *ActionsService) RemoveSelectedRepoFromOrgVariable(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories/%v", org, name, *repo.ID) - return s.removeSelectedRepoFromVariable(ctx, url) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go deleted file mode 100644 index 1f018b3e48..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/http" - "net/url" -) - -// TaskStep represents a single task step from a sequence of tasks of a job. -type TaskStep struct { - Name *string `json:"name,omitempty"` - Status *string `json:"status,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - Number *int64 `json:"number,omitempty"` - StartedAt *Timestamp `json:"started_at,omitempty"` - CompletedAt *Timestamp `json:"completed_at,omitempty"` -} - -// WorkflowJob represents a repository action workflow job. -type WorkflowJob struct { - ID *int64 `json:"id,omitempty"` - RunID *int64 `json:"run_id,omitempty"` - RunURL *string `json:"run_url,omitempty"` - NodeID *string `json:"node_id,omitempty"` - HeadBranch *string `json:"head_branch,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Status *string `json:"status,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - StartedAt *Timestamp `json:"started_at,omitempty"` - CompletedAt *Timestamp `json:"completed_at,omitempty"` - Name *string `json:"name,omitempty"` - Steps []*TaskStep `json:"steps,omitempty"` - CheckRunURL *string `json:"check_run_url,omitempty"` - // Labels represents runner labels from the `runs-on:` key from a GitHub Actions workflow. - Labels []string `json:"labels,omitempty"` - RunnerID *int64 `json:"runner_id,omitempty"` - RunnerName *string `json:"runner_name,omitempty"` - RunnerGroupID *int64 `json:"runner_group_id,omitempty"` - RunnerGroupName *string `json:"runner_group_name,omitempty"` - RunAttempt *int64 `json:"run_attempt,omitempty"` - WorkflowName *string `json:"workflow_name,omitempty"` -} - -// Jobs represents a slice of repository action workflow job. -type Jobs struct { - TotalCount *int `json:"total_count,omitempty"` - Jobs []*WorkflowJob `json:"jobs,omitempty"` -} - -// ListWorkflowJobsOptions specifies optional parameters to ListWorkflowJobs. -type ListWorkflowJobsOptions struct { - // Filter specifies how jobs should be filtered by their completed_at timestamp. - // Possible values are: - // latest - Returns jobs from the most recent execution of the workflow run - // all - Returns all jobs for a workflow run, including from old executions of the workflow run - // - // Default value is "latest". - Filter string `url:"filter,omitempty"` - ListOptions -} - -// ListWorkflowJobs lists all jobs for a workflow run. 
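The job-listing API deleted next supports a `filter` of "latest" or "all". A sketch that scans every attempt of a run for failed jobs; the run ID is a placeholder:

```go
package actionsdemo

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func failedJobs(ctx context.Context, client *github.Client, owner, repo string, runID int64) error {
	jobs, _, err := client.Actions.ListWorkflowJobs(ctx, owner, repo, runID, &github.ListWorkflowJobsOptions{
		Filter:      "all", // include jobs from earlier attempts, not just the latest
		ListOptions: github.ListOptions{PerPage: 100},
	})
	if err != nil {
		return err
	}
	for _, j := range jobs.Jobs {
		if j.GetConclusion() == "failure" {
			fmt.Printf("job %d (%s) failed\n", j.GetID(), j.GetName())
		}
	}
	return nil
}
```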
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-jobs#list-jobs-for-a-workflow-run -func (s *ActionsService) ListWorkflowJobs(ctx context.Context, owner, repo string, runID int64, opts *ListWorkflowJobsOptions) (*Jobs, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/actions/runs/%v/jobs", owner, repo, runID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - jobs := new(Jobs) - resp, err := s.client.Do(ctx, req, &jobs) - if err != nil { - return nil, resp, err - } - - return jobs, resp, nil -} - -// GetWorkflowJobByID gets a specific job in a workflow run by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-jobs#get-a-job-for-a-workflow-run -func (s *ActionsService) GetWorkflowJobByID(ctx context.Context, owner, repo string, jobID int64) (*WorkflowJob, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v", owner, repo, jobID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - job := new(WorkflowJob) - resp, err := s.client.Do(ctx, req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// GetWorkflowJobLogs gets a redirect URL to download a plain text file of logs for a workflow job. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-jobs#download-job-logs-for-a-workflow-run -func (s *ActionsService) GetWorkflowJobLogs(ctx context.Context, owner, repo string, jobID int64, followRedirects bool) (*url.URL, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v/logs", owner, repo, jobID) - - resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusFound { - return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) - } - - parsedURL, err := url.Parse(resp.Header.Get("Location")) - return parsedURL, newResponse(resp), err -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go deleted file mode 100644 index 61f736be4f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/http" - "net/url" -) - -// WorkflowRun represents a repository action workflow run. 
-type WorkflowRun struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - NodeID *string `json:"node_id,omitempty"` - HeadBranch *string `json:"head_branch,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - RunNumber *int `json:"run_number,omitempty"` - RunAttempt *int `json:"run_attempt,omitempty"` - Event *string `json:"event,omitempty"` - DisplayTitle *string `json:"display_title,omitempty"` - Status *string `json:"status,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - WorkflowID *int64 `json:"workflow_id,omitempty"` - CheckSuiteID *int64 `json:"check_suite_id,omitempty"` - CheckSuiteNodeID *string `json:"check_suite_node_id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - PullRequests []*PullRequest `json:"pull_requests,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - RunStartedAt *Timestamp `json:"run_started_at,omitempty"` - JobsURL *string `json:"jobs_url,omitempty"` - LogsURL *string `json:"logs_url,omitempty"` - CheckSuiteURL *string `json:"check_suite_url,omitempty"` - ArtifactsURL *string `json:"artifacts_url,omitempty"` - CancelURL *string `json:"cancel_url,omitempty"` - RerunURL *string `json:"rerun_url,omitempty"` - PreviousAttemptURL *string `json:"previous_attempt_url,omitempty"` - HeadCommit *HeadCommit `json:"head_commit,omitempty"` - WorkflowURL *string `json:"workflow_url,omitempty"` - Repository *Repository `json:"repository,omitempty"` - HeadRepository *Repository `json:"head_repository,omitempty"` - Actor *User `json:"actor,omitempty"` -} - -// WorkflowRuns represents a slice of repository action workflow run. -type WorkflowRuns struct { - TotalCount *int `json:"total_count,omitempty"` - WorkflowRuns []*WorkflowRun `json:"workflow_runs,omitempty"` -} - -// ListWorkflowRunsOptions specifies optional parameters to ListWorkflowRuns. -type ListWorkflowRunsOptions struct { - Actor string `url:"actor,omitempty"` - Branch string `url:"branch,omitempty"` - Event string `url:"event,omitempty"` - Status string `url:"status,omitempty"` - Created string `url:"created,omitempty"` - HeadSHA string `url:"head_sha,omitempty"` - ExcludePullRequests bool `url:"exclude_pull_requests,omitempty"` - CheckSuiteID int64 `url:"check_suite_id,omitempty"` - ListOptions -} - -// WorkflowRunUsage represents a usage of a specific workflow run. -type WorkflowRunUsage struct { - Billable *WorkflowRunBillMap `json:"billable,omitempty"` - RunDurationMS *int64 `json:"run_duration_ms,omitempty"` -} - -// WorkflowRunBillMap represents different runner environments available for a workflow run. -// Its key is the name of its environment, e.g. "UBUNTU", "MACOS", "WINDOWS", etc. -type WorkflowRunBillMap map[string]*WorkflowRunBill - -// WorkflowRunBill specifies billable time for a specific environment in a workflow run. -type WorkflowRunBill struct { - TotalMS *int64 `json:"total_ms,omitempty"` - Jobs *int `json:"jobs,omitempty"` - JobRuns []*WorkflowRunJobRun `json:"job_runs,omitempty"` -} - -// WorkflowRunJobRun represents a usage of individual jobs of a specific workflow run. -type WorkflowRunJobRun struct { - JobID *int `json:"job_id,omitempty"` - DurationMS *int64 `json:"duration_ms,omitempty"` -} - -// WorkflowRunAttemptOptions specifies optional parameters to GetWorkflowRunAttempt. 
-type WorkflowRunAttemptOptions struct { - ExcludePullRequests *bool `url:"exclude_pull_requests,omitempty"` -} - -// PendingDeploymentsRequest specifies body parameters to PendingDeployments. -type PendingDeploymentsRequest struct { - EnvironmentIDs []int64 `json:"environment_ids"` - // State can be one of: "approved", "rejected". - State string `json:"state"` - Comment string `json:"comment"` -} - -func (s *ActionsService) listWorkflowRuns(ctx context.Context, endpoint string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) { - u, err := addOptions(endpoint, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runs := new(WorkflowRuns) - resp, err := s.client.Do(ctx, req, &runs) - if err != nil { - return nil, resp, err - } - - return runs, resp, nil -} - -// ListWorkflowRunsByID lists all workflow runs by workflow ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs -func (s *ActionsService) ListWorkflowRunsByID(ctx context.Context, owner, repo string, workflowID int64, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/actions/workflows/%v/runs", owner, repo, workflowID) - return s.listWorkflowRuns(ctx, u, opts) -} - -// ListWorkflowRunsByFileName lists all workflow runs by workflow file name. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs -func (s *ActionsService) ListWorkflowRunsByFileName(ctx context.Context, owner, repo, workflowFileName string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/actions/workflows/%v/runs", owner, repo, workflowFileName) - return s.listWorkflowRuns(ctx, u, opts) -} - -// ListRepositoryWorkflowRuns lists all workflow runs for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository -func (s *ActionsService) ListRepositoryWorkflowRuns(ctx context.Context, owner, repo string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/actions/runs", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runs := new(WorkflowRuns) - resp, err := s.client.Do(ctx, req, &runs) - if err != nil { - return nil, resp, err - } - - return runs, resp, nil -} - -// GetWorkflowRunByID gets a specific workflow run by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-a-workflow-run -func (s *ActionsService) GetWorkflowRunByID(ctx context.Context, owner, repo string, runID int64) (*WorkflowRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runs/%v", owner, repo, runID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - run := new(WorkflowRun) - resp, err := s.client.Do(ctx, req, run) - if err != nil { - return nil, resp, err - } - - return run, resp, nil -} - -// GetWorkflowRunAttempt gets a specific workflow run attempt. 
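ListWorkflowRunsOptions maps straight onto the REST query parameters, so the run-listing helpers above filter server-side. A paging sketch; the branch and status values are illustrative:

```go
package actionsdemo

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func recentFailures(ctx context.Context, client *github.Client, owner, repo string) error {
	opts := &github.ListWorkflowRunsOptions{
		Branch:      "main",
		Status:      "failure",
		ListOptions: github.ListOptions{PerPage: 50},
	}
	for {
		runs, resp, err := client.Actions.ListRepositoryWorkflowRuns(ctx, owner, repo, opts)
		if err != nil {
			return err
		}
		for _, r := range runs.WorkflowRuns {
			fmt.Printf("#%d %s @ %s\n", r.GetRunNumber(), r.GetName(), r.GetHeadSHA())
		}
		if resp.NextPage == 0 {
			return nil
		}
		opts.Page = resp.NextPage
	}
}
```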
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-a-workflow-run-attempt -func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo string, runID int64, attemptNumber int, opts *WorkflowRunAttemptOptions) (*WorkflowRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v", owner, repo, runID, attemptNumber) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - run := new(WorkflowRun) - resp, err := s.client.Do(ctx, req, run) - if err != nil { - return nil, resp, err - } - - return run, resp, nil -} - -// GetWorkflowRunAttemptLogs gets a redirect URL to download a plain text file of logs for a workflow run for attempt number. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#download-workflow-run-attempt-logs -func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, repo string, runID int64, attemptNumber int, followRedirects bool) (*url.URL, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v/logs", owner, repo, runID, attemptNumber) - - resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusFound { - return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) - } - - parsedURL, err := url.Parse(resp.Header.Get("Location")) - return parsedURL, newResponse(resp), err -} - -// RerunWorkflowByID re-runs a workflow by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-workflow -func (s *ActionsService) RerunWorkflowByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/rerun", owner, repo, runID) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RerunFailedJobsByID re-runs all of the failed jobs and their dependent jobs in a workflow run by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-failed-jobs-from-a-workflow-run -func (s *ActionsService) RerunFailedJobsByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/rerun-failed-jobs", owner, repo, runID) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RerunJobByID re-runs a job and its dependent jobs in a workflow run by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-job-from-a-workflow-run -func (s *ActionsService) RerunJobByID(ctx context.Context, owner, repo string, jobID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v/rerun", owner, repo, jobID) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// CancelWorkflowRunByID cancels a workflow run by ID. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#cancel-a-workflow-run
-func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/cancel", owner, repo, runID)
-
-	req, err := s.client.NewRequest("POST", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// GetWorkflowRunLogs gets a redirect URL to download a plain text file of logs for a workflow run.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#download-workflow-run-logs
-func (s *ActionsService) GetWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64, followRedirects bool) (*url.URL, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
-
-	resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusFound {
-		return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
-	}
-
-	parsedURL, err := url.Parse(resp.Header.Get("Location"))
-	return parsedURL, newResponse(resp), err
-}
-
-// DeleteWorkflowRun deletes a workflow run by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run
-func (s *ActionsService) DeleteWorkflowRun(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v", owner, repo, runID)
-
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// DeleteWorkflowRunLogs deletes all logs for a workflow run.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#delete-workflow-run-logs
-func (s *ActionsService) DeleteWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
-
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// GetWorkflowRunUsageByID gets a specific workflow run usage by run ID in the unit of billable milliseconds.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-workflow-run-usage
-func (s *ActionsService) GetWorkflowRunUsageByID(ctx context.Context, owner, repo string, runID int64) (*WorkflowRunUsage, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/timing", owner, repo, runID)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	workflowRunUsage := new(WorkflowRunUsage)
-	resp, err := s.client.Do(ctx, req, workflowRunUsage)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return workflowRunUsage, resp, nil
-}
-
-// PendingDeployments approves or rejects pending deployments that are waiting on approval by a required reviewer.
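PendingDeployments pairs with the PendingDeploymentsRequest type defined earlier in this hunk. A sketch of approving a run gated on a protected environment; the IDs and comment are placeholders:

```go
package actionsdemo

import (
	"context"

	"github.com/google/go-github/v53/github"
)

func approveRun(ctx context.Context, client *github.Client, owner, repo string, runID, envID int64) error {
	// Records an "approved" review for the environments blocking the run.
	_, _, err := client.Actions.PendingDeployments(ctx, owner, repo, runID, &github.PendingDeploymentsRequest{
		EnvironmentIDs: []int64{envID},
		State:          "approved", // or "rejected"
		Comment:        "approved via API",
	})
	return err
}
```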
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#review-pending-deployments-for-a-workflow-run -func (s *ActionsService) PendingDeployments(ctx context.Context, owner, repo string, runID int64, request *PendingDeploymentsRequest) ([]*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/pending_deployments", owner, repo, runID) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - var deployments []*Deployment - resp, err := s.client.Do(ctx, req, &deployments) - if err != nil { - return nil, resp, err - } - - return deployments, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflows.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflows.go deleted file mode 100644 index c9b47ed4be..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/actions_workflows.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Workflow represents a repository action workflow. -type Workflow struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - State *string `json:"state,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - BadgeURL *string `json:"badge_url,omitempty"` -} - -// Workflows represents a slice of repository action workflows. -type Workflows struct { - TotalCount *int `json:"total_count,omitempty"` - Workflows []*Workflow `json:"workflows,omitempty"` -} - -// WorkflowUsage represents a usage of a specific workflow. -type WorkflowUsage struct { - Billable *WorkflowBillMap `json:"billable,omitempty"` -} - -// WorkflowBillMap represents different runner environments available for a workflow. -// Its key is the name of its environment, e.g. "UBUNTU", "MACOS", "WINDOWS", etc. -type WorkflowBillMap map[string]*WorkflowBill - -// WorkflowBill specifies billable time for a specific environment in a workflow. -type WorkflowBill struct { - TotalMS *int64 `json:"total_ms,omitempty"` -} - -// CreateWorkflowDispatchEventRequest represents a request to create a workflow dispatch event. -type CreateWorkflowDispatchEventRequest struct { - // Ref represents the reference of the workflow run. - // The reference can be a branch or a tag. - // Ref is required when creating a workflow dispatch event. - Ref string `json:"ref"` - // Inputs represents input keys and values configured in the workflow file. - // The maximum number of properties is 10. - // Default: Any default properties configured in the workflow file will be used when `inputs` are omitted. - Inputs map[string]interface{} `json:"inputs,omitempty"` -} - -// ListWorkflows lists all workflows in a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#list-repository-workflows -func (s *ActionsService) ListWorkflows(ctx context.Context, owner, repo string, opts *ListOptions) (*Workflows, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/actions/workflows", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - workflows := new(Workflows) - resp, err := s.client.Do(ctx, req, &workflows) - if err != nil { - return nil, resp, err - } - - return workflows, resp, nil -} - -// GetWorkflowByID gets a specific workflow by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-a-workflow -func (s *ActionsService) GetWorkflowByID(ctx context.Context, owner, repo string, workflowID int64) (*Workflow, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v", owner, repo, workflowID) - - return s.getWorkflow(ctx, u) -} - -// GetWorkflowByFileName gets a specific workflow by file name. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-a-workflow -func (s *ActionsService) GetWorkflowByFileName(ctx context.Context, owner, repo, workflowFileName string) (*Workflow, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v", owner, repo, workflowFileName) - - return s.getWorkflow(ctx, u) -} - -func (s *ActionsService) getWorkflow(ctx context.Context, url string) (*Workflow, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - workflow := new(Workflow) - resp, err := s.client.Do(ctx, req, workflow) - if err != nil { - return nil, resp, err - } - - return workflow, resp, nil -} - -// GetWorkflowUsageByID gets a specific workflow usage by ID in the unit of billable milliseconds. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-workflow-usage -func (s *ActionsService) GetWorkflowUsageByID(ctx context.Context, owner, repo string, workflowID int64) (*WorkflowUsage, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/timing", owner, repo, workflowID) - - return s.getWorkflowUsage(ctx, u) -} - -// GetWorkflowUsageByFileName gets a specific workflow usage by file name in the unit of billable milliseconds. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-workflow-usage -func (s *ActionsService) GetWorkflowUsageByFileName(ctx context.Context, owner, repo, workflowFileName string) (*WorkflowUsage, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/timing", owner, repo, workflowFileName) - - return s.getWorkflowUsage(ctx, u) -} - -func (s *ActionsService) getWorkflowUsage(ctx context.Context, url string) (*WorkflowUsage, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - workflowUsage := new(WorkflowUsage) - resp, err := s.client.Do(ctx, req, workflowUsage) - if err != nil { - return nil, resp, err - } - - return workflowUsage, resp, nil -} - -// CreateWorkflowDispatchEventByID manually triggers a GitHub Actions workflow run. 
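Another aside: ListWorkflows, deleted above, follows the usual go-github list shape, so pagination is driven by Response.NextPage. A sketch under the same assumptions as the earlier one (imports elided; owner/repo are placeholders; the Get* accessors are go-github's generated nil-safe getters):

func listAllWorkflows(ctx context.Context, client *github.Client) error {
	opt := &github.ListOptions{PerPage: 50}
	for {
		ws, resp, err := client.Actions.ListWorkflows(ctx, "octo-org", "octo-repo", opt)
		if err != nil {
			return err
		}
		for _, w := range ws.Workflows {
			fmt.Printf("%s (%s): %s\n", w.GetName(), w.GetPath(), w.GetState())
		}
		if resp.NextPage == 0 {
			return nil
		}
		opt.Page = resp.NextPage
	}
}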
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#create-a-workflow-dispatch-event -func (s *ActionsService) CreateWorkflowDispatchEventByID(ctx context.Context, owner, repo string, workflowID int64, event CreateWorkflowDispatchEventRequest) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/dispatches", owner, repo, workflowID) - - return s.createWorkflowDispatchEvent(ctx, u, &event) -} - -// CreateWorkflowDispatchEventByFileName manually triggers a GitHub Actions workflow run. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#create-a-workflow-dispatch-event -func (s *ActionsService) CreateWorkflowDispatchEventByFileName(ctx context.Context, owner, repo, workflowFileName string, event CreateWorkflowDispatchEventRequest) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/dispatches", owner, repo, workflowFileName) - - return s.createWorkflowDispatchEvent(ctx, u, &event) -} - -func (s *ActionsService) createWorkflowDispatchEvent(ctx context.Context, url string, event *CreateWorkflowDispatchEventRequest) (*Response, error) { - req, err := s.client.NewRequest("POST", url, event) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// EnableWorkflowByID enables a workflow and sets the state of the workflow to "active". -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#enable-a-workflow -func (s *ActionsService) EnableWorkflowByID(ctx context.Context, owner, repo string, workflowID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/enable", owner, repo, workflowID) - return s.doNewPutRequest(ctx, u) -} - -// EnableWorkflowByFileName enables a workflow and sets the state of the workflow to "active". -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#enable-a-workflow -func (s *ActionsService) EnableWorkflowByFileName(ctx context.Context, owner, repo, workflowFileName string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/enable", owner, repo, workflowFileName) - return s.doNewPutRequest(ctx, u) -} - -// DisableWorkflowByID disables a workflow and sets the state of the workflow to "disabled_manually". -// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#disable-a-workflow -func (s *ActionsService) DisableWorkflowByID(ctx context.Context, owner, repo string, workflowID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/disable", owner, repo, workflowID) - return s.doNewPutRequest(ctx, u) -} - -// DisableWorkflowByFileName disables a workflow and sets the state of the workflow to "disabled_manually". 
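Aside: the dispatch helpers deleted above take CreateWorkflowDispatchEventRequest by value; Ref is required and Inputs must match what the workflow file declares. A hedged sketch (workflow file name and input keys are hypothetical):

func triggerDeploy(ctx context.Context, client *github.Client) error {
	ev := github.CreateWorkflowDispatchEventRequest{
		Ref: "main", // branch or tag; required
		Inputs: map[string]interface{}{
			"environment": "staging",
		},
	}
	_, err := client.Actions.CreateWorkflowDispatchEventByFileName(ctx, "octo-org", "octo-repo", "deploy.yml", ev)
	return err
}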
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#disable-a-workflow -func (s *ActionsService) DisableWorkflowByFileName(ctx context.Context, owner, repo, workflowFileName string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/disable", owner, repo, workflowFileName) - return s.doNewPutRequest(ctx, u) -} - -func (s *ActionsService) doNewPutRequest(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/activity.go deleted file mode 100644 index 9cd9f9b71d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "context" - -// ActivityService handles communication with the activity related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/ -type ActivityService service - -// FeedLink represents a link to a related resource. -type FeedLink struct { - HRef *string `json:"href,omitempty"` - Type *string `json:"type,omitempty"` -} - -// Feeds represents timeline resources in Atom format. -type Feeds struct { - TimelineURL *string `json:"timeline_url,omitempty"` - UserURL *string `json:"user_url,omitempty"` - CurrentUserPublicURL *string `json:"current_user_public_url,omitempty"` - CurrentUserURL *string `json:"current_user_url,omitempty"` - CurrentUserActorURL *string `json:"current_user_actor_url,omitempty"` - CurrentUserOrganizationURL *string `json:"current_user_organization_url,omitempty"` - CurrentUserOrganizationURLs []string `json:"current_user_organization_urls,omitempty"` - Links *FeedLinks `json:"_links,omitempty"` -} - -// FeedLinks represents the links in a Feed. -type FeedLinks struct { - Timeline *FeedLink `json:"timeline,omitempty"` - User *FeedLink `json:"user,omitempty"` - CurrentUserPublic *FeedLink `json:"current_user_public,omitempty"` - CurrentUser *FeedLink `json:"current_user,omitempty"` - CurrentUserActor *FeedLink `json:"current_user_actor,omitempty"` - CurrentUserOrganization *FeedLink `json:"current_user_organization,omitempty"` - CurrentUserOrganizations []*FeedLink `json:"current_user_organizations,omitempty"` -} - -// ListFeeds lists all the feeds available to the authenticated user. -// -// GitHub provides several timeline resources in Atom format: -// -// Timeline: The GitHub global public timeline -// User: The public timeline for any user, using URI template -// Current user public: The public timeline for the authenticated user -// Current user: The private timeline for the authenticated user -// Current user actor: The private timeline for activity created by the -// authenticated user -// Current user organizations: The private timeline for the organizations -// the authenticated user is a member of. -// -// Note: Private feeds are only returned when authenticating via Basic Auth -// since current feed URIs use the older, non revocable auth tokens. 
-func (s *ActivityService) ListFeeds(ctx context.Context) (*Feeds, *Response, error) { - req, err := s.client.NewRequest("GET", "feeds", nil) - if err != nil { - return nil, nil, err - } - - f := &Feeds{} - resp, err := s.client.Do(ctx, req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_events.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_events.go deleted file mode 100644 index d6f0f043b0..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_events.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListEvents drinks from the firehose of all public events across GitHub. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events -func (s *ActivityService) ListEvents(ctx context.Context, opts *ListOptions) ([]*Event, *Response, error) { - u, err := addOptions("events", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListRepositoryEvents lists events for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-repository-events -func (s *ActivityService) ListRepositoryEvents(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/events", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListIssueEventsForRepository lists issue events for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/events#list-issue-events-for-a-repository -func (s *ActivityService) ListIssueEventsForRepository(ctx context.Context, owner, repo string, opts *ListOptions) ([]*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*IssueEvent - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsForRepoNetwork lists public events for a network of repositories. 
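Aside: the event-listing methods being removed all share the ListOptions/NextPage pagination contract. A minimal sketch of draining one of them, assuming an authenticated client and placeholder owner/repo:

func collectRepoEvents(ctx context.Context, client *github.Client) ([]*github.Event, error) {
	opt := &github.ListOptions{PerPage: 100}
	var all []*github.Event
	for {
		events, resp, err := client.Activity.ListRepositoryEvents(ctx, "octo-org", "octo-repo", opt)
		if err != nil {
			return nil, err
		}
		all = append(all, events...)
		if resp.NextPage == 0 { // zero means no further pages
			return all, nil
		}
		opt.Page = resp.NextPage
	}
}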
-// -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events-for-a-network-of-repositories -func (s *ActivityService) ListEventsForRepoNetwork(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("networks/%v/%v/events", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsForOrganization lists public events for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-organization-events -func (s *ActivityService) ListEventsForOrganization(ctx context.Context, org string, opts *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("orgs/%v/events", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsPerformedByUser lists the events performed by a user. If publicOnly is -// true, only public events will be returned. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-events-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events-for-a-user -func (s *ActivityService) ListEventsPerformedByUser(ctx context.Context, user string, publicOnly bool, opts *ListOptions) ([]*Event, *Response, error) { - var u string - if publicOnly { - u = fmt.Sprintf("users/%v/events/public", user) - } else { - u = fmt.Sprintf("users/%v/events", user) - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListEventsReceivedByUser lists the events received by a user. If publicOnly is -// true, only public events will be returned. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-events-received-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events-received-by-a-user -func (s *ActivityService) ListEventsReceivedByUser(ctx context.Context, user string, publicOnly bool, opts *ListOptions) ([]*Event, *Response, error) { - var u string - if publicOnly { - u = fmt.Sprintf("users/%v/received_events/public", user) - } else { - u = fmt.Sprintf("users/%v/received_events", user) - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListUserEventsForOrganization provides the user’s organization dashboard. You -// must be authenticated as the user to view this. 
-// -// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-organization-events-for-the-authenticated-user -func (s *ActivityService) ListUserEventsForOrganization(ctx context.Context, org, user string, opts *ListOptions) ([]*Event, *Response, error) { - u := fmt.Sprintf("users/%v/events/orgs/%v", user, org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*Event - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_notifications.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_notifications.go deleted file mode 100644 index 03476c2e2c..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_notifications.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// Notification identifies a GitHub notification for a user. -type Notification struct { - ID *string `json:"id,omitempty"` - Repository *Repository `json:"repository,omitempty"` - Subject *NotificationSubject `json:"subject,omitempty"` - - // Reason identifies the event that triggered the notification. - // - // GitHub API docs: https://docs.github.com/en/rest/activity#notification-reasons - Reason *string `json:"reason,omitempty"` - - Unread *bool `json:"unread,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - LastReadAt *Timestamp `json:"last_read_at,omitempty"` - URL *string `json:"url,omitempty"` -} - -// NotificationSubject identifies the subject of a notification. -type NotificationSubject struct { - Title *string `json:"title,omitempty"` - URL *string `json:"url,omitempty"` - LatestCommentURL *string `json:"latest_comment_url,omitempty"` - Type *string `json:"type,omitempty"` -} - -// NotificationListOptions specifies the optional parameters to the -// ActivityService.ListNotifications method. -type NotificationListOptions struct { - All bool `url:"all,omitempty"` - Participating bool `url:"participating,omitempty"` - Since time.Time `url:"since,omitempty"` - Before time.Time `url:"before,omitempty"` - - ListOptions -} - -// ListNotifications lists all notifications for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#list-notifications-for-the-authenticated-user -func (s *ActivityService) ListNotifications(ctx context.Context, opts *NotificationListOptions) ([]*Notification, *Response, error) { - u := "notifications" - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var notifications []*Notification - resp, err := s.client.Do(ctx, req, ¬ifications) - if err != nil { - return nil, resp, err - } - - return notifications, resp, nil -} - -// ListRepositoryNotifications lists all notifications in a given repository -// for the authenticated user. 
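Aside: NotificationListOptions, defined in the deletion above, embeds ListOptions and adds the All/Participating/Since/Before filters. A sketch of listing the last day's notifications (assumes a "time" import alongside the usual ones):

func recentNotifications(ctx context.Context, client *github.Client) error {
	opts := &github.NotificationListOptions{
		All:         true,                            // include already-read notifications
		Since:       time.Now().Add(-24 * time.Hour), // zero value omits the parameter
		ListOptions: github.ListOptions{PerPage: 50},
	}
	ns, _, err := client.Activity.ListNotifications(ctx, opts)
	if err != nil {
		return err
	}
	for _, n := range ns {
		fmt.Printf("[%s] %s\n", n.GetReason(), n.GetSubject().GetTitle())
	}
	return nil
}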
-// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#list-repository-notifications-for-the-authenticated-user -func (s *ActivityService) ListRepositoryNotifications(ctx context.Context, owner, repo string, opts *NotificationListOptions) ([]*Notification, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var notifications []*Notification - resp, err := s.client.Do(ctx, req, ¬ifications) - if err != nil { - return nil, resp, err - } - - return notifications, resp, nil -} - -type markReadOptions struct { - LastReadAt Timestamp `json:"last_read_at,omitempty"` -} - -// MarkNotificationsRead marks all notifications up to lastRead as read. -// -// GitHub API docs: https://docs.github.com/en/rest/activity#mark-as-read -func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead Timestamp) (*Response, error) { - opts := &markReadOptions{ - LastReadAt: lastRead, - } - req, err := s.client.NewRequest("PUT", "notifications", opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// MarkRepositoryNotificationsRead marks all notifications up to lastRead in -// the specified repository as read. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#mark-repository-notifications-as-read -func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, owner, repo string, lastRead Timestamp) (*Response, error) { - opts := &markReadOptions{ - LastReadAt: lastRead, - } - u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetThread gets the specified notification thread. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#get-a-thread -func (s *ActivityService) GetThread(ctx context.Context, id string) (*Notification, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - notification := new(Notification) - resp, err := s.client.Do(ctx, req, notification) - if err != nil { - return nil, resp, err - } - - return notification, resp, nil -} - -// MarkThreadRead marks the specified thread as read. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#mark-a-thread-as-read -func (s *ActivityService) MarkThreadRead(ctx context.Context, id string) (*Response, error) { - u := fmt.Sprintf("notifications/threads/%v", id) - - req, err := s.client.NewRequest("PATCH", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetThreadSubscription checks to see if the authenticated user is subscribed -// to a thread. 
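Aside: MarkNotificationsRead takes a github.Timestamp (which embeds time.Time), while the thread helpers address a single notification by its string ID. A sketch combining both, with a hypothetical thread ID:

func catchUp(ctx context.Context, client *github.Client, threadID string) error {
	// Mark everything up to "now" as read...
	if _, err := client.Activity.MarkNotificationsRead(ctx, github.Timestamp{Time: time.Now()}); err != nil {
		return err
	}
	// ...or inspect and mark a single thread.
	n, _, err := client.Activity.GetThread(ctx, threadID)
	if err != nil {
		return err
	}
	fmt.Println("thread:", n.GetSubject().GetTitle())
	_, err = client.Activity.MarkThreadRead(ctx, threadID)
	return err
}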
-// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#get-a-thread-subscription-for-the-authenticated-user -func (s *ActivityService) GetThreadSubscription(ctx context.Context, id string) (*Subscription, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, nil -} - -// SetThreadSubscription sets the subscription for the specified thread for the -// authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#set-a-thread-subscription -func (s *ActivityService) SetThreadSubscription(ctx context.Context, id string, subscription *Subscription) (*Subscription, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - - req, err := s.client.NewRequest("PUT", u, subscription) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, nil -} - -// DeleteThreadSubscription deletes the subscription for the specified thread -// for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#delete-a-thread-subscription -func (s *ActivityService) DeleteThreadSubscription(ctx context.Context, id string) (*Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_star.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_star.go deleted file mode 100644 index 65a316f532..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_star.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" -) - -// StarredRepository is returned by ListStarred. -type StarredRepository struct { - StarredAt *Timestamp `json:"starred_at,omitempty"` - Repository *Repository `json:"repo,omitempty"` -} - -// Stargazer represents a user that has starred a repository. -type Stargazer struct { - StarredAt *Timestamp `json:"starred_at,omitempty"` - User *User `json:"user,omitempty"` -} - -// ListStargazers lists people who have starred the specified repo. 
-// -// GitHub API docs: https://docs.github.com/en/rest/activity/starring#list-stargazers -func (s *ActivityService) ListStargazers(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Stargazer, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeStarringPreview) - - var stargazers []*Stargazer - resp, err := s.client.Do(ctx, req, &stargazers) - if err != nil { - return nil, resp, err - } - - return stargazers, resp, nil -} - -// ActivityListStarredOptions specifies the optional parameters to the -// ActivityService.ListStarred method. -type ActivityListStarredOptions struct { - // How to sort the repository list. Possible values are: created, updated, - // pushed, full_name. Default is "full_name". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort repositories. Possible values are: asc, desc. - // Default is "asc" when sort is "full_name", otherwise default is "desc". - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// ListStarred lists all the repos starred by a user. Passing the empty string -// will list the starred repositories for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/starring#list-repositories-starred-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/activity/starring#list-repositories-starred-by-a-user -func (s *ActivityService) ListStarred(ctx context.Context, user string, opts *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/starred", user) - } else { - u = "user/starred" - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when APIs fully launch - acceptHeaders := []string{mediaTypeStarringPreview, mediaTypeTopicsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var repos []*StarredRepository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// IsStarred checks if a repository is starred by authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/starring#check-if-a-repository-is-starred-by-the-authenticated-user -func (s *ActivityService) IsStarred(ctx context.Context, owner, repo string) (bool, *Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - starred, err := parseBoolResponse(err) - return starred, resp, err -} - -// Star a repository as the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/starring#star-a-repository-for-the-authenticated-user -func (s *ActivityService) Star(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Unstar a repository as the authenticated user. 
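Aside: the starring API above pairs a boolean membership check (IsStarred) with idempotent PUT/DELETE calls (Star/Unstar), and ListStarred accepts "" to mean the authenticated user. A sketch:

func starIfNeeded(ctx context.Context, client *github.Client) error {
	starred, _, err := client.Activity.IsStarred(ctx, "octo-org", "octo-repo")
	if err != nil {
		return err
	}
	if !starred {
		if _, err := client.Activity.Star(ctx, "octo-org", "octo-repo"); err != nil {
			return err
		}
	}
	// Most recently starred repos for the authenticated user ("" = current user).
	opts := &github.ActivityListStarredOptions{Sort: "created", Direction: "desc"}
	repos, _, err := client.Activity.ListStarred(ctx, "", opts)
	if err != nil {
		return err
	}
	for _, r := range repos {
		fmt.Println(r.GetRepository().GetFullName(), r.GetStarredAt())
	}
	return nil
}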
-// -// GitHub API docs: https://docs.github.com/en/rest/activity/starring#unstar-a-repository-for-the-authenticated-user -func (s *ActivityService) Unstar(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_watching.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_watching.go deleted file mode 100644 index 2d6fafcc79..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/activity_watching.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Subscription identifies a repository or thread subscription. -type Subscription struct { - Subscribed *bool `json:"subscribed,omitempty"` - Ignored *bool `json:"ignored,omitempty"` - Reason *string `json:"reason,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - URL *string `json:"url,omitempty"` - - // only populated for repository subscriptions - RepositoryURL *string `json:"repository_url,omitempty"` - - // only populated for thread subscriptions - ThreadURL *string `json:"thread_url,omitempty"` -} - -// ListWatchers lists watchers of a particular repo. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/watching#list-watchers -func (s *ActivityService) ListWatchers(ctx context.Context, owner, repo string, opts *ListOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var watchers []*User - resp, err := s.client.Do(ctx, req, &watchers) - if err != nil { - return nil, resp, err - } - - return watchers, resp, nil -} - -// ListWatched lists the repositories the specified user is watching. Passing -// the empty string will fetch watched repos for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/watching#list-repositories-watched-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/activity/watching#list-repositories-watched-by-a-user -func (s *ActivityService) ListWatched(ctx context.Context, user string, opts *ListOptions) ([]*Repository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/subscriptions", user) - } else { - u = "user/subscriptions" - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var watched []*Repository - resp, err := s.client.Do(ctx, req, &watched) - if err != nil { - return nil, resp, err - } - - return watched, resp, nil -} - -// GetRepositorySubscription returns the subscription for the specified -// repository for the authenticated user. If the authenticated user is not -// watching the repository, a nil Subscription is returned. 
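Aside: per the doc comments above, Subscription.Ignored mutes notifications while staying subscribed, whereas DeleteRepositorySubscription stops watching outright. A one-call sketch using the github.Bool pointer helper:

func muteRepo(ctx context.Context, client *github.Client) error {
	// Ignored=true suppresses notifications without unwatching;
	// use DeleteRepositorySubscription to stop watching entirely.
	sub := &github.Subscription{Ignored: github.Bool(true)}
	_, _, err := client.Activity.SetRepositorySubscription(ctx, "octo-org", "octo-repo", sub)
	return err
}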
-// -// GitHub API docs: https://docs.github.com/en/rest/activity/watching#get-a-repository-subscription -func (s *ActivityService) GetRepositorySubscription(ctx context.Context, owner, repo string) (*Subscription, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - // if it's just a 404, don't return that as an error - _, err = parseBoolResponse(err) - return nil, resp, err - } - - return sub, resp, nil -} - -// SetRepositorySubscription sets the subscription for the specified repository -// for the authenticated user. -// -// To watch a repository, set subscription.Subscribed to true. -// To ignore notifications made within a repository, set subscription.Ignored to true. -// To stop watching a repository, use DeleteRepositorySubscription. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/watching#set-a-repository-subscription -func (s *ActivityService) SetRepositorySubscription(ctx context.Context, owner, repo string, subscription *Subscription) (*Subscription, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - - req, err := s.client.NewRequest("PUT", u, subscription) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(ctx, req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, nil -} - -// DeleteRepositorySubscription deletes the subscription for the specified -// repository for the authenticated user. -// -// This is used to stop watching a repository. To control whether or not to -// receive notifications from a repository, use SetRepositorySubscription. -// -// GitHub API docs: https://docs.github.com/en/rest/activity/watching#delete-a-repository-subscription -func (s *ActivityService) DeleteRepositorySubscription(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/admin.go deleted file mode 100644 index 1b28ef64c7..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// AdminService handles communication with the admin related methods of the -// GitHub API. These API routes are normally only accessible for GitHub -// Enterprise installations. -// -// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin -type AdminService service - -// TeamLDAPMapping represents the mapping between a GitHub team and an LDAP group. 
-type TeamLDAPMapping struct { - ID *int64 `json:"id,omitempty"` - LDAPDN *string `json:"ldap_dn,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Slug *string `json:"slug,omitempty"` - Description *string `json:"description,omitempty"` - Privacy *string `json:"privacy,omitempty"` - Permission *string `json:"permission,omitempty"` - - MembersURL *string `json:"members_url,omitempty"` - RepositoriesURL *string `json:"repositories_url,omitempty"` -} - -func (m TeamLDAPMapping) String() string { - return Stringify(m) -} - -// UserLDAPMapping represents the mapping between a GitHub user and an LDAP user. -type UserLDAPMapping struct { - ID *int64 `json:"id,omitempty"` - LDAPDN *string `json:"ldap_dn,omitempty"` - Login *string `json:"login,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin,omitempty"` - - URL *string `json:"url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` -} - -func (m UserLDAPMapping) String() string { - return Stringify(m) -} - -// Enterprise represents the GitHub enterprise profile. -type Enterprise struct { - ID *int `json:"id,omitempty"` - Slug *string `json:"slug,omitempty"` - Name *string `json:"name,omitempty"` - NodeID *string `json:"node_id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - Description *string `json:"description,omitempty"` - WebsiteURL *string `json:"website_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -func (m Enterprise) String() string { - return Stringify(m) -} - -// UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user. -// -// GitHub API docs: https://docs.github.com/en/enterprise-server/rest/enterprise-admin/ldap#update-ldap-mapping-for-a-user -func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) { - u := fmt.Sprintf("admin/ldap/users/%v/mapping", user) - req, err := s.client.NewRequest("PATCH", u, mapping) - if err != nil { - return nil, nil, err - } - - m := new(UserLDAPMapping) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group. 
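Aside: the LDAP mapping calls above PATCH a mapping object keyed by user login or team ID; they only apply to GitHub Enterprise Server. A sketch with an entirely hypothetical DN and login:

func mapLDAPUser(ctx context.Context, client *github.Client) error {
	mapping := &github.UserLDAPMapping{
		LDAPDN: github.String("uid=mona,ou=users,dc=example,dc=com"),
	}
	m, _, err := client.Admin.UpdateUserLDAPMapping(ctx, "mona", mapping)
	if err != nil {
		return err
	}
	fmt.Println("mapped:", m.GetLDAPDN())
	return nil
}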
-// -// GitHub API docs: https://docs.github.com/en/rest/enterprise/ldap/#update-ldap-mapping-for-a-team -func (s *AdminService) UpdateTeamLDAPMapping(ctx context.Context, team int64, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) { - u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team) - req, err := s.client.NewRequest("PATCH", u, mapping) - if err != nil { - return nil, nil, err - } - - m := new(TeamLDAPMapping) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_orgs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_orgs.go deleted file mode 100644 index 448e51f631..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_orgs.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// createOrgRequest is a subset of Organization and is used internally -// by CreateOrg to pass only the known fields for the endpoint. -type createOrgRequest struct { - Login *string `json:"login,omitempty"` - Admin *string `json:"admin,omitempty"` -} - -// CreateOrg creates a new organization in GitHub Enterprise. -// -// Note that only a subset of the org fields are used and org must -// not be nil. -// -// GitHub Enterprise API docs: https://developer.github.com/enterprise/v3/enterprise-admin/orgs/#create-an-organization -func (s *AdminService) CreateOrg(ctx context.Context, org *Organization, admin string) (*Organization, *Response, error) { - u := "admin/organizations" - - orgReq := &createOrgRequest{ - Login: org.Login, - Admin: &admin, - } - - req, err := s.client.NewRequest("POST", u, orgReq) - if err != nil { - return nil, nil, err - } - - o := new(Organization) - resp, err := s.client.Do(ctx, req, o) - if err != nil { - return nil, resp, err - } - - return o, resp, nil -} - -// renameOrgRequest is a subset of Organization and is used internally -// by RenameOrg and RenameOrgByName to pass only the known fields for the endpoint. -type renameOrgRequest struct { - Login *string `json:"login,omitempty"` -} - -// RenameOrgResponse is the response given when renaming an Organization. -type RenameOrgResponse struct { - Message *string `json:"message,omitempty"` - URL *string `json:"url,omitempty"` -} - -// RenameOrg renames an organization in GitHub Enterprise. -// -// GitHub Enterprise API docs: https://developer.github.com/enterprise/v3/enterprise-admin/orgs/#rename-an-organization -func (s *AdminService) RenameOrg(ctx context.Context, org *Organization, newName string) (*RenameOrgResponse, *Response, error) { - return s.RenameOrgByName(ctx, *org.Login, newName) -} - -// RenameOrgByName renames an organization in GitHub Enterprise using its current name. 
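Aside: CreateOrg and RenameOrgByName above are Enterprise-admin endpoints, so they need a client pointed at an Enterprise Server base URL. A sketch assuming go-github's NewEnterpriseClient constructor; every URL and login here is a placeholder:

func createAndRenameOrg(ctx context.Context) error {
	client, err := github.NewEnterpriseClient(
		"https://ghe.example.com/api/v3/",
		"https://ghe.example.com/api/uploads/",
		nil) // real use needs an authenticated http.Client
	if err != nil {
		return err
	}
	org := &github.Organization{Login: github.String("new-org")}
	if _, _, err := client.Admin.CreateOrg(ctx, org, "site-admin"); err != nil {
		return err
	}
	renamed, _, err := client.Admin.RenameOrgByName(ctx, "new-org", "renamed-org")
	if err != nil {
		return err
	}
	fmt.Println(renamed.GetMessage())
	return nil
}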
-// -// GitHub Enterprise API docs: https://developer.github.com/enterprise/v3/enterprise-admin/orgs/#rename-an-organization -func (s *AdminService) RenameOrgByName(ctx context.Context, org, newName string) (*RenameOrgResponse, *Response, error) { - u := fmt.Sprintf("admin/organizations/%v", org) - - orgReq := &renameOrgRequest{ - Login: &newName, - } - - req, err := s.client.NewRequest("PATCH", u, orgReq) - if err != nil { - return nil, nil, err - } - - o := new(RenameOrgResponse) - resp, err := s.client.Do(ctx, req, o) - if err != nil { - return nil, resp, err - } - - return o, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_stats.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_stats.go deleted file mode 100644 index ef294f4479..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_stats.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// AdminStats represents a variety of stats of a GitHub Enterprise -// installation. -type AdminStats struct { - Issues *IssueStats `json:"issues,omitempty"` - Hooks *HookStats `json:"hooks,omitempty"` - Milestones *MilestoneStats `json:"milestones,omitempty"` - Orgs *OrgStats `json:"orgs,omitempty"` - Comments *CommentStats `json:"comments,omitempty"` - Pages *PageStats `json:"pages,omitempty"` - Users *UserStats `json:"users,omitempty"` - Gists *GistStats `json:"gists,omitempty"` - Pulls *PullStats `json:"pulls,omitempty"` - Repos *RepoStats `json:"repos,omitempty"` -} - -func (s AdminStats) String() string { - return Stringify(s) -} - -// IssueStats represents the number of total, open and closed issues. -type IssueStats struct { - TotalIssues *int `json:"total_issues,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` - ClosedIssues *int `json:"closed_issues,omitempty"` -} - -func (s IssueStats) String() string { - return Stringify(s) -} - -// HookStats represents the number of total, active and inactive hooks. -type HookStats struct { - TotalHooks *int `json:"total_hooks,omitempty"` - ActiveHooks *int `json:"active_hooks,omitempty"` - InactiveHooks *int `json:"inactive_hooks,omitempty"` -} - -func (s HookStats) String() string { - return Stringify(s) -} - -// MilestoneStats represents the number of total, open and close milestones. -type MilestoneStats struct { - TotalMilestones *int `json:"total_milestones,omitempty"` - OpenMilestones *int `json:"open_milestones,omitempty"` - ClosedMilestones *int `json:"closed_milestones,omitempty"` -} - -func (s MilestoneStats) String() string { - return Stringify(s) -} - -// OrgStats represents the number of total, disabled organizations and the team -// and team member count. -type OrgStats struct { - TotalOrgs *int `json:"total_orgs,omitempty"` - DisabledOrgs *int `json:"disabled_orgs,omitempty"` - TotalTeams *int `json:"total_teams,omitempty"` - TotalTeamMembers *int `json:"total_team_members,omitempty"` -} - -func (s OrgStats) String() string { - return Stringify(s) -} - -// CommentStats represents the number of total comments on commits, gists, issues -// and pull requests. 
-type CommentStats struct { - TotalCommitComments *int `json:"total_commit_comments,omitempty"` - TotalGistComments *int `json:"total_gist_comments,omitempty"` - TotalIssueComments *int `json:"total_issue_comments,omitempty"` - TotalPullRequestComments *int `json:"total_pull_request_comments,omitempty"` -} - -func (s CommentStats) String() string { - return Stringify(s) -} - -// PageStats represents the total number of github pages. -type PageStats struct { - TotalPages *int `json:"total_pages,omitempty"` -} - -func (s PageStats) String() string { - return Stringify(s) -} - -// UserStats represents the number of total, admin and suspended users. -type UserStats struct { - TotalUsers *int `json:"total_users,omitempty"` - AdminUsers *int `json:"admin_users,omitempty"` - SuspendedUsers *int `json:"suspended_users,omitempty"` -} - -func (s UserStats) String() string { - return Stringify(s) -} - -// GistStats represents the number of total, private and public gists. -type GistStats struct { - TotalGists *int `json:"total_gists,omitempty"` - PrivateGists *int `json:"private_gists,omitempty"` - PublicGists *int `json:"public_gists,omitempty"` -} - -func (s GistStats) String() string { - return Stringify(s) -} - -// PullStats represents the number of total, merged, mergable and unmergeable -// pull-requests. -type PullStats struct { - TotalPulls *int `json:"total_pulls,omitempty"` - MergedPulls *int `json:"merged_pulls,omitempty"` - MergablePulls *int `json:"mergeable_pulls,omitempty"` - UnmergablePulls *int `json:"unmergeable_pulls,omitempty"` -} - -func (s PullStats) String() string { - return Stringify(s) -} - -// RepoStats represents the number of total, root, fork, organization repositories -// together with the total number of pushes and wikis. -type RepoStats struct { - TotalRepos *int `json:"total_repos,omitempty"` - RootRepos *int `json:"root_repos,omitempty"` - ForkRepos *int `json:"fork_repos,omitempty"` - OrgRepos *int `json:"org_repos,omitempty"` - TotalPushes *int `json:"total_pushes,omitempty"` - TotalWikis *int `json:"total_wikis,omitempty"` -} - -func (s RepoStats) String() string { - return Stringify(s) -} - -// GetAdminStats returns a variety of metrics about a GitHub Enterprise -// installation. -// -// Please note that this is only available to site administrators, -// otherwise it will error with a 404 not found (instead of 401 or 403). -// -// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/admin_stats/ -func (s *AdminService) GetAdminStats(ctx context.Context) (*AdminStats, *Response, error) { - u := fmt.Sprintf("enterprise/stats/all") - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - m := new(AdminStats) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_users.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_users.go deleted file mode 100644 index d756a77e20..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/admin_users.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2019 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// createUserRequest is a subset of User and is used internally -// by CreateUser to pass only the known fields for the endpoint. 
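Aside: GetAdminStats above returns the whole AdminStats tree in one call, and as its comment warns, non-admins get a 404 rather than a 401/403. A sketch relying on go-github's nil-safe generated accessors:

func printAdminStats(ctx context.Context, client *github.Client) error {
	stats, _, err := client.Admin.GetAdminStats(ctx)
	if err != nil {
		return err // site administrators only; others see a 404
	}
	fmt.Printf("repos=%d users=%d orgs=%d\n",
		stats.GetRepos().GetTotalRepos(),
		stats.GetUsers().GetTotalUsers(),
		stats.GetOrgs().GetTotalOrgs())
	return nil
}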
-type createUserRequest struct { - Login *string `json:"login,omitempty"` - Email *string `json:"email,omitempty"` -} - -// CreateUser creates a new user in GitHub Enterprise. -// -// GitHub Enterprise API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#create-a-new-user -func (s *AdminService) CreateUser(ctx context.Context, login, email string) (*User, *Response, error) { - u := "admin/users" - - userReq := &createUserRequest{ - Login: &login, - Email: &email, - } - - req, err := s.client.NewRequest("POST", u, userReq) - if err != nil { - return nil, nil, err - } - - var user User - resp, err := s.client.Do(ctx, req, &user) - if err != nil { - return nil, resp, err - } - - return &user, resp, nil -} - -// DeleteUser deletes a user in GitHub Enterprise. -// -// GitHub Enterprise API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#delete-a-user -func (s *AdminService) DeleteUser(ctx context.Context, username string) (*Response, error) { - u := "admin/users/" + username - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// ImpersonateUserOptions represents the scoping for the OAuth token. -type ImpersonateUserOptions struct { - Scopes []string `json:"scopes,omitempty"` -} - -// OAuthAPP represents the GitHub Site Administrator OAuth app. -type OAuthAPP struct { - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - ClientID *string `json:"client_id,omitempty"` -} - -func (s OAuthAPP) String() string { - return Stringify(s) -} - -// UserAuthorization represents the impersonation response. -type UserAuthorization struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Scopes []string `json:"scopes,omitempty"` - Token *string `json:"token,omitempty"` - TokenLastEight *string `json:"token_last_eight,omitempty"` - HashedToken *string `json:"hashed_token,omitempty"` - App *OAuthAPP `json:"app,omitempty"` - Note *string `json:"note,omitempty"` - NoteURL *string `json:"note_url,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` -} - -// CreateUserImpersonation creates an impersonation OAuth token. -// -// GitHub Enterprise API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#create-an-impersonation-oauth-token -func (s *AdminService) CreateUserImpersonation(ctx context.Context, username string, opts *ImpersonateUserOptions) (*UserAuthorization, *Response, error) { - u := fmt.Sprintf("admin/users/%s/authorizations", username) - - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - a := new(UserAuthorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// DeleteUserImpersonation deletes an impersonation OAuth token. 
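Aside: the admin user-management calls above compose naturally: create an account, then mint a scoped impersonation token for it. A sketch with placeholder login/email and scopes:

func provisionUser(ctx context.Context, client *github.Client) error {
	user, _, err := client.Admin.CreateUser(ctx, "mona", "mona@example.com")
	if err != nil {
		return err
	}
	auth, _, err := client.Admin.CreateUserImpersonation(ctx, user.GetLogin(), &github.ImpersonateUserOptions{
		Scopes: []string{"repo", "read:org"},
	})
	if err != nil {
		return err
	}
	fmt.Println("impersonation token ends in:", auth.GetTokenLastEight())
	return nil
}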
-// -// GitHub Enterprise API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#delete-an-impersonation-oauth-token -func (s *AdminService) DeleteUserImpersonation(ctx context.Context, username string) (*Response, error) { - u := fmt.Sprintf("admin/users/%s/authorizations", username) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/apps.go deleted file mode 100644 index ab83d59ab2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// AppsService provides access to the installation related functions -// in the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/ -type AppsService service - -// App represents a GitHub App. -type App struct { - ID *int64 `json:"id,omitempty"` - Slug *string `json:"slug,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - ExternalURL *string `json:"external_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Permissions *InstallationPermissions `json:"permissions,omitempty"` - Events []string `json:"events,omitempty"` - InstallationsCount *int `json:"installations_count,omitempty"` -} - -// InstallationToken represents an installation token. -type InstallationToken struct { - Token *string `json:"token,omitempty"` - ExpiresAt *Timestamp `json:"expires_at,omitempty"` - Permissions *InstallationPermissions `json:"permissions,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` -} - -// InstallationTokenOptions allow restricting a token's access to specific repositories. -type InstallationTokenOptions struct { - // The IDs of the repositories that the installation token can access. - // Providing repository IDs restricts the access of an installation token to specific repositories. - RepositoryIDs []int64 `json:"repository_ids,omitempty"` - - // The names of the repositories that the installation token can access. - // Providing repository names restricts the access of an installation token to specific repositories. - Repositories []string `json:"repositories,omitempty"` - - // The permissions granted to the access token. - // The permissions object includes the permission names and their access type. - Permissions *InstallationPermissions `json:"permissions,omitempty"` -} - -// InstallationPermissions lists the repository and organization permissions for an installation. 
-// -// Permission names taken from: -// -// https://docs.github.com/en/enterprise-server@3.0/rest/apps#create-an-installation-access-token-for-an-app -// https://docs.github.com/en/rest/apps#create-an-installation-access-token-for-an-app -type InstallationPermissions struct { - Actions *string `json:"actions,omitempty"` - Administration *string `json:"administration,omitempty"` - Blocking *string `json:"blocking,omitempty"` - Checks *string `json:"checks,omitempty"` - Contents *string `json:"contents,omitempty"` - ContentReferences *string `json:"content_references,omitempty"` - Deployments *string `json:"deployments,omitempty"` - Emails *string `json:"emails,omitempty"` - Environments *string `json:"environments,omitempty"` - Followers *string `json:"followers,omitempty"` - Issues *string `json:"issues,omitempty"` - Metadata *string `json:"metadata,omitempty"` - Members *string `json:"members,omitempty"` - OrganizationAdministration *string `json:"organization_administration,omitempty"` - OrganizationCustomRoles *string `json:"organization_custom_roles,omitempty"` - OrganizationHooks *string `json:"organization_hooks,omitempty"` - OrganizationPackages *string `json:"organization_packages,omitempty"` - OrganizationPlan *string `json:"organization_plan,omitempty"` - OrganizationPreReceiveHooks *string `json:"organization_pre_receive_hooks,omitempty"` - OrganizationProjects *string `json:"organization_projects,omitempty"` - OrganizationSecrets *string `json:"organization_secrets,omitempty"` - OrganizationSelfHostedRunners *string `json:"organization_self_hosted_runners,omitempty"` - OrganizationUserBlocking *string `json:"organization_user_blocking,omitempty"` - Packages *string `json:"packages,omitempty"` - Pages *string `json:"pages,omitempty"` - PullRequests *string `json:"pull_requests,omitempty"` - RepositoryHooks *string `json:"repository_hooks,omitempty"` - RepositoryProjects *string `json:"repository_projects,omitempty"` - RepositoryPreReceiveHooks *string `json:"repository_pre_receive_hooks,omitempty"` - Secrets *string `json:"secrets,omitempty"` - SecretScanningAlerts *string `json:"secret_scanning_alerts,omitempty"` - SecurityEvents *string `json:"security_events,omitempty"` - SingleFile *string `json:"single_file,omitempty"` - Statuses *string `json:"statuses,omitempty"` - TeamDiscussions *string `json:"team_discussions,omitempty"` - VulnerabilityAlerts *string `json:"vulnerability_alerts,omitempty"` - Workflows *string `json:"workflows,omitempty"` -} - -// Installation represents a GitHub Apps installation. 
-type Installation struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - AppID *int64 `json:"app_id,omitempty"` - AppSlug *string `json:"app_slug,omitempty"` - TargetID *int64 `json:"target_id,omitempty"` - Account *User `json:"account,omitempty"` - AccessTokensURL *string `json:"access_tokens_url,omitempty"` - RepositoriesURL *string `json:"repositories_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - TargetType *string `json:"target_type,omitempty"` - SingleFileName *string `json:"single_file_name,omitempty"` - RepositorySelection *string `json:"repository_selection,omitempty"` - Events []string `json:"events,omitempty"` - SingleFilePaths []string `json:"single_file_paths,omitempty"` - Permissions *InstallationPermissions `json:"permissions,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - HasMultipleSingleFiles *bool `json:"has_multiple_single_files,omitempty"` - SuspendedBy *User `json:"suspended_by,omitempty"` - SuspendedAt *Timestamp `json:"suspended_at,omitempty"` -} - -// Attachment represents a GitHub Apps attachment. -type Attachment struct { - ID *int64 `json:"id,omitempty"` - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` -} - -// ContentReference represents a reference to a URL in an issue or pull request. -type ContentReference struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Reference *string `json:"reference,omitempty"` -} - -func (i Installation) String() string { - return Stringify(i) -} - -// Get a single GitHub App. Passing the empty string will get -// the authenticated GitHub App. -// -// Note: appSlug is just the URL-friendly name of your GitHub App. -// You can find this on the settings page for your GitHub App -// (e.g., https://github.com/settings/apps/:app_slug). -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-the-authenticated-app -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-app -func (s *AppsService) Get(ctx context.Context, appSlug string) (*App, *Response, error) { - var u string - if appSlug != "" { - u = fmt.Sprintf("apps/%v", appSlug) - } else { - u = "app" - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - app := new(App) - resp, err := s.client.Do(ctx, req, app) - if err != nil { - return nil, resp, err - } - - return app, resp, nil -} - -// ListInstallations lists the installations that the current GitHub App has. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#list-installations-for-the-authenticated-app -func (s *AppsService) ListInstallations(ctx context.Context, opts *ListOptions) ([]*Installation, *Response, error) { - u, err := addOptions("app/installations", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var i []*Installation - resp, err := s.client.Do(ctx, req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// GetInstallation returns the specified installation. 
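Aside: the installation endpoints above require the client to authenticate as the GitHub App itself (a signed-JWT transport, e.g. a ghinstallation-style RoundTripper); that setup is omitted in this sketch. InstallationTokenOptions lets the minted token be narrowed to specific repositories:

func mintInstallationTokens(ctx context.Context, client *github.Client) error {
	insts, _, err := client.Apps.ListInstallations(ctx, &github.ListOptions{PerPage: 100})
	if err != nil {
		return err
	}
	for _, inst := range insts {
		tok, _, err := client.Apps.CreateInstallationToken(ctx, inst.GetID(), &github.InstallationTokenOptions{
			Repositories: []string{"octo-repo"}, // restrict the token's reach
		})
		if err != nil {
			return err
		}
		fmt.Printf("installation %d: token expires %v\n", inst.GetID(), tok.GetExpiresAt())
	}
	return nil
}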
-// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app -func (s *AppsService) GetInstallation(ctx context.Context, id int64) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("app/installations/%v", id)) -} - -// ListUserInstallations lists installations that are accessible to the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/installations#list-app-installations-accessible-to-the-user-access-token -func (s *AppsService) ListUserInstallations(ctx context.Context, opts *ListOptions) ([]*Installation, *Response, error) { - u, err := addOptions("user/installations", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var i struct { - Installations []*Installation `json:"installations"` - } - resp, err := s.client.Do(ctx, req, &i) - if err != nil { - return nil, resp, err - } - - return i.Installations, resp, nil -} - -// SuspendInstallation suspends the specified installation. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#suspend-an-app-installation -func (s *AppsService) SuspendInstallation(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("app/installations/%v/suspended", id) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// UnsuspendInstallation unsuspends the specified installation. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#unsuspend-an-app-installation -func (s *AppsService) UnsuspendInstallation(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("app/installations/%v/suspended", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeleteInstallation deletes the specified installation. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#delete-an-installation-for-the-authenticated-app -func (s *AppsService) DeleteInstallation(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("app/installations/%v", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// CreateInstallationToken creates a new installation token. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app -func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64, opts *InstallationTokenOptions) (*InstallationToken, *Response, error) { - u := fmt.Sprintf("app/installations/%v/access_tokens", id) - - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - t := new(InstallationToken) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// CreateAttachment creates a new attachment on user comment containing a url. -// -// TODO: Find GitHub API docs. 
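CreateInstallationToken above takes an InstallationTokenOptions value (defined elsewhere in this file, not shown in this hunk) that can narrow what the minted token may touch. A hedged sketch; the repository name is purely illustrative.

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// mintScopedToken requests a short-lived installation token restricted to
// one repository with read-only contents access.
func mintScopedToken(ctx context.Context, client *github.Client, installationID int64) (string, error) {
	tok, _, err := client.Apps.CreateInstallationToken(ctx, installationID, &github.InstallationTokenOptions{
		Repositories: []string{"some-repo"}, // hypothetical repository
		Permissions: &github.InstallationPermissions{
			Contents: github.String("read"),
		},
	})
	if err != nil {
		return "", err
	}
	return tok.GetToken(), nil
}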
-func (s *AppsService) CreateAttachment(ctx context.Context, contentReferenceID int64, title, body string) (*Attachment, *Response, error) { - u := fmt.Sprintf("content_references/%v/attachments", contentReferenceID) - payload := &Attachment{Title: String(title), Body: String(body)} - req, err := s.client.NewRequest("POST", u, payload) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeContentAttachmentsPreview) - - m := &Attachment{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// FindOrganizationInstallation finds the organization's installation information. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app -func (s *AppsService) FindOrganizationInstallation(ctx context.Context, org string) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("orgs/%v/installation", org)) -} - -// FindRepositoryInstallation finds the repository's installation information. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-a-repository-installation-for-the-authenticated-app -func (s *AppsService) FindRepositoryInstallation(ctx context.Context, owner, repo string) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("repos/%v/%v/installation", owner, repo)) -} - -// FindRepositoryInstallationByID finds the repository's installation information. -// -// Note: FindRepositoryInstallationByID uses the undocumented GitHub API endpoint /repositories/:id/installation. -func (s *AppsService) FindRepositoryInstallationByID(ctx context.Context, id int64) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("repositories/%d/installation", id)) -} - -// FindUserInstallation finds the user's installation information. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app -func (s *AppsService) FindUserInstallation(ctx context.Context, user string) (*Installation, *Response, error) { - return s.getInstallation(ctx, fmt.Sprintf("users/%v/installation", user)) -} - -func (s *AppsService) getInstallation(ctx context.Context, url string) (*Installation, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - i := new(Installation) - resp, err := s.client.Do(ctx, req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_hooks.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_hooks.go deleted file mode 100644 index e3bd2afc03..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_hooks.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" -) - -// GetHookConfig returns the webhook configuration for a GitHub App. -// The underlying transport must be authenticated as an app. 
-// -// GitHub API docs: https://docs.github.com/en/rest/apps#get-a-webhook-configuration-for-an-app -func (s *AppsService) GetHookConfig(ctx context.Context) (*HookConfig, *Response, error) { - req, err := s.client.NewRequest("GET", "app/hook/config", nil) - if err != nil { - return nil, nil, err - } - - config := new(HookConfig) - resp, err := s.client.Do(ctx, req, &config) - if err != nil { - return nil, resp, err - } - - return config, resp, nil -} - -// UpdateHookConfig updates the webhook configuration for a GitHub App. -// The underlying transport must be authenticated as an app. -// -// GitHub API docs: https://docs.github.com/en/rest/apps#update-a-webhook-configuration-for-an-app -func (s *AppsService) UpdateHookConfig(ctx context.Context, config *HookConfig) (*HookConfig, *Response, error) { - req, err := s.client.NewRequest("PATCH", "app/hook/config", config) - if err != nil { - return nil, nil, err - } - - c := new(HookConfig) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_hooks_deliveries.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_hooks_deliveries.go deleted file mode 100644 index 33102f36d2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_hooks_deliveries.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListHookDeliveries lists deliveries of an App webhook. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/webhooks#list-deliveries-for-an-app-webhook -func (s *AppsService) ListHookDeliveries(ctx context.Context, opts *ListCursorOptions) ([]*HookDelivery, *Response, error) { - u, err := addOptions("app/hook/deliveries", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - deliveries := []*HookDelivery{} - resp, err := s.client.Do(ctx, req, &deliveries) - if err != nil { - return nil, resp, err - } - - return deliveries, resp, nil -} - -// GetHookDelivery returns the App webhook delivery with the specified ID. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/webhooks#get-a-delivery-for-an-app-webhook -func (s *AppsService) GetHookDelivery(ctx context.Context, deliveryID int64) (*HookDelivery, *Response, error) { - u := fmt.Sprintf("app/hook/deliveries/%v", deliveryID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - h := new(HookDelivery) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// RedeliverHookDelivery redelivers a delivery for an App webhook. 
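The delivery endpoints above, plus the RedeliverHookDelivery method whose definition continues just below, support a simple retry sweep. This sketch assumes the usual Cursor field on *github.Response for cursor pagination.

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// redeliverFailed re-sends every App webhook delivery that got a 4xx/5xx.
func redeliverFailed(ctx context.Context, client *github.Client) error {
	opts := &github.ListCursorOptions{PerPage: 100}
	for {
		deliveries, resp, err := client.Apps.ListHookDeliveries(ctx, opts)
		if err != nil {
			return err
		}
		for _, d := range deliveries {
			if d.GetStatusCode() >= 400 {
				if _, _, err := client.Apps.RedeliverHookDelivery(ctx, d.GetID()); err != nil {
					return err
				}
			}
		}
		if resp.Cursor == "" {
			return nil
		}
		opts.Cursor = resp.Cursor
	}
}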
-// -// GitHub API docs: https://docs.github.com/en/rest/apps/webhooks#redeliver-a-delivery-for-an-app-webhook -func (s *AppsService) RedeliverHookDelivery(ctx context.Context, deliveryID int64) (*HookDelivery, *Response, error) { - u := fmt.Sprintf("app/hook/deliveries/%v/attempts", deliveryID) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - h := new(HookDelivery) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_installation.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_installation.go deleted file mode 100644 index b619080713..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_installation.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" -) - -// ListRepositories represents the response from the list repos endpoints. -type ListRepositories struct { - TotalCount *int `json:"total_count,omitempty"` - Repositories []*Repository `json:"repositories"` -} - -// ListRepos lists the repositories that are accessible to the authenticated installation. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/installations#list-repositories-accessible-to-the-app-installation -func (s *AppsService) ListRepos(ctx context.Context, opts *ListOptions) (*ListRepositories, *Response, error) { - u, err := addOptions("installation/repositories", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{ - mediaTypeTopicsPreview, - mediaTypeRepositoryVisibilityPreview, - mediaTypeRepositoryTemplatePreview, - } - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var r *ListRepositories - - resp, err := s.client.Do(ctx, req, &r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// ListUserRepos lists repositories that are accessible -// to the authenticated user for an installation. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/installations#list-repositories-accessible-to-the-user-access-token -func (s *AppsService) ListUserRepos(ctx context.Context, id int64, opts *ListOptions) (*ListRepositories, *Response, error) { - u := fmt.Sprintf("user/installations/%v/repositories", id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{ - mediaTypeTopicsPreview, - mediaTypeRepositoryVisibilityPreview, - mediaTypeRepositoryTemplatePreview, - } - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var r *ListRepositories - resp, err := s.client.Do(ctx, req, &r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// AddRepository adds a single repository to an installation. 
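ListRepos above is scoped by the credential in use, so a sketch like the following must run with an installation token rather than the App JWT.

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// accessibleRepoNames collects the full names of every repository the
// current installation token can see.
func accessibleRepoNames(ctx context.Context, client *github.Client) ([]string, error) {
	var names []string
	opts := &github.ListOptions{PerPage: 100}
	for {
		repos, resp, err := client.Apps.ListRepos(ctx, opts)
		if err != nil {
			return nil, err
		}
		for _, r := range repos.Repositories {
			names = append(names, r.GetFullName())
		}
		if resp.NextPage == 0 {
			return names, nil
		}
		opts.Page = resp.NextPage
	}
}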
-// -// GitHub API docs: https://docs.github.com/en/rest/apps/installations#add-a-repository-to-an-app-installation -func (s *AppsService) AddRepository(ctx context.Context, instID, repoID int64) (*Repository, *Response, error) { - u := fmt.Sprintf("user/installations/%v/repositories/%v", instID, repoID) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, nil, err - } - - r := new(Repository) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// RemoveRepository removes a single repository from an installation. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/installations#remove-a-repository-from-an-app-installation -func (s *AppsService) RemoveRepository(ctx context.Context, instID, repoID int64) (*Response, error) { - u := fmt.Sprintf("user/installations/%v/repositories/%v", instID, repoID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RevokeInstallationToken revokes an installation token. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/installations#revoke-an-installation-access-token -func (s *AppsService) RevokeInstallationToken(ctx context.Context) (*Response, error) { - u := "installation/token" - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_manifest.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_manifest.go deleted file mode 100644 index fa4c85379c..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_manifest.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// AppConfig describes the configuration of a GitHub App. -type AppConfig struct { - ID *int64 `json:"id,omitempty"` - Slug *string `json:"slug,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - ExternalURL *string `json:"external_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - ClientID *string `json:"client_id,omitempty"` - ClientSecret *string `json:"client_secret,omitempty"` - WebhookSecret *string `json:"webhook_secret,omitempty"` - PEM *string `json:"pem,omitempty"` -} - -// CompleteAppManifest completes the App manifest handshake flow for the given -// code. 
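RevokeInstallationToken above pairs naturally with short-lived jobs; installation tokens expire on their own after roughly an hour, so eager revocation is best-effort hygiene rather than a hard requirement.

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// cleanup revokes the installation token the client itself is using; the
// error is deliberately ignored because the token expires shortly anyway.
func cleanup(ctx context.Context, installClient *github.Client) {
	_, _ = installClient.Apps.RevokeInstallationToken(ctx)
}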
-// -// GitHub API docs: https://docs.github.com/en/rest/apps/apps#create-a-github-app-from-a-manifest -func (s *AppsService) CompleteAppManifest(ctx context.Context, code string) (*AppConfig, *Response, error) { - u := fmt.Sprintf("app-manifests/%s/conversions", code) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - cfg := new(AppConfig) - resp, err := s.client.Do(ctx, req, cfg) - if err != nil { - return nil, resp, err - } - - return cfg, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_marketplace.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_marketplace.go deleted file mode 100644 index 32889abd24..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/apps_marketplace.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// MarketplaceService handles communication with the marketplace related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/apps#marketplace -type MarketplaceService struct { - client *Client - // Stubbed controls whether endpoints that return stubbed data are used - // instead of production endpoints. Stubbed data is fake data that's useful - // for testing your GitHub Apps. Stubbed data is hard-coded and will not - // change based on actual subscriptions. - // - // GitHub API docs: https://docs.github.com/en/rest/apps#testing-with-stubbed-endpoints - Stubbed bool -} - -// MarketplacePlan represents a GitHub Apps Marketplace Listing Plan. -type MarketplacePlan struct { - URL *string `json:"url,omitempty"` - AccountsURL *string `json:"accounts_url,omitempty"` - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - MonthlyPriceInCents *int `json:"monthly_price_in_cents,omitempty"` - YearlyPriceInCents *int `json:"yearly_price_in_cents,omitempty"` - // The pricing model for this listing. Can be one of "flat-rate", "per-unit", or "free". - PriceModel *string `json:"price_model,omitempty"` - UnitName *string `json:"unit_name,omitempty"` - Bullets *[]string `json:"bullets,omitempty"` - // State can be one of the values "draft" or "published". - State *string `json:"state,omitempty"` - HasFreeTrial *bool `json:"has_free_trial,omitempty"` -} - -// MarketplacePurchase represents a GitHub Apps Marketplace Purchase. -type MarketplacePurchase struct { - Account *MarketplacePurchaseAccount `json:"account,omitempty"` - // BillingCycle can be one of the values "yearly", "monthly" or nil. - BillingCycle *string `json:"billing_cycle,omitempty"` - NextBillingDate *Timestamp `json:"next_billing_date,omitempty"` - UnitCount *int `json:"unit_count,omitempty"` - Plan *MarketplacePlan `json:"plan,omitempty"` - OnFreeTrial *bool `json:"on_free_trial,omitempty"` - FreeTrialEndsOn *Timestamp `json:"free_trial_ends_on,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -// MarketplacePendingChange represents a pending change to a GitHub Apps Marketplace Plan. 
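CompleteAppManifest (defined just above) is a one-shot exchange: the returned AppConfig is the only chance to capture the new app's credentials, so they must be persisted immediately. A sketch:

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

// finishManifestFlow trades the temporary code from the manifest redirect
// for the app's permanent configuration.
func finishManifestFlow(ctx context.Context, client *github.Client, code string) error {
	cfg, _, err := client.Apps.CompleteAppManifest(ctx, code)
	if err != nil {
		return err
	}
	// Store cfg.GetPEM(), cfg.GetWebhookSecret() and cfg.GetClientSecret()
	// somewhere durable; they cannot be fetched again via this endpoint.
	fmt.Printf("created app %s (id %d)\n", cfg.GetSlug(), cfg.GetID())
	return nil
}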
-type MarketplacePendingChange struct { - EffectiveDate *Timestamp `json:"effective_date,omitempty"` - UnitCount *int `json:"unit_count,omitempty"` - ID *int64 `json:"id,omitempty"` - Plan *MarketplacePlan `json:"plan,omitempty"` -} - -// MarketplacePlanAccount represents a GitHub Account (user or organization) on a specific plan. -type MarketplacePlanAccount struct { - URL *string `json:"url,omitempty"` - Type *string `json:"type,omitempty"` - ID *int64 `json:"id,omitempty"` - Login *string `json:"login,omitempty"` - OrganizationBillingEmail *string `json:"organization_billing_email,omitempty"` - MarketplacePurchase *MarketplacePurchase `json:"marketplace_purchase,omitempty"` - MarketplacePendingChange *MarketplacePendingChange `json:"marketplace_pending_change,omitempty"` -} - -// MarketplacePurchaseAccount represents a GitHub Account (user or organization) for a Purchase. -type MarketplacePurchaseAccount struct { - URL *string `json:"url,omitempty"` - Type *string `json:"type,omitempty"` - ID *int64 `json:"id,omitempty"` - Login *string `json:"login,omitempty"` - OrganizationBillingEmail *string `json:"organization_billing_email,omitempty"` - Email *string `json:"email,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// ListPlans lists all plans for your Marketplace listing. -// -// GitHub API docs: https://docs.github.com/en/rest/apps#list-plans -func (s *MarketplaceService) ListPlans(ctx context.Context, opts *ListOptions) ([]*MarketplacePlan, *Response, error) { - uri := s.marketplaceURI("plans") - u, err := addOptions(uri, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var plans []*MarketplacePlan - resp, err := s.client.Do(ctx, req, &plans) - if err != nil { - return nil, resp, err - } - - return plans, resp, nil -} - -// ListPlanAccountsForPlan lists all GitHub accounts (user or organization) on a specific plan. -// -// GitHub API docs: https://docs.github.com/en/rest/apps#list-accounts-for-a-plan -func (s *MarketplaceService) ListPlanAccountsForPlan(ctx context.Context, planID int64, opts *ListOptions) ([]*MarketplacePlanAccount, *Response, error) { - uri := s.marketplaceURI(fmt.Sprintf("plans/%v/accounts", planID)) - u, err := addOptions(uri, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var accounts []*MarketplacePlanAccount - resp, err := s.client.Do(ctx, req, &accounts) - if err != nil { - return nil, resp, err - } - - return accounts, resp, nil -} - -// GetPlanAccountForAccount get GitHub account (user or organization) associated with an account. -// -// GitHub API docs: https://docs.github.com/en/rest/apps#get-a-subscription-plan-for-an-account -func (s *MarketplaceService) GetPlanAccountForAccount(ctx context.Context, accountID int64) (*MarketplacePlanAccount, *Response, error) { - uri := s.marketplaceURI(fmt.Sprintf("accounts/%v", accountID)) - - req, err := s.client.NewRequest("GET", uri, nil) - if err != nil { - return nil, nil, err - } - - var account *MarketplacePlanAccount - resp, err := s.client.Do(ctx, req, &account) - if err != nil { - return nil, resp, err - } - - return account, resp, nil -} - -// ListMarketplacePurchasesForUser lists all GitHub marketplace purchases made by a user. 
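A sketch of driving the plan-listing endpoints above; the cents arithmetic is only for display.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

// describePlans prints each Marketplace plan with its pricing model.
func describePlans(ctx context.Context, client *github.Client) error {
	plans, _, err := client.Marketplace.ListPlans(ctx, &github.ListOptions{PerPage: 100})
	if err != nil {
		return err
	}
	for _, p := range plans {
		cents := p.GetMonthlyPriceInCents()
		fmt.Printf("%s (%s): $%d.%02d/month\n", p.GetName(), p.GetPriceModel(), cents/100, cents%100)
	}
	return nil
}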
-// -// GitHub API docs: https://docs.github.com/en/rest/apps/marketplace#list-subscriptions-for-the-authenticated-user-stubbed -// GitHub API docs: https://docs.github.com/en/rest/apps/marketplace#list-subscriptions-for-the-authenticated-user -func (s *MarketplaceService) ListMarketplacePurchasesForUser(ctx context.Context, opts *ListOptions) ([]*MarketplacePurchase, *Response, error) { - uri := "user/marketplace_purchases" - if s.Stubbed { - uri = "user/marketplace_purchases/stubbed" - } - - u, err := addOptions(uri, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var purchases []*MarketplacePurchase - resp, err := s.client.Do(ctx, req, &purchases) - if err != nil { - return nil, resp, err - } - return purchases, resp, nil -} - -func (s *MarketplaceService) marketplaceURI(endpoint string) string { - url := "marketplace_listing" - if s.Stubbed { - url = "marketplace_listing/stubbed" - } - return url + "/" + endpoint -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/authorizations.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/authorizations.go deleted file mode 100644 index ea0897e362..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/authorizations.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2015 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Scope models a GitHub authorization scope. -// -// GitHub API docs: https://docs.github.com/en/rest/oauth/#scopes -type Scope string - -// This is the set of scopes for GitHub API V3 -const ( - ScopeNone Scope = "(no scope)" // REVISIT: is this actually returned, or just a documentation artifact? - ScopeUser Scope = "user" - ScopeUserEmail Scope = "user:email" - ScopeUserFollow Scope = "user:follow" - ScopePublicRepo Scope = "public_repo" - ScopeRepo Scope = "repo" - ScopeRepoDeployment Scope = "repo_deployment" - ScopeRepoStatus Scope = "repo:status" - ScopeDeleteRepo Scope = "delete_repo" - ScopeNotifications Scope = "notifications" - ScopeGist Scope = "gist" - ScopeReadRepoHook Scope = "read:repo_hook" - ScopeWriteRepoHook Scope = "write:repo_hook" - ScopeAdminRepoHook Scope = "admin:repo_hook" - ScopeAdminOrgHook Scope = "admin:org_hook" - ScopeReadOrg Scope = "read:org" - ScopeWriteOrg Scope = "write:org" - ScopeAdminOrg Scope = "admin:org" - ScopeReadPublicKey Scope = "read:public_key" - ScopeWritePublicKey Scope = "write:public_key" - ScopeAdminPublicKey Scope = "admin:public_key" - ScopeReadGPGKey Scope = "read:gpg_key" - ScopeWriteGPGKey Scope = "write:gpg_key" - ScopeAdminGPGKey Scope = "admin:gpg_key" - ScopeSecurityEvents Scope = "security_events" -) - -// AuthorizationsService handles communication with the authorization related -// methods of the GitHub API. -// -// This service requires HTTP Basic Authentication; it cannot be accessed using -// an OAuth token. -// -// GitHub API docs: https://docs.github.com/en/rest/oauth-authorizations -type AuthorizationsService service - -// Authorization represents an individual GitHub authorization. 
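The Stubbed toggle documented above reroutes every MarketplaceService call to the hard-coded test endpoints, which makes it convenient in integration tests, for example:

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// listPurchasesStubbed exercises the purchases endpoint against stubbed data.
func listPurchasesStubbed(ctx context.Context, client *github.Client) ([]*github.MarketplacePurchase, error) {
	client.Marketplace.Stubbed = true // hit the /stubbed variants instead of production
	purchases, _, err := client.Marketplace.ListMarketplacePurchasesForUser(ctx, nil)
	return purchases, err
}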
-type Authorization struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Scopes []Scope `json:"scopes,omitempty"` - Token *string `json:"token,omitempty"` - TokenLastEight *string `json:"token_last_eight,omitempty"` - HashedToken *string `json:"hashed_token,omitempty"` - App *AuthorizationApp `json:"app,omitempty"` - Note *string `json:"note,omitempty"` - NoteURL *string `json:"note_url,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` - - // User is only populated by the Check and Reset methods. - User *User `json:"user,omitempty"` -} - -func (a Authorization) String() string { - return Stringify(a) -} - -// AuthorizationApp represents an individual GitHub app (in the context of authorization). -type AuthorizationApp struct { - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - ClientID *string `json:"client_id,omitempty"` -} - -func (a AuthorizationApp) String() string { - return Stringify(a) -} - -// Grant represents an OAuth application that has been granted access to an account. -type Grant struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - App *AuthorizationApp `json:"app,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Scopes []string `json:"scopes,omitempty"` -} - -func (g Grant) String() string { - return Stringify(g) -} - -// AuthorizationRequest represents a request to create an authorization. -type AuthorizationRequest struct { - Scopes []Scope `json:"scopes,omitempty"` - Note *string `json:"note,omitempty"` - NoteURL *string `json:"note_url,omitempty"` - ClientID *string `json:"client_id,omitempty"` - ClientSecret *string `json:"client_secret,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` -} - -func (a AuthorizationRequest) String() string { - return Stringify(a) -} - -// AuthorizationUpdateRequest represents a request to update an authorization. -// -// Note that for any one update, you must only provide one of the "scopes" -// fields. That is, you may provide only one of "Scopes", or "AddScopes", or -// "RemoveScopes". -// -// GitHub API docs: https://docs.github.com/en/rest/oauth-authorizations#update-an-existing-authorization -type AuthorizationUpdateRequest struct { - Scopes []string `json:"scopes,omitempty"` - AddScopes []string `json:"add_scopes,omitempty"` - RemoveScopes []string `json:"remove_scopes,omitempty"` - Note *string `json:"note,omitempty"` - NoteURL *string `json:"note_url,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` -} - -func (a AuthorizationUpdateRequest) String() string { - return Stringify(a) -} - -// Check if an OAuth token is valid for a specific app. -// -// Note that this operation requires the use of BasicAuth, but where the -// username is the OAuth application clientID, and the password is its -// clientSecret. Invalid tokens will return a 404 Not Found. -// -// The returned Authorization.User field will be populated. 
-// -// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#check-a-token -func (s *AuthorizationsService) Check(ctx context.Context, clientID, accessToken string) (*Authorization, *Response, error) { - u := fmt.Sprintf("applications/%v/token", clientID) - - reqBody := &struct { - AccessToken string `json:"access_token"` - }{AccessToken: accessToken} - - req, err := s.client.NewRequest("POST", u, reqBody) - if err != nil { - return nil, nil, err - } - req.Header.Set("Accept", mediaTypeOAuthAppPreview) - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// Reset is used to reset a valid OAuth token without end user involvement. -// Applications must save the "token" property in the response, because changes -// take effect immediately. -// -// Note that this operation requires the use of BasicAuth, but where the -// username is the OAuth application clientID, and the password is its -// clientSecret. Invalid tokens will return a 404 Not Found. -// -// The returned Authorization.User field will be populated. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#reset-a-token -func (s *AuthorizationsService) Reset(ctx context.Context, clientID, accessToken string) (*Authorization, *Response, error) { - u := fmt.Sprintf("applications/%v/token", clientID) - - reqBody := &struct { - AccessToken string `json:"access_token"` - }{AccessToken: accessToken} - - req, err := s.client.NewRequest("PATCH", u, reqBody) - if err != nil { - return nil, nil, err - } - req.Header.Set("Accept", mediaTypeOAuthAppPreview) - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// Revoke an authorization for an application. -// -// Note that this operation requires the use of BasicAuth, but where the -// username is the OAuth application clientID, and the password is its -// clientSecret. Invalid tokens will return a 404 Not Found. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-token -func (s *AuthorizationsService) Revoke(ctx context.Context, clientID, accessToken string) (*Response, error) { - u := fmt.Sprintf("applications/%v/token", clientID) - - reqBody := &struct { - AccessToken string `json:"access_token"` - }{AccessToken: accessToken} - - req, err := s.client.NewRequest("DELETE", u, reqBody) - if err != nil { - return nil, err - } - req.Header.Set("Accept", mediaTypeOAuthAppPreview) - - return s.client.Do(ctx, req, nil) -} - -// DeleteGrant deletes an OAuth application grant. Deleting an application's -// grant will also delete all OAuth tokens associated with the application for -// the user. -// -// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization -func (s *AuthorizationsService) DeleteGrant(ctx context.Context, clientID, accessToken string) (*Response, error) { - u := fmt.Sprintf("applications/%v/grant", clientID) - - reqBody := &struct { - AccessToken string `json:"access_token"` - }{AccessToken: accessToken} - - req, err := s.client.NewRequest("DELETE", u, reqBody) - if err != nil { - return nil, err - } - req.Header.Set("Accept", mediaTypeOAuthAppPreview) - - return s.client.Do(ctx, req, nil) -} - -// CreateImpersonation creates an impersonation OAuth token. -// -// This requires admin permissions. With the returned Authorization.Token -// you can e.g. 
create or delete a user's public SSH key. NOTE: creating a -// new token automatically revokes an existing one. -// -// GitHub API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#create-an-impersonation-oauth-token -func (s *AuthorizationsService) CreateImpersonation(ctx context.Context, username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) { - u := fmt.Sprintf("admin/users/%v/authorizations", username) - req, err := s.client.NewRequest("POST", u, authReq) - if err != nil { - return nil, nil, err - } - - a := new(Authorization) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - return a, resp, nil -} - -// DeleteImpersonation deletes an impersonation OAuth token. -// -// NOTE: there can be only one at a time. -// -// GitHub API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#delete-an-impersonation-oauth-token -func (s *AuthorizationsService) DeleteImpersonation(ctx context.Context, username string) (*Response, error) { - u := fmt.Sprintf("admin/users/%v/authorizations", username) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/billing.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/billing.go deleted file mode 100644 index 7a76bf86fd..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/billing.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// BillingService provides access to the billing related functions -// in the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/billing -type BillingService service - -// ActionBilling represents a GitHub Action billing. -type ActionBilling struct { - TotalMinutesUsed float64 `json:"total_minutes_used"` - TotalPaidMinutesUsed float64 `json:"total_paid_minutes_used"` - IncludedMinutes float64 `json:"included_minutes"` - MinutesUsedBreakdown MinutesUsedBreakdown `json:"minutes_used_breakdown"` -} - -// MinutesUsedBreakdown counts the actions minutes used by machine type (e.g. UBUNTU, WINDOWS, MACOS). -type MinutesUsedBreakdown = map[string]int - -// PackageBilling represents a GitHub Package billing. -type PackageBilling struct { - TotalGigabytesBandwidthUsed int `json:"total_gigabytes_bandwidth_used"` - TotalPaidGigabytesBandwidthUsed int `json:"total_paid_gigabytes_bandwidth_used"` - IncludedGigabytesBandwidth float64 `json:"included_gigabytes_bandwidth"` -} - -// StorageBilling represents a GitHub Storage billing. -type StorageBilling struct { - DaysLeftInBillingCycle int `json:"days_left_in_billing_cycle"` - EstimatedPaidStorageForMonth float64 `json:"estimated_paid_storage_for_month"` - EstimatedStorageForMonth float64 `json:"estimated_storage_for_month"` -} - -// ActiveCommitters represents the total active committers across all repositories in an Organization. -type ActiveCommitters struct { - TotalAdvancedSecurityCommitters int `json:"total_advanced_security_committers"` - Repositories []*RepositoryActiveCommitters `json:"repositories,omitempty"` -} - -// RepositoryActiveCommitters represents active committers on each repository. 
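Per the doc comments on Check, Reset, and Revoke above, these OAuth-application endpoints use basic auth with the client ID as username and the client secret as password; github.BasicAuthTransport, defined elsewhere in this package, provides exactly that. A sketch:

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// checkToken validates a user's OAuth token against the application.
// A 404 from the API means the token is invalid or revoked.
func checkToken(ctx context.Context, clientID, clientSecret, userToken string) (*github.Authorization, error) {
	tp := &github.BasicAuthTransport{Username: clientID, Password: clientSecret}
	client := github.NewClient(tp.Client())
	auth, _, err := client.Authorizations.Check(ctx, clientID, userToken)
	if err != nil {
		return nil, err
	}
	return auth, nil // auth.User is populated for Check and Reset
}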
-type RepositoryActiveCommitters struct { - Name *string `json:"name,omitempty"` - AdvancedSecurityCommitters *int `json:"advanced_security_committers,omitempty"` - AdvancedSecurityCommittersBreakdown []*AdvancedSecurityCommittersBreakdown `json:"advanced_security_committers_breakdown,omitempty"` -} - -// AdvancedSecurityCommittersBreakdown represents the user activity breakdown for ActiveCommitters. -type AdvancedSecurityCommittersBreakdown struct { - UserLogin *string `json:"user_login,omitempty"` - LastPushedDate *string `json:"last_pushed_date,omitempty"` -} - -// GetActionsBillingOrg returns the summary of the free and paid GitHub Actions minutes used for an Org. -// -// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-actions-billing-for-an-organization -func (s *BillingService) GetActionsBillingOrg(ctx context.Context, org string) (*ActionBilling, *Response, error) { - u := fmt.Sprintf("orgs/%v/settings/billing/actions", org) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - actionsOrgBilling := new(ActionBilling) - resp, err := s.client.Do(ctx, req, actionsOrgBilling) - if err != nil { - return nil, resp, err - } - - return actionsOrgBilling, resp, nil -} - -// GetPackagesBillingOrg returns the free and paid storage used for GitHub Packages in gigabytes for an Org. -// -// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-packages-billing-for-an-organization -func (s *BillingService) GetPackagesBillingOrg(ctx context.Context, org string) (*PackageBilling, *Response, error) { - u := fmt.Sprintf("orgs/%v/settings/billing/packages", org) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - packagesOrgBilling := new(PackageBilling) - resp, err := s.client.Do(ctx, req, packagesOrgBilling) - if err != nil { - return nil, resp, err - } - - return packagesOrgBilling, resp, nil -} - -// GetStorageBillingOrg returns the estimated paid and estimated total storage used for GitHub Actions -// and GitHub Packages in gigabytes for an Org. -// -// GitHub API docs: https://docs.github.com/en/rest/billing#get-shared-storage-billing-for-an-organization -func (s *BillingService) GetStorageBillingOrg(ctx context.Context, org string) (*StorageBilling, *Response, error) { - u := fmt.Sprintf("orgs/%v/settings/billing/shared-storage", org) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - storageOrgBilling := new(StorageBilling) - resp, err := s.client.Do(ctx, req, storageOrgBilling) - if err != nil { - return nil, resp, err - } - - return storageOrgBilling, resp, nil -} - -// GetAdvancedSecurityActiveCommittersOrg returns the GitHub Advanced Security active committers for an organization per repository. 
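A sketch of consuming the organization Actions billing endpoint above; unusually for this package, the billing fields are plain values rather than pointers.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

// reportActionsMinutes summarizes an org's GitHub Actions minute usage.
func reportActionsMinutes(ctx context.Context, client *github.Client, org string) error {
	bill, _, err := client.Billing.GetActionsBillingOrg(ctx, org)
	if err != nil {
		return err
	}
	fmt.Printf("%s: %.0f minutes used of %.0f included (%.0f paid)\n",
		org, bill.TotalMinutesUsed, bill.IncludedMinutes, bill.TotalPaidMinutesUsed)
	return nil
}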
-// -// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/billing?apiVersion=2022-11-28#get-github-advanced-security-active-committers-for-an-organization -func (s *BillingService) GetAdvancedSecurityActiveCommittersOrg(ctx context.Context, org string, opts *ListOptions) (*ActiveCommitters, *Response, error) { - u := fmt.Sprintf("orgs/%v/settings/billing/advanced-security", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - activeOrgCommitters := new(ActiveCommitters) - resp, err := s.client.Do(ctx, req, activeOrgCommitters) - if err != nil { - return nil, resp, err - } - - return activeOrgCommitters, resp, nil -} - -// GetActionsBillingUser returns the summary of the free and paid GitHub Actions minutes used for a user. -// -// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-actions-billing-for-a-user -func (s *BillingService) GetActionsBillingUser(ctx context.Context, user string) (*ActionBilling, *Response, error) { - u := fmt.Sprintf("users/%v/settings/billing/actions", user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - actionsUserBilling := new(ActionBilling) - resp, err := s.client.Do(ctx, req, actionsUserBilling) - if err != nil { - return nil, resp, err - } - - return actionsUserBilling, resp, nil -} - -// GetPackagesBillingUser returns the free and paid storage used for GitHub Packages in gigabytes for a user. -// -// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-packages-billing-for-a-user -func (s *BillingService) GetPackagesBillingUser(ctx context.Context, user string) (*PackageBilling, *Response, error) { - u := fmt.Sprintf("users/%v/settings/billing/packages", user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - packagesUserBilling := new(PackageBilling) - resp, err := s.client.Do(ctx, req, packagesUserBilling) - if err != nil { - return nil, resp, err - } - - return packagesUserBilling, resp, nil -} - -// GetStorageBillingUser returns the estimated paid and estimated total storage used for GitHub Actions -// and GitHub Packages in gigabytes for a user. -// -// GitHub API docs: https://docs.github.com/en/rest/billing#get-shared-storage-billing-for-a-user -func (s *BillingService) GetStorageBillingUser(ctx context.Context, user string) (*StorageBilling, *Response, error) { - u := fmt.Sprintf("users/%v/settings/billing/shared-storage", user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - storageUserBilling := new(StorageBilling) - resp, err := s.client.Do(ctx, req, storageUserBilling) - if err != nil { - return nil, resp, err - } - - return storageUserBilling, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/checks.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/checks.go deleted file mode 100644 index 12d08530ca..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/checks.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ChecksService provides access to the Checks API in the -// GitHub API. 
-// -// GitHub API docs: https://docs.github.com/en/rest/checks/ -type ChecksService service - -// CheckRun represents a GitHub check run on a repository associated with a GitHub app. -type CheckRun struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - ExternalID *string `json:"external_id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - DetailsURL *string `json:"details_url,omitempty"` - Status *string `json:"status,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - StartedAt *Timestamp `json:"started_at,omitempty"` - CompletedAt *Timestamp `json:"completed_at,omitempty"` - Output *CheckRunOutput `json:"output,omitempty"` - Name *string `json:"name,omitempty"` - CheckSuite *CheckSuite `json:"check_suite,omitempty"` - App *App `json:"app,omitempty"` - PullRequests []*PullRequest `json:"pull_requests,omitempty"` -} - -// CheckRunOutput represents the output of a CheckRun. -type CheckRunOutput struct { - Title *string `json:"title,omitempty"` - Summary *string `json:"summary,omitempty"` - Text *string `json:"text,omitempty"` - AnnotationsCount *int `json:"annotations_count,omitempty"` - AnnotationsURL *string `json:"annotations_url,omitempty"` - Annotations []*CheckRunAnnotation `json:"annotations,omitempty"` - Images []*CheckRunImage `json:"images,omitempty"` -} - -// CheckRunAnnotation represents an annotation object for a CheckRun output. -type CheckRunAnnotation struct { - Path *string `json:"path,omitempty"` - StartLine *int `json:"start_line,omitempty"` - EndLine *int `json:"end_line,omitempty"` - StartColumn *int `json:"start_column,omitempty"` - EndColumn *int `json:"end_column,omitempty"` - AnnotationLevel *string `json:"annotation_level,omitempty"` - Message *string `json:"message,omitempty"` - Title *string `json:"title,omitempty"` - RawDetails *string `json:"raw_details,omitempty"` -} - -// CheckRunImage represents an image object for a CheckRun output. -type CheckRunImage struct { - Alt *string `json:"alt,omitempty"` - ImageURL *string `json:"image_url,omitempty"` - Caption *string `json:"caption,omitempty"` -} - -// CheckSuite represents a suite of check runs. -type CheckSuite struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - HeadBranch *string `json:"head_branch,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - URL *string `json:"url,omitempty"` - BeforeSHA *string `json:"before,omitempty"` - AfterSHA *string `json:"after,omitempty"` - Status *string `json:"status,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - App *App `json:"app,omitempty"` - Repository *Repository `json:"repository,omitempty"` - PullRequests []*PullRequest `json:"pull_requests,omitempty"` - - // The following fields are only populated by Webhook events. - HeadCommit *Commit `json:"head_commit,omitempty"` -} - -func (c CheckRun) String() string { - return Stringify(c) -} - -func (c CheckSuite) String() string { - return Stringify(c) -} - -// GetCheckRun gets a check-run for a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/checks/runs#get-a-check-run -func (s *ChecksService) GetCheckRun(ctx context.Context, owner, repo string, checkRunID int64) (*CheckRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs/%v", owner, repo, checkRunID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkRun := new(CheckRun) - resp, err := s.client.Do(ctx, req, checkRun) - if err != nil { - return nil, resp, err - } - - return checkRun, resp, nil -} - -// GetCheckSuite gets a single check suite. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/suites#get-a-check-suite -func (s *ChecksService) GetCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64) (*CheckSuite, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/%v", owner, repo, checkSuiteID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkSuite := new(CheckSuite) - resp, err := s.client.Do(ctx, req, checkSuite) - if err != nil { - return nil, resp, err - } - - return checkSuite, resp, nil -} - -// CreateCheckRunOptions sets up parameters needed to create a CheckRun. -type CreateCheckRunOptions struct { - Name string `json:"name"` // The name of the check (e.g., "code-coverage"). (Required.) - HeadSHA string `json:"head_sha"` // The SHA of the commit. (Required.) - DetailsURL *string `json:"details_url,omitempty"` // The URL of the integrator's site that has the full details of the check. (Optional.) - ExternalID *string `json:"external_id,omitempty"` // A reference for the run on the integrator's system. (Optional.) - Status *string `json:"status,omitempty"` // The current status. Can be one of "queued", "in_progress", or "completed". Default: "queued". (Optional.) - Conclusion *string `json:"conclusion,omitempty"` // Can be one of "success", "failure", "neutral", "cancelled", "skipped", "timed_out", or "action_required". (Optional. Required if you provide a status of "completed".) - StartedAt *Timestamp `json:"started_at,omitempty"` // The time that the check run began. (Optional.) - CompletedAt *Timestamp `json:"completed_at,omitempty"` // The time the check completed. (Optional. Required if you provide conclusion.) - Output *CheckRunOutput `json:"output,omitempty"` // Provide descriptive details about the run. (Optional) - Actions []*CheckRunAction `json:"actions,omitempty"` // Possible further actions the integrator can perform, which a user may trigger. (Optional.) -} - -// CheckRunAction exposes further actions the integrator can perform, which a user may trigger. -type CheckRunAction struct { - Label string `json:"label"` // The text to be displayed on a button in the web UI. The maximum size is 20 characters. (Required.) - Description string `json:"description"` // A short explanation of what this action would do. The maximum size is 40 characters. (Required.) - Identifier string `json:"identifier"` // A reference for the action on the integrator's system. The maximum size is 20 characters. (Required.) -} - -// CreateCheckRun creates a check run for repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/checks/runs#create-a-check-run -func (s *ChecksService) CreateCheckRun(ctx context.Context, owner, repo string, opts CreateCheckRunOptions) (*CheckRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs", owner, repo) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkRun := new(CheckRun) - resp, err := s.client.Do(ctx, req, checkRun) - if err != nil { - return nil, resp, err - } - - return checkRun, resp, nil -} - -// UpdateCheckRunOptions sets up parameters needed to update a CheckRun. -type UpdateCheckRunOptions struct { - Name string `json:"name"` // The name of the check (e.g., "code-coverage"). (Required.) - DetailsURL *string `json:"details_url,omitempty"` // The URL of the integrator's site that has the full details of the check. (Optional.) - ExternalID *string `json:"external_id,omitempty"` // A reference for the run on the integrator's system. (Optional.) - Status *string `json:"status,omitempty"` // The current status. Can be one of "queued", "in_progress", or "completed". Default: "queued". (Optional.) - Conclusion *string `json:"conclusion,omitempty"` // Can be one of "success", "failure", "neutral", "cancelled", "skipped", "timed_out", or "action_required". (Optional. Required if you provide a status of "completed".) - CompletedAt *Timestamp `json:"completed_at,omitempty"` // The time the check completed. (Optional. Required if you provide conclusion.) - Output *CheckRunOutput `json:"output,omitempty"` // Provide descriptive details about the run. (Optional) - Actions []*CheckRunAction `json:"actions,omitempty"` // Possible further actions the integrator can perform, which a user may trigger. (Optional.) -} - -// UpdateCheckRun updates a check run for a specific commit in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/runs#update-a-check-run -func (s *ChecksService) UpdateCheckRun(ctx context.Context, owner, repo string, checkRunID int64, opts UpdateCheckRunOptions) (*CheckRun, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs/%v", owner, repo, checkRunID) - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkRun := new(CheckRun) - resp, err := s.client.Do(ctx, req, checkRun) - if err != nil { - return nil, resp, err - } - - return checkRun, resp, nil -} - -// ListCheckRunAnnotations lists the annotations for a check run. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/runs#list-check-run-annotations -func (s *ChecksService) ListCheckRunAnnotations(ctx context.Context, owner, repo string, checkRunID int64, opts *ListOptions) ([]*CheckRunAnnotation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs/%v/annotations", owner, repo, checkRunID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkRunAnnotations []*CheckRunAnnotation - resp, err := s.client.Do(ctx, req, &checkRunAnnotations) - if err != nil { - return nil, resp, err - } - - return checkRunAnnotations, resp, nil -} - -// ListCheckRunsOptions represents parameters to list check runs. 
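The create and update calls above are typically paired: open a run as in_progress, do the work, then close it out with a conclusion. A sketch; the check name is purely illustrative.

package example

import (
	"context"
	"time"

	"github.com/google/go-github/v53/github"
)

// runCheck opens a check run against a commit and marks it successful.
func runCheck(ctx context.Context, client *github.Client, owner, repo, sha string) error {
	run, _, err := client.Checks.CreateCheckRun(ctx, owner, repo, github.CreateCheckRunOptions{
		Name:    "vendor-verify", // hypothetical check name
		HeadSHA: sha,
		Status:  github.String("in_progress"),
	})
	if err != nil {
		return err
	}
	// ... perform the actual verification work here ...
	_, _, err = client.Checks.UpdateCheckRun(ctx, owner, repo, run.GetID(), github.UpdateCheckRunOptions{
		Name:        "vendor-verify",
		Status:      github.String("completed"),
		Conclusion:  github.String("success"),
		CompletedAt: &github.Timestamp{Time: time.Now()},
	})
	return err
}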
-type ListCheckRunsOptions struct { - CheckName *string `url:"check_name,omitempty"` // Returns check runs with the specified name. - Status *string `url:"status,omitempty"` // Returns check runs with the specified status. Can be one of "queued", "in_progress", or "completed". - Filter *string `url:"filter,omitempty"` // Filters check runs by their completed_at timestamp. Can be one of "latest" (returning the most recent check runs) or "all". Default: "latest" - AppID *int64 `url:"app_id,omitempty"` // Filters check runs by GitHub App ID. - - ListOptions -} - -// ListCheckRunsResults represents the result of a check run list. -type ListCheckRunsResults struct { - Total *int `json:"total_count,omitempty"` - CheckRuns []*CheckRun `json:"check_runs,omitempty"` -} - -// ListCheckRunsForRef lists check runs for a specific ref. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/runs#list-check-runs-for-a-git-reference -func (s *ChecksService) ListCheckRunsForRef(ctx context.Context, owner, repo, ref string, opts *ListCheckRunsOptions) (*ListCheckRunsResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/check-runs", owner, repo, refURLEscape(ref)) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkRunResults *ListCheckRunsResults - resp, err := s.client.Do(ctx, req, &checkRunResults) - if err != nil { - return nil, resp, err - } - - return checkRunResults, resp, nil -} - -// ListCheckRunsCheckSuite lists check runs for a check suite. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/runs#list-check-runs-in-a-check-suite -func (s *ChecksService) ListCheckRunsCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64, opts *ListCheckRunsOptions) (*ListCheckRunsResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/%v/check-runs", owner, repo, checkSuiteID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkRunResults *ListCheckRunsResults - resp, err := s.client.Do(ctx, req, &checkRunResults) - if err != nil { - return nil, resp, err - } - - return checkRunResults, resp, nil -} - -// ReRequestCheckRun triggers GitHub to rerequest an existing check run. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/runs#rerequest-a-check-run -func (s *ChecksService) ReRequestCheckRun(ctx context.Context, owner, repo string, checkRunID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-runs/%v/rerequest", owner, repo, checkRunID) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ListCheckSuiteOptions represents parameters to list check suites. -type ListCheckSuiteOptions struct { - CheckName *string `url:"check_name,omitempty"` // Filters checks suites by the name of the check run. - AppID *int `url:"app_id,omitempty"` // Filters check suites by GitHub App id. - - ListOptions -} - -// ListCheckSuiteResults represents the result of a check run list. 
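A sketch of gating on the latest check runs for a commit with ListCheckRunsForRef above; a real caller would also follow resp.NextPage, elided here for brevity.

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// latestRunsAllGreen reports whether every most-recent check run on ref succeeded.
func latestRunsAllGreen(ctx context.Context, client *github.Client, owner, repo, ref string) (bool, error) {
	results, _, err := client.Checks.ListCheckRunsForRef(ctx, owner, repo, ref, &github.ListCheckRunsOptions{
		Filter:      github.String("latest"),
		ListOptions: github.ListOptions{PerPage: 100},
	})
	if err != nil {
		return false, err
	}
	for _, run := range results.CheckRuns {
		if run.GetConclusion() != "success" {
			return false, nil
		}
	}
	return true, nil
}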
-type ListCheckSuiteResults struct { - Total *int `json:"total_count,omitempty"` - CheckSuites []*CheckSuite `json:"check_suites,omitempty"` -} - -// ListCheckSuitesForRef lists check suite for a specific ref. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/suites#list-check-suites-for-a-git-reference -func (s *ChecksService) ListCheckSuitesForRef(ctx context.Context, owner, repo, ref string, opts *ListCheckSuiteOptions) (*ListCheckSuiteResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/check-suites", owner, repo, refURLEscape(ref)) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkSuiteResults *ListCheckSuiteResults - resp, err := s.client.Do(ctx, req, &checkSuiteResults) - if err != nil { - return nil, resp, err - } - - return checkSuiteResults, resp, nil -} - -// AutoTriggerCheck enables or disables automatic creation of CheckSuite events upon pushes to the repository. -type AutoTriggerCheck struct { - AppID *int64 `json:"app_id,omitempty"` // The id of the GitHub App. (Required.) - Setting *bool `json:"setting,omitempty"` // Set to "true" to enable automatic creation of CheckSuite events upon pushes to the repository, or "false" to disable them. Default: "true" (Required.) -} - -// CheckSuitePreferenceOptions set options for check suite preferences for a repository. -type CheckSuitePreferenceOptions struct { - AutoTriggerChecks []*AutoTriggerCheck `json:"auto_trigger_checks,omitempty"` // A slice of auto trigger checks that can be set for a check suite in a repository. -} - -// CheckSuitePreferenceResults represents the results of the preference set operation. -type CheckSuitePreferenceResults struct { - Preferences *PreferenceList `json:"preferences,omitempty"` - Repository *Repository `json:"repository,omitempty"` -} - -// PreferenceList represents a list of auto trigger checks for repository -type PreferenceList struct { - AutoTriggerChecks []*AutoTriggerCheck `json:"auto_trigger_checks,omitempty"` // A slice of auto trigger checks that can be set for a check suite in a repository. -} - -// SetCheckSuitePreferences changes the default automatic flow when creating check suites. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/suites#update-repository-preferences-for-check-suites -func (s *ChecksService) SetCheckSuitePreferences(ctx context.Context, owner, repo string, opts CheckSuitePreferenceOptions) (*CheckSuitePreferenceResults, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/preferences", owner, repo) - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - var checkSuitePrefResults *CheckSuitePreferenceResults - resp, err := s.client.Do(ctx, req, &checkSuitePrefResults) - if err != nil { - return nil, resp, err - } - - return checkSuitePrefResults, resp, nil -} - -// CreateCheckSuiteOptions sets up parameters to manually create a check suites -type CreateCheckSuiteOptions struct { - HeadSHA string `json:"head_sha"` // The sha of the head commit. (Required.) - HeadBranch *string `json:"head_branch,omitempty"` // The name of the head branch where the code changes are implemented. -} - -// CreateCheckSuite manually creates a check suite for a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/checks/suites#create-a-check-suite -func (s *ChecksService) CreateCheckSuite(ctx context.Context, owner, repo string, opts CreateCheckSuiteOptions) (*CheckSuite, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites", owner, repo) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - checkSuite := new(CheckSuite) - resp, err := s.client.Do(ctx, req, checkSuite) - if err != nil { - return nil, resp, err - } - - return checkSuite, resp, nil -} - -// ReRequestCheckSuite triggers GitHub to rerequest an existing check suite, without pushing new code to a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/checks/suites#rerequest-a-check-suite -func (s *ChecksService) ReRequestCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/check-suites/%v/rerequest", owner, repo, checkSuiteID) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", mediaTypeCheckRunsPreview) - - resp, err := s.client.Do(ctx, req, nil) - return resp, err -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/code-scanning.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/code-scanning.go deleted file mode 100644 index e4a6abeba3..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/code-scanning.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strconv" - "strings" -) - -// CodeScanningService handles communication with the code scanning related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning -type CodeScanningService service - -// Rule represents the complete details of GitHub Code Scanning alert type. -type Rule struct { - ID *string `json:"id,omitempty"` - Severity *string `json:"severity,omitempty"` - Description *string `json:"description,omitempty"` - Name *string `json:"name,omitempty"` - SecuritySeverityLevel *string `json:"security_severity_level,omitempty"` - FullDescription *string `json:"full_description,omitempty"` - Tags []string `json:"tags,omitempty"` - Help *string `json:"help,omitempty"` -} - -// Location represents the exact location of the GitHub Code Scanning Alert in the scanned project. -type Location struct { - Path *string `json:"path,omitempty"` - StartLine *int `json:"start_line,omitempty"` - EndLine *int `json:"end_line,omitempty"` - StartColumn *int `json:"start_column,omitempty"` - EndColumn *int `json:"end_column,omitempty"` -} - -// Message is a part of MostRecentInstance struct which provides the appropriate message when any action is performed on the analysis object. -type Message struct { - Text *string `json:"text,omitempty"` -} - -// MostRecentInstance provides details of the most recent instance of this alert for the default branch or for the specified Git reference. 
-type MostRecentInstance struct { - Ref *string `json:"ref,omitempty"` - AnalysisKey *string `json:"analysis_key,omitempty"` - Environment *string `json:"environment,omitempty"` - State *string `json:"state,omitempty"` - CommitSHA *string `json:"commit_sha,omitempty"` - Message *Message `json:"message,omitempty"` - Location *Location `json:"location,omitempty"` - Classifications []string `json:"classifications,omitempty"` -} - -// Tool represents the tool used to generate a GitHub Code Scanning Alert. -type Tool struct { - Name *string `json:"name,omitempty"` - GUID *string `json:"guid,omitempty"` - Version *string `json:"version,omitempty"` -} - -// Alert represents an individual GitHub Code Scanning Alert on a single repository. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning -type Alert struct { - Number *int `json:"number,omitempty"` - Repository *Repository `json:"repository,omitempty"` - RuleID *string `json:"rule_id,omitempty"` - RuleSeverity *string `json:"rule_severity,omitempty"` - RuleDescription *string `json:"rule_description,omitempty"` - Rule *Rule `json:"rule,omitempty"` - Tool *Tool `json:"tool,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - FixedAt *Timestamp `json:"fixed_at,omitempty"` - State *string `json:"state,omitempty"` - ClosedBy *User `json:"closed_by,omitempty"` - ClosedAt *Timestamp `json:"closed_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - MostRecentInstance *MostRecentInstance `json:"most_recent_instance,omitempty"` - Instances []*MostRecentInstance `json:"instances,omitempty"` - DismissedBy *User `json:"dismissed_by,omitempty"` - DismissedAt *Timestamp `json:"dismissed_at,omitempty"` - DismissedReason *string `json:"dismissed_reason,omitempty"` - DismissedComment *string `json:"dismissed_comment,omitempty"` - InstancesURL *string `json:"instances_url,omitempty"` -} - -// ID returns the ID associated with an alert. It is the number at the end of the security alert's URL. -func (a *Alert) ID() int64 { - if a == nil { - return 0 - } - - s := a.GetHTMLURL() - - // Check for an ID to parse at the end of the URL - if i := strings.LastIndex(s, "/"); i >= 0 { - s = s[i+1:] - } - - // Return the alert ID as a 64-bit integer. Unable to convert or out of range returns 0. - id, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0 - } - - return id -} - -// AlertListOptions specifies optional parameters to the CodeScanningService.ListAlerts -// method. -type AlertListOptions struct { - // State of the code scanning alerts to list. Set to closed to list only closed code scanning alerts. Default: open - State string `url:"state,omitempty"` - - // Return code scanning alerts for a specific branch reference. The ref must be formatted as heads/<branch name>. - Ref string `url:"ref,omitempty"` - - ListCursorOptions - - // Add ListOptions so offset pagination with integer type "page" query parameter is accepted - // since ListCursorOptions accepts "page" as string only. - ListOptions -} - -// AnalysesListOptions specifies optional parameters to the CodeScanningService.ListAnalysesForRepo method. -type AnalysesListOptions struct { - // Return code scanning analyses belonging to the same SARIF upload. - SarifID *string `url:"sarif_id,omitempty"` - - // Return code scanning analyses for a specific branch reference. The ref can be formatted as refs/heads/<branch name> or simply <branch name>.
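Assuming the same authenticated `client`, filtered alert listing with `AlertListOptions` looks roughly like this; owner, repo, and branch are illustrative.

```go
// List open code scanning alerts on one branch of a hypothetical repo.
func openAlerts(ctx context.Context, client *github.Client) ([]*github.Alert, error) {
	opts := &github.AlertListOptions{
		State: "open",
		Ref:   "heads/main", // formatted as heads/<branch name>, per AlertListOptions
	}
	alerts, _, err := client.CodeScanning.ListAlertsForRepo(ctx, "my-org", "my-repo", opts)
	return alerts, err
}
```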
- Ref *string `url:"ref,omitempty"` - - ListOptions -} - -// ScanningAnalysis represents an individual GitHub Code Scanning ScanningAnalysis on a single repository. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning -type ScanningAnalysis struct { - ID *int64 `json:"id,omitempty"` - Ref *string `json:"ref,omitempty"` - CommitSHA *string `json:"commit_sha,omitempty"` - AnalysisKey *string `json:"analysis_key,omitempty"` - Environment *string `json:"environment,omitempty"` - Error *string `json:"error,omitempty"` - Category *string `json:"category,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - ResultsCount *int `json:"results_count,omitempty"` - RulesCount *int `json:"rules_count,omitempty"` - URL *string `json:"url,omitempty"` - SarifID *string `json:"sarif_id,omitempty"` - Tool *Tool `json:"tool,omitempty"` - Deletable *bool `json:"deletable,omitempty"` - Warning *string `json:"warning,omitempty"` -} - -// SarifAnalysis specifies the results of a code scanning job. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning -type SarifAnalysis struct { - CommitSHA *string `json:"commit_sha,omitempty"` - Ref *string `json:"ref,omitempty"` - Sarif *string `json:"sarif,omitempty"` - CheckoutURI *string `json:"checkout_uri,omitempty"` - StartedAt *Timestamp `json:"started_at,omitempty"` - ToolName *string `json:"tool_name,omitempty"` -} - -// CodeScanningAlertState specifies the state of a code scanning alert. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning -type CodeScanningAlertState struct { - // State sets the state of the code scanning alert and is a required field. - // You must also provide DismissedReason when you set the state to "dismissed". - // State can be one of: "open", "dismissed". - State string `json:"state"` - // DismissedReason represents the reason for dismissing or closing the alert. - // It is required when the state is "dismissed". - // It can be one of: "false positive", "won't fix", "used in tests". - DismissedReason *string `json:"dismissed_reason,omitempty"` - // DismissedComment is associated with the dismissal of the alert. - DismissedComment *string `json:"dismissed_comment,omitempty"` -} - -// SarifID identifies a sarif analysis upload. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning -type SarifID struct { - ID *string `json:"id,omitempty"` - URL *string `json:"url,omitempty"` -} - -// ListAlertsForOrg lists code scanning alerts for an org. -// -// You must use an access token with the security_events scope to use this endpoint. GitHub Apps must have the security_events -// read permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#list-code-scanning-alerts-for-an-organization -func (s *CodeScanningService) ListAlertsForOrg(ctx context.Context, org string, opts *AlertListOptions) ([]*Alert, *Response, error) { - u := fmt.Sprintf("orgs/%v/code-scanning/alerts", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var alerts []*Alert - resp, err := s.client.Do(ctx, req, &alerts) - if err != nil { - return nil, resp, err - } - - return alerts, resp, nil -} - -// ListAlertsForRepo lists code scanning alerts for a repository. -// -// Lists all open code scanning alerts for the default branch (usually master) and protected branches in a repository. 
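A sketch of dismissing an alert with `CodeScanningAlertState`; note the required `DismissedReason` whenever `State` is "dismissed". The alert number and comment text are hypothetical.

```go
// Dismiss alert #42 as a false positive.
func dismissAlert(ctx context.Context, client *github.Client) (*github.Alert, error) {
	state := &github.CodeScanningAlertState{
		State:            "dismissed",
		DismissedReason:  github.String("false positive"),
		DismissedComment: github.String("Flagged test fixture, not production code."),
	}
	alert, _, err := client.CodeScanning.UpdateAlert(ctx, "my-org", "my-repo", 42, state)
	return alert, err
}
```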
-// You must use an access token with the security_events scope to use this endpoint. GitHub Apps must have the security_events -// read permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#list-code-scanning-alerts-for-a-repository -func (s *CodeScanningService) ListAlertsForRepo(ctx context.Context, owner, repo string, opts *AlertListOptions) ([]*Alert, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/code-scanning/alerts", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var alerts []*Alert - resp, err := s.client.Do(ctx, req, &alerts) - if err != nil { - return nil, resp, err - } - - return alerts, resp, nil -} - -// GetAlert gets a single code scanning alert for a repository. -// -// You must use an access token with the security_events scope to use this endpoint. -// GitHub Apps must have the security_events read permission to use this endpoint. -// -// The security alert_id is the number at the end of the security alert's URL. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#get-a-code-scanning-alert -func (s *CodeScanningService) GetAlert(ctx context.Context, owner, repo string, id int64) (*Alert, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/code-scanning/alerts/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - a := new(Alert) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// UpdateAlert updates the state of a single code scanning alert for a repository. -// -// You must use an access token with the security_events scope to use this endpoint. -// GitHub Apps must have the security_events write permission to use this endpoint. -// -// The security alert_id is the number at the end of the security alert's URL. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning?apiVersion=2022-11-28#update-a-code-scanning-alert -func (s *CodeScanningService) UpdateAlert(ctx context.Context, owner, repo string, id int64, stateInfo *CodeScanningAlertState) (*Alert, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/code-scanning/alerts/%v", owner, repo, id) - - req, err := s.client.NewRequest("PATCH", u, stateInfo) - if err != nil { - return nil, nil, err - } - - a := new(Alert) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// UploadSarif uploads the result of a code scanning job to GitHub. -// -// For the parameter sarif, you must first compress your SARIF file using gzip and then translate the contents of the file into a Base64-encoded string. -// You must use an access token with the security_events scope to use this endpoint. GitHub Apps must have the security_events -// write permission to use this endpoint.
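The gzip-then-Base64 requirement described above translates into code along these lines; the file path, ref, and repo coordinates are assumptions.

```go
import (
	"bytes"
	"compress/gzip"
	"context"
	"encoding/base64"
	"os"

	"github.com/google/go-github/v53/github"
)

// Gzip-compress the SARIF file and base64-encode it, as the endpoint requires.
func uploadSarif(ctx context.Context, client *github.Client, path, sha string) (*github.SarifID, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(raw); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	analysis := &github.SarifAnalysis{
		CommitSHA: github.String(sha),
		Ref:       github.String("refs/heads/main"), // hypothetical branch
		Sarif:     github.String(base64.StdEncoding.EncodeToString(buf.Bytes())),
	}
	sarifID, _, err := client.CodeScanning.UploadSarif(ctx, "my-org", "my-repo", analysis)
	return sarifID, err
}
```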
-// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#upload-an-analysis-as-sarif-data -func (s *CodeScanningService) UploadSarif(ctx context.Context, owner, repo string, sarif *SarifAnalysis) (*SarifID, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/code-scanning/sarifs", owner, repo) - - req, err := s.client.NewRequest("POST", u, sarif) - if err != nil { - return nil, nil, err - } - - sarifID := new(SarifID) - resp, err := s.client.Do(ctx, req, sarifID) - if err != nil { - return nil, resp, err - } - - return sarifID, resp, nil -} - -// ListAnalysesForRepo lists code scanning analyses for a repository. -// -// Lists the details of all code scanning analyses for a repository, starting with the most recent. -// You must use an access token with the security_events scope to use this endpoint. -// GitHub Apps must have the security_events read permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#list-code-scanning-analyses-for-a-repository -func (s *CodeScanningService) ListAnalysesForRepo(ctx context.Context, owner, repo string, opts *AnalysesListOptions) ([]*ScanningAnalysis, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/code-scanning/analyses", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var analyses []*ScanningAnalysis - resp, err := s.client.Do(ctx, req, &analyses) - if err != nil { - return nil, resp, err - } - - return analyses, resp, nil -} - -// GetAnalysis gets a single code scanning analysis for a repository. -// -// You must use an access token with the security_events scope to use this endpoint. -// GitHub Apps must have the security_events read permission to use this endpoint. -// -// The security analysis_id is the ID of the analysis, as returned from the ListAnalysesForRepo operation. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#get-a-code-scanning-analysis-for-a-repository -func (s *CodeScanningService) GetAnalysis(ctx context.Context, owner, repo string, id int64) (*ScanningAnalysis, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/code-scanning/analyses/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - analysis := new(ScanningAnalysis) - resp, err := s.client.Do(ctx, req, analysis) - if err != nil { - return nil, resp, err - } - - return analysis, resp, nil -} - -// DefaultSetupConfiguration represents a code scanning default setup configuration. -type DefaultSetupConfiguration struct { - State *string `json:"state,omitempty"` - Languages []string `json:"languages,omitempty"` - QuerySuite *string `json:"query_suite,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -// GetDefaultSetupConfiguration gets a code scanning default setup configuration. -// -// You must use an access token with the repo scope to use this -// endpoint with private repos or the public_repo scope for public repos. GitHub Apps must have the repo write -// permission to use this endpoint. 
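To tie an upload back to its analyses, one plausible follow-up uses the `sarif_id` returned by `UploadSarif` together with `AnalysesListOptions`:

```go
// Look up the analyses produced by one SARIF upload.
func analysesForUpload(ctx context.Context, client *github.Client, sarifID string) ([]*github.ScanningAnalysis, error) {
	opts := &github.AnalysesListOptions{SarifID: github.String(sarifID)}
	analyses, _, err := client.CodeScanning.ListAnalysesForRepo(ctx, "my-org", "my-repo", opts)
	return analyses, err
}
```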
-// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#get-a-code-scanning-default-setup-configuration -func (s *CodeScanningService) GetDefaultSetupConfiguration(ctx context.Context, owner, repo string) (*DefaultSetupConfiguration, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/code-scanning/default-setup", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - cfg := new(DefaultSetupConfiguration) - resp, err := s.client.Do(ctx, req, cfg) - if err != nil { - return nil, resp, err - } - - return cfg, resp, nil -} - -// UpdateDefaultSetupConfigurationOptions specifies parameters to the CodeScanningService.UpdateDefaultSetupConfiguration -// method. -type UpdateDefaultSetupConfigurationOptions struct { - State string `json:"state"` - QuerySuite *string `json:"query_suite,omitempty"` - Languages []string `json:"languages,omitempty"` -} - -// UpdateDefaultSetupConfigurationResponse represents a response from updating a code scanning default setup configuration. -type UpdateDefaultSetupConfigurationResponse struct { - RunID *int64 `json:"run_id,omitempty"` - RunURL *string `json:"run_url,omitempty"` -} - -// UpdateDefaultSetupConfiguration updates a code scanning default setup configuration. -// -// You must use an access token with the repo scope to use this -// endpoint with private repos or the public_repo scope for public repos. GitHub Apps must have the repo write -// permission to use this endpoint. -// -// This method might return an AcceptedError and a status code of 202. This is the status that GitHub -// returns to signify that it has scheduled the update of the default setup configuration in a background task. -// -// GitHub API docs: https://docs.github.com/en/rest/code-scanning#update-a-code-scanning-default-setup-configuration -func (s *CodeScanningService) UpdateDefaultSetupConfiguration(ctx context.Context, owner, repo string, options *UpdateDefaultSetupConfigurationOptions) (*UpdateDefaultSetupConfigurationResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/code-scanning/default-setup", owner, repo) - - req, err := s.client.NewRequest("PATCH", u, options) - if err != nil { - return nil, nil, err - } - - a := new(UpdateDefaultSetupConfigurationResponse) - resp, err := s.client.Do(ctx, req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/codespaces.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/codespaces.go deleted file mode 100644 index a260c227de..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/codespaces.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// CodespacesService handles communication with the Codespaces related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/ -type CodespacesService service - -// Codespace represents a codespace.
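A sketch of enabling default setup that treats the documented 202/`AcceptedError` as success; the `"configured"` and `"default"` values are assumptions taken from GitHub's documentation.

```go
// Enable default setup; a 202 surfaces as *github.AcceptedError, which here
// just means GitHub queued the change in a background task.
func enableDefaultSetup(ctx context.Context, client *github.Client) error {
	opts := &github.UpdateDefaultSetupConfigurationOptions{
		State:      "configured",
		QuerySuite: github.String("default"),
	}
	_, _, err := client.CodeScanning.UpdateDefaultSetupConfiguration(ctx, "my-org", "my-repo", opts)
	if _, accepted := err.(*github.AcceptedError); accepted {
		return nil // update scheduled in the background
	}
	return err
}
```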
-// -// GitHub API docs: https://docs.github.com/en/rest/codespaces -type Codespace struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - DisplayName *string `json:"display_name,omitempty"` - EnvironmentID *string `json:"environment_id,omitempty"` - Owner *User `json:"owner,omitempty"` - BillableOwner *User `json:"billable_owner,omitempty"` - Repository *Repository `json:"repository,omitempty"` - Machine *CodespacesMachine `json:"machine,omitempty"` - DevcontainerPath *string `json:"devcontainer_path,omitempty"` - Prebuild *bool `json:"prebuild,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - LastUsedAt *Timestamp `json:"last_used_at,omitempty"` - State *string `json:"state,omitempty"` - URL *string `json:"url,omitempty"` - GitStatus *CodespacesGitStatus `json:"git_status,omitempty"` - Location *string `json:"location,omitempty"` - IdleTimeoutMinutes *int `json:"idle_timeout_minutes,omitempty"` - WebURL *string `json:"web_url,omitempty"` - MachinesURL *string `json:"machines_url,omitempty"` - StartURL *string `json:"start_url,omitempty"` - StopURL *string `json:"stop_url,omitempty"` - PullsURL *string `json:"pulls_url,omitempty"` - RecentFolders []string `json:"recent_folders,omitempty"` - RuntimeConstraints *CodespacesRuntimeConstraints `json:"runtime_constraints,omitempty"` - PendingOperation *bool `json:"pending_operation,omitempty"` - PendingOperationDisabledReason *string `json:"pending_operation_disabled_reason,omitempty"` - IdleTimeoutNotice *string `json:"idle_timeout_notice,omitempty"` - RetentionPeriodMinutes *int `json:"retention_period_minutes,omitempty"` - RetentionExpiresAt *Timestamp `json:"retention_expires_at,omitempty"` - LastKnownStopNotice *string `json:"last_known_stop_notice,omitempty"` -} - -// CodespacesGitStatus represents the git status of a codespace. -type CodespacesGitStatus struct { - Ahead *int `json:"ahead,omitempty"` - Behind *int `json:"behind,omitempty"` - HasUnpushedChanges *bool `json:"has_unpushed_changes,omitempty"` - HasUncommittedChanges *bool `json:"has_uncommitted_changes,omitempty"` - Ref *string `json:"ref,omitempty"` -} - -// CodespacesMachine represents the machine type of a codespace. -type CodespacesMachine struct { - Name *string `json:"name,omitempty"` - DisplayName *string `json:"display_name,omitempty"` - OperatingSystem *string `json:"operating_system,omitempty"` - StorageInBytes *int64 `json:"storage_in_bytes,omitempty"` - MemoryInBytes *int64 `json:"memory_in_bytes,omitempty"` - CPUs *int `json:"cpus,omitempty"` - PrebuildAvailability *string `json:"prebuild_availability,omitempty"` -} - -// CodespacesRuntimeConstraints represents the runtime constraints of a codespace. -type CodespacesRuntimeConstraints struct { - AllowedPortPrivacySettings []string `json:"allowed_port_privacy_settings,omitempty"` -} - -// ListCodespaces represents the response from the list codespaces endpoints. -type ListCodespaces struct { - TotalCount *int `json:"total_count,omitempty"` - Codespaces []*Codespace `json:"codespaces"` -} - -// ListInRepo lists codespaces for a user in a repository. -// -// Lists the codespaces associated with a specified repository and the authenticated user. -// You must authenticate using an access token with the codespace scope to use this endpoint. -// GitHub Apps must have read access to the codespaces repository permission to use this endpoint. 
-// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#list-codespaces-in-a-repository-for-the-authenticated-user -func (s *CodespacesService) ListInRepo(ctx context.Context, owner, repo string, opts *ListOptions) (*ListCodespaces, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/codespaces", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var codespaces *ListCodespaces - resp, err := s.client.Do(ctx, req, &codespaces) - if err != nil { - return nil, resp, err - } - - return codespaces, resp, nil -} - -// ListCodespacesOptions represents the options for listing codespaces for a user. -type ListCodespacesOptions struct { - ListOptions - RepositoryID int64 `url:"repository_id,omitempty"` -} - -// List lists codespaces for an authenticated user. -// -// Lists the authenticated user's codespaces. -// You must authenticate using an access token with the codespace scope to use this endpoint. -// GitHub Apps must have read access to the codespaces repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#list-codespaces-for-the-authenticated-user -func (s *CodespacesService) List(ctx context.Context, opts *ListCodespacesOptions) (*ListCodespaces, *Response, error) { - u := "user/codespaces" - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var codespaces *ListCodespaces - resp, err := s.client.Do(ctx, req, &codespaces) - if err != nil { - return nil, resp, err - } - - return codespaces, resp, nil -} - -// CreateCodespaceOptions represents options for the creation of a codespace in a repository. -type CreateCodespaceOptions struct { - Ref *string `json:"ref,omitempty"` - // Geo represents the geographic area for this codespace. - // If not specified, the value is assigned by IP. - // This property replaces location, which is being deprecated. - // Geo can be one of: `EuropeWest`, `SoutheastAsia`, `UsEast`, `UsWest`. - Geo *string `json:"geo,omitempty"` - ClientIP *string `json:"client_ip,omitempty"` - Machine *string `json:"machine,omitempty"` - DevcontainerPath *string `json:"devcontainer_path,omitempty"` - MultiRepoPermissionsOptOut *bool `json:"multi_repo_permissions_opt_out,omitempty"` - WorkingDirectory *string `json:"working_directory,omitempty"` - IdleTimeoutMinutes *int `json:"idle_timeout_minutes,omitempty"` - DisplayName *string `json:"display_name,omitempty"` - // RetentionPeriodMinutes represents the duration in minutes after the codespace has gone idle in which it will be deleted. - // Must be integer minutes between 0 and 43200 (30 days). - RetentionPeriodMinutes *int `json:"retention_period_minutes,omitempty"` -} - -// CreateInRepo creates a codespace in a repository. -// -// Creates a codespace owned by the authenticated user in the specified repository. -// You must authenticate using an access token with the codespace scope to use this endpoint. -// GitHub Apps must have write access to the codespaces repository permission to use this endpoint.
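Creating a codespace with the options above might look like this; the machine name, ref, and repo coordinates are all placeholders.

```go
// Create a codespace on a branch with an assumed machine type and a
// 30-minute idle timeout.
func newCodespace(ctx context.Context, client *github.Client) (*github.Codespace, error) {
	opts := &github.CreateCodespaceOptions{
		Ref:                github.String("main"),
		Machine:            github.String("basicLinux32gb"), // assumed machine name
		IdleTimeoutMinutes: github.Int(30),
		DisplayName:        github.String("scratch-env"),
	}
	cs, _, err := client.Codespaces.CreateInRepo(ctx, "my-org", "my-repo", opts)
	return cs, err
}
```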
-// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#create-a-codespace-in-a-repository -func (s *CodespacesService) CreateInRepo(ctx context.Context, owner, repo string, request *CreateCodespaceOptions) (*Codespace, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/codespaces", owner, repo) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - var codespace *Codespace - resp, err := s.client.Do(ctx, req, &codespace) - if err != nil { - return nil, resp, err - } - - return codespace, resp, nil -} - -// Start starts a codespace. -// -// You must authenticate using an access token with the codespace scope to use this endpoint. -// GitHub Apps must have write access to the codespaces_lifecycle_admin repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#start-a-codespace-for-the-authenticated-user -func (s *CodespacesService) Start(ctx context.Context, codespaceName string) (*Codespace, *Response, error) { - u := fmt.Sprintf("user/codespaces/%v/start", codespaceName) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - var codespace *Codespace - resp, err := s.client.Do(ctx, req, &codespace) - if err != nil { - return nil, resp, err - } - - return codespace, resp, nil -} - -// Stop stops a codespace. -// -// You must authenticate using an access token with the codespace scope to use this endpoint. -// GitHub Apps must have write access to the codespaces_lifecycle_admin repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#stop-a-codespace-for-the-authenticated-user -func (s *CodespacesService) Stop(ctx context.Context, codespaceName string) (*Codespace, *Response, error) { - u := fmt.Sprintf("user/codespaces/%v/stop", codespaceName) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - var codespace *Codespace - resp, err := s.client.Do(ctx, req, &codespace) - if err != nil { - return nil, resp, err - } - - return codespace, resp, nil -} - -// Delete deletes a codespace. -// -// You must authenticate using an access token with the codespace scope to use this endpoint. -// GitHub Apps must have write access to the codespaces repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#delete-a-codespace-for-the-authenticated-user -func (s *CodespacesService) Delete(ctx context.Context, codespaceName string) (*Response, error) { - u := fmt.Sprintf("user/codespaces/%v", codespaceName) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go deleted file mode 100644 index e11c679c66..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
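The lifecycle calls compose naturally; `name` here is whatever an earlier List or CreateInRepo call returned.

```go
// Walk a codespace through start, stop, and delete.
func recycle(ctx context.Context, client *github.Client, name string) error {
	if _, _, err := client.Codespaces.Start(ctx, name); err != nil {
		return err
	}
	if _, _, err := client.Codespaces.Stop(ctx, name); err != nil {
		return err
	}
	_, err := client.Codespaces.Delete(ctx, name)
	return err
}
```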
- -package github - -import ( - "context" - "fmt" -) - -// ListUserSecrets lists all secrets available for a user's codespaces. -// -// Lists all secrets available for a user's Codespaces without revealing their encrypted values. -// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. -// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#list-secrets-for-the-authenticated-user -func (s *CodespacesService) ListUserSecrets(ctx context.Context, opts *ListOptions) (*Secrets, *Response, error) { - u, err := addOptions("user/codespaces/secrets", opts) - if err != nil { - return nil, nil, err - } - return s.listSecrets(ctx, u) -} - -// ListOrgSecrets lists all secrets available to an org. -// -// Lists all Codespaces secrets available at the organization-level without revealing their encrypted values. You must authenticate using an access token with the admin:org scope to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#list-organization-secrets -func (s *CodespacesService) ListOrgSecrets(ctx context.Context, org string, opts *ListOptions) (*Secrets, *Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - return s.listSecrets(ctx, u) -} - -// ListRepoSecrets lists all secrets available to a repo. -// -// Lists all secrets available in a repository without revealing their encrypted values. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#list-repository-secrets -func (s *CodespacesService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/codespaces/secrets", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - return s.listSecrets(ctx, u) -} - -func (s *CodespacesService) listSecrets(ctx context.Context, url string) (*Secrets, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - var secrets *Secrets - resp, err := s.client.Do(ctx, req, &secrets) - if err != nil { - return nil, resp, err - } - - return secrets, resp, nil -} - -// GetUserPublicKey gets the user's public key for encrypting codespace secrets. -// -// Gets your public key, which you need to encrypt secrets. You need to encrypt a secret before you can create or update secrets. -// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. -// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint.
-// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#get-public-key-for-the-authenticated-user -func (s *CodespacesService) GetUserPublicKey(ctx context.Context) (*PublicKey, *Response, error) { - return s.getPublicKey(ctx, "user/codespaces/secrets/public-key") -} - -// GetOrgPublicKey gets the org public key for encrypting codespace secrets -// -// Gets a public key for an organization, which is required in order to encrypt secrets. You need to encrypt the value of a secret before you can create or update secrets. You must authenticate using an access token with the admin:org scope to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#get-an-organization-public-key -func (s *CodespacesService) GetOrgPublicKey(ctx context.Context, org string) (*PublicKey, *Response, error) { - return s.getPublicKey(ctx, fmt.Sprintf("orgs/%v/codespaces/secrets/public-key", org)) -} - -// GetRepoPublicKey gets the repo public key for encrypting codespace secrets -// -// Gets your public key, which you need to encrypt secrets. You need to encrypt a secret before you can create or update secrets. Anyone with read access to the repository can use this endpoint. If the repository is private you must use an access token with the repo scope. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#get-a-repository-public-key -func (s *CodespacesService) GetRepoPublicKey(ctx context.Context, owner, repo string) (*PublicKey, *Response, error) { - return s.getPublicKey(ctx, fmt.Sprintf("repos/%v/%v/codespaces/secrets/public-key", owner, repo)) -} - -func (s *CodespacesService) getPublicKey(ctx context.Context, url string) (*PublicKey, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - var publicKey *PublicKey - resp, err := s.client.Do(ctx, req, &publicKey) - if err != nil { - return nil, resp, err - } - - return publicKey, resp, nil -} - -// GetUserSecret gets a users codespace secret -// -// Gets a secret available to a user's codespaces without revealing its encrypted value. -// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. -// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#get-a-secret-for-the-authenticated-user -func (s *CodespacesService) GetUserSecret(ctx context.Context, name string) (*Secret, *Response, error) { - u := fmt.Sprintf("user/codespaces/secrets/%v", name) - return s.getSecret(ctx, u) -} - -// GetOrgSecret gets an org codespace secret -// -// Gets an organization secret without revealing its encrypted value. You must authenticate using an access token with the admin:org scope to use this endpoint. 
-// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#get-an-organization-secret -func (s *CodespacesService) GetOrgSecret(ctx context.Context, org, name string) (*Secret, *Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, name) - return s.getSecret(ctx, u) -} - -// GetRepoSecret gets a repo codespace secret -// -// Gets a single repository secret without revealing its encrypted value. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#get-a-repository-secret -func (s *CodespacesService) GetRepoSecret(ctx context.Context, owner, repo, name string) (*Secret, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, name) - return s.getSecret(ctx, u) -} - -func (s *CodespacesService) getSecret(ctx context.Context, url string) (*Secret, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - var secret *Secret - resp, err := s.client.Do(ctx, req, &secret) - if err != nil { - return nil, resp, err - } - - return secret, resp, nil -} - -// CreateOrUpdateUserSecret creates or updates a users codespace secret -// -// Creates or updates a secret for a user's codespace with an encrypted value. Encrypt your secret using LibSodium. -// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must also have Codespaces access to use this endpoint. -// GitHub Apps must have write access to the codespaces_user_secrets user permission and codespaces_secrets repository permission on all referenced repositories to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#create-or-update-a-secret-for-the-authenticated-user -func (s *CodespacesService) CreateOrUpdateUserSecret(ctx context.Context, eSecret *EncryptedSecret) (*Response, error) { - u := fmt.Sprintf("user/codespaces/secrets/%v", eSecret.Name) - return s.createOrUpdateSecret(ctx, u, eSecret) -} - -// CreateOrUpdateOrgSecret creates or updates an orgs codespace secret -// -// Creates or updates an organization secret with an encrypted value. Encrypt your secret using LibSodium. You must authenticate using an access token with the admin:org scope to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#create-or-update-an-organization-secret -func (s *CodespacesService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *EncryptedSecret) (*Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, eSecret.Name) - return s.createOrUpdateSecret(ctx, u, eSecret) -} - -// CreateOrUpdateRepoSecret creates or updates a repos codespace secret -// -// Creates or updates a repository secret with an encrypted value. Encrypt your secret using LibSodium. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. 
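The LibSodium encryption step the comments above call for is typically done with a sealed box, for example via golang.org/x/crypto/nacl/box; this sketch assumes that package and hypothetical repo coordinates.

```go
import (
	"context"
	"crypto/rand"
	"encoding/base64"

	"github.com/google/go-github/v53/github"
	"golang.org/x/crypto/nacl/box"
)

// Fetch the repo public key, seal the plaintext with a libsodium sealed box,
// and upsert the secret under the given name.
func putRepoSecret(ctx context.Context, client *github.Client, name, value string) error {
	key, _, err := client.Codespaces.GetRepoPublicKey(ctx, "my-org", "my-repo")
	if err != nil {
		return err
	}
	decoded, err := base64.StdEncoding.DecodeString(key.GetKey())
	if err != nil {
		return err
	}
	var recipient [32]byte
	copy(recipient[:], decoded)
	sealed, err := box.SealAnonymous(nil, []byte(value), &recipient, rand.Reader)
	if err != nil {
		return err
	}
	_, err = client.Codespaces.CreateOrUpdateRepoSecret(ctx, "my-org", "my-repo", &github.EncryptedSecret{
		Name:           name,
		KeyID:          key.GetKeyID(),
		EncryptedValue: base64.StdEncoding.EncodeToString(sealed),
	})
	return err
}
```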
-// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#create-or-update-a-repository-secret -func (s *CodespacesService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *EncryptedSecret) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, eSecret.Name) - return s.createOrUpdateSecret(ctx, u, eSecret) -} - -func (s *CodespacesService) createOrUpdateSecret(ctx context.Context, url string, eSecret *EncryptedSecret) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, eSecret) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// DeleteUserSecret deletes a users codespace secret -// -// Deletes a secret from a user's codespaces using the secret name. Deleting the secret will remove access from all codespaces that were allowed to access the secret. -// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. -// GitHub Apps must have write access to the codespaces_user_secrets user permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#delete-a-secret-for-the-authenticated-user -func (s *CodespacesService) DeleteUserSecret(ctx context.Context, name string) (*Response, error) { - u := fmt.Sprintf("user/codespaces/secrets/%v", name) - return s.deleteSecret(ctx, u) -} - -// DeleteOrgSecret deletes an orgs codespace secret -// -// Deletes an organization secret using the secret name. You must authenticate using an access token with the admin:org scope to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#delete-an-organization-secret -func (s *CodespacesService) DeleteOrgSecret(ctx context.Context, org, name string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, name) - return s.deleteSecret(ctx, u) -} - -// DeleteRepoSecret deletes a repos codespace secret -// -// Deletes a secret in a repository using the secret name. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#delete-a-repository-secret -func (s *CodespacesService) DeleteRepoSecret(ctx context.Context, owner, repo, name string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, name) - return s.deleteSecret(ctx, u) -} - -func (s *CodespacesService) deleteSecret(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// ListSelectedReposForUserSecret lists the repositories that have been granted the ability to use a user's codespace secret. -// -// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. 
-// GitHub Apps must have read access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on all referenced repositories to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#list-selected-repositories-for-a-user-secret -func (s *CodespacesService) ListSelectedReposForUserSecret(ctx context.Context, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { - u := fmt.Sprintf("user/codespaces/secrets/%v/repositories", name) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - return s.listSelectedReposForSecret(ctx, u) -} - -// ListSelectedReposForOrgSecret lists the repositories that have been granted the ability to use an organization's codespace secret. -// -// Lists all repositories that have been selected when the visibility for repository access to a secret is set to selected. You must authenticate using an access token with the admin:org scope to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#list-selected-repositories-for-an-organization-secret -func (s *CodespacesService) ListSelectedReposForOrgSecret(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories", org, name) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - return s.listSelectedReposForSecret(ctx, u) -} - -func (s *CodespacesService) listSelectedReposForSecret(ctx context.Context, url string) (*SelectedReposList, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - var repositories *SelectedReposList - resp, err := s.client.Do(ctx, req, &repositories) - if err != nil { - return nil, resp, err - } - - return repositories, resp, nil -} - -// SetSelectedReposForUserSecret sets the repositories that have been granted the ability to use a user's codespace secret. -// -// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. -// GitHub Apps must have write access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on all referenced repositories to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#set-selected-repositories-for-a-user-secret -func (s *CodespacesService) SetSelectedReposForUserSecret(ctx context.Context, name string, ids SelectedRepoIDs) (*Response, error) { - u := fmt.Sprintf("user/codespaces/secrets/%v/repositories", name) - return s.setSelectedRepoForSecret(ctx, u, ids) -} - -// SetSelectedReposForOrgSecret sets the repositories that have been granted the ability to use an organization's codespace secret. -// -// Replaces all repositories for an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint.
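Scoping a user secret to specific repositories is then a one-liner over numeric repository IDs; the secret name and IDs here are made up.

```go
// Grant two hypothetical repositories (by numeric ID) access to a user secret.
func shareUserSecret(ctx context.Context, client *github.Client) error {
	ids := github.SelectedRepoIDs{123456, 789012}
	_, err := client.Codespaces.SetSelectedReposForUserSecret(ctx, "MY_SECRET", ids)
	return err
}
```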
-// -// Github API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#set-selected-repositories-for-a-user-secret -func (s *CodespacesService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories", org, name) - return s.setSelectedRepoForSecret(ctx, u, ids) -} - -func (s *CodespacesService) setSelectedRepoForSecret(ctx context.Context, url string, ids SelectedRepoIDs) (*Response, error) { - type repoIDs struct { - SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` - } - - req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// AddSelectedRepoToUserSecret adds a repository to the list of repositories that have been granted the ability to use a user's codespace secret. -// -// Adds a repository to the selected repositories for a user's codespace secret. You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. GitHub Apps must have write access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on the referenced repository to use this endpoint. -// -// Github API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#add-a-selected-repository-to-a-user-secret -func (s *CodespacesService) AddSelectedRepoToUserSecret(ctx context.Context, name string, repo *Repository) (*Response, error) { - u := fmt.Sprintf("user/codespaces/secrets/%v/repositories/%v", name, *repo.ID) - return s.addSelectedRepoToSecret(ctx, u) -} - -// AddSelectedRepoToOrgSecret adds a repository to the list of repositories that have been granted the ability to use an organization's codespace secret. -// -// Adds a repository to an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint. -// -// Github API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#add-selected-repository-to-an-organization-secret -func (s *CodespacesService) AddSelectedRepoToOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories/%v", org, name, *repo.ID) - return s.addSelectedRepoToSecret(ctx, u) -} - -func (s *CodespacesService) addSelectedRepoToSecret(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// RemoveSelectedRepoFromUserSecret removes a repository from the list of repositories that have been granted the ability to use a user's codespace secret. -// -// Removes a repository from the selected repositories for a user's codespace secret. You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. GitHub Apps must have write access to the codespaces_user_secrets user permission to use this endpoint. 
-// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#remove-a-selected-repository-from-a-user-secret -func (s *CodespacesService) RemoveSelectedRepoFromUserSecret(ctx context.Context, name string, repo *Repository) (*Response, error) { - u := fmt.Sprintf("user/codespaces/secrets/%v/repositories/%v", name, *repo.ID) - return s.removeSelectedRepoFromSecret(ctx, u) -} - -// RemoveSelectedRepoFromOrgSecret removes a repository from the list of repositories that have been granted the ability to use an organization's codespace secret. -// -// Removes a repository from an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint. -// -// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#remove-selected-repository-from-an-organization-secret -func (s *CodespacesService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories/%v", org, name, *repo.ID) - return s.removeSelectedRepoFromSecret(ctx, u) -} - -func (s *CodespacesService) removeSelectedRepoFromSecret(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot.go deleted file mode 100644 index 07e68b506a..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// DependabotService handles communication with the Dependabot related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/ -type DependabotService service diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go deleted file mode 100644 index 7b5d53b393..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Dependency represents the vulnerable dependency. -type Dependency struct { - Package *VulnerabilityPackage `json:"package,omitempty"` - ManifestPath *string `json:"manifest_path,omitempty"` - Scope *string `json:"scope,omitempty"` -} - -// AdvisoryCVSs represents the advisory pertaining to the Common Vulnerability Scoring System. -type AdvisoryCVSs struct { - Score *float64 `json:"score,omitempty"` - VectorString *string `json:"vector_string,omitempty"` -} - -// AdvisoryCWEs represents the advisory pertaining to Common Weakness Enumeration.
-type AdvisoryCWEs struct { - CWEID *string `json:"cwe_id,omitempty"` - Name *string `json:"name,omitempty"` -} - -// DependabotSecurityAdvisory represents the GitHub Security Advisory. -type DependabotSecurityAdvisory struct { - GHSAID *string `json:"ghsa_id,omitempty"` - CVEID *string `json:"cve_id,omitempty"` - Summary *string `json:"summary,omitempty"` - Description *string `json:"description,omitempty"` - Vulnerabilities []*AdvisoryVulnerability `json:"vulnerabilities,omitempty"` - Severity *string `json:"severity,omitempty"` - CVSs *AdvisoryCVSs `json:"cvss,omitempty"` - CWEs []*AdvisoryCWEs `json:"cwes,omitempty"` - Identifiers []*AdvisoryIdentifier `json:"identifiers,omitempty"` - References []*AdvisoryReference `json:"references,omitempty"` - PublishedAt *Timestamp `json:"published_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - WithdrawnAt *Timestamp `json:"withdrawn_at,omitempty"` -} - -// DependabotAlert represents a Dependabot alert. -type DependabotAlert struct { - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Dependency *Dependency `json:"dependency,omitempty"` - SecurityAdvisory *DependabotSecurityAdvisory `json:"security_advisory,omitempty"` - SecurityVulnerability *AdvisoryVulnerability `json:"security_vulnerability,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - DismissedAt *Timestamp `json:"dismissed_at,omitempty"` - DismissedBy *User `json:"dismissed_by,omitempty"` - DismissedReason *string `json:"dismissed_reason,omitempty"` - DismissedComment *string `json:"dismissed_comment,omitempty"` - FixedAt *Timestamp `json:"fixed_at,omitempty"` - Repository *Repository `json:"repository,omitempty"` -} - -// ListAlertsOptions specifies the optional parameters to the DependabotService.ListRepoAlerts -// and DependabotService.ListOrgAlerts methods. -type ListAlertsOptions struct { - State *string `url:"state,omitempty"` - Severity *string `url:"severity,omitempty"` - Ecosystem *string `url:"ecosystem,omitempty"` - Package *string `url:"package,omitempty"` - Scope *string `url:"scope,omitempty"` - Sort *string `url:"sort,omitempty"` - Direction *string `url:"direction,omitempty"` - - ListCursorOptions -} - -func (s *DependabotService) listAlerts(ctx context.Context, url string, opts *ListAlertsOptions) ([]*DependabotAlert, *Response, error) { - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var alerts []*DependabotAlert - resp, err := s.client.Do(ctx, req, &alerts) - if err != nil { - return nil, resp, err - } - - return alerts, resp, nil -} - -// ListRepoAlerts lists all Dependabot alerts of a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/alerts#list-dependabot-alerts-for-a-repository -func (s *DependabotService) ListRepoAlerts(ctx context.Context, owner, repo string, opts *ListAlertsOptions) ([]*DependabotAlert, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/dependabot/alerts", owner, repo) - return s.listAlerts(ctx, url, opts) -} - -// ListOrgAlerts lists all Dependabot alerts of an organization. 
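Filtering Dependabot alerts with `ListAlertsOptions` might look like the following; the severity filter and repo coordinates are illustrative.

```go
// Pull open, critical-severity Dependabot alerts for one repository.
func criticalAlerts(ctx context.Context, client *github.Client) ([]*github.DependabotAlert, error) {
	opts := &github.ListAlertsOptions{
		State:    github.String("open"),
		Severity: github.String("critical"),
	}
	alerts, _, err := client.Dependabot.ListRepoAlerts(ctx, "my-org", "my-repo", opts)
	return alerts, err
}
```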
-// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/alerts#list-dependabot-alerts-for-an-organization -func (s *DependabotService) ListOrgAlerts(ctx context.Context, org string, opts *ListAlertsOptions) ([]*DependabotAlert, *Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/alerts", org) - return s.listAlerts(ctx, url, opts) -} - -// GetRepoAlert gets a single repository Dependabot alert. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/alerts#get-a-dependabot-alert -func (s *DependabotService) GetRepoAlert(ctx context.Context, owner, repo string, number int) (*DependabotAlert, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/dependabot/alerts/%v", owner, repo, number) - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - alert := new(DependabotAlert) - resp, err := s.client.Do(ctx, req, alert) - if err != nil { - return nil, resp, err - } - - return alert, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot_secrets.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot_secrets.go deleted file mode 100644 index f87ab42c39..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/dependabot_secrets.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -func (s *DependabotService) getPublicKey(ctx context.Context, url string) (*PublicKey, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - pubKey := new(PublicKey) - resp, err := s.client.Do(ctx, req, pubKey) - if err != nil { - return nil, resp, err - } - - return pubKey, resp, nil -} - -// GetRepoPublicKey gets a public key that should be used for Dependabot secret encryption. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-a-repository-public-key -func (s *DependabotService) GetRepoPublicKey(ctx context.Context, owner, repo string) (*PublicKey, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/public-key", owner, repo) - return s.getPublicKey(ctx, url) -} - -// GetOrgPublicKey gets a public key that should be used for Dependabot secret encryption. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-an-organization-public-key -func (s *DependabotService) GetOrgPublicKey(ctx context.Context, org string) (*PublicKey, *Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/public-key", org) - return s.getPublicKey(ctx, url) -} - -func (s *DependabotService) listSecrets(ctx context.Context, url string, opts *ListOptions) (*Secrets, *Response, error) { - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - secrets := new(Secrets) - resp, err := s.client.Do(ctx, req, &secrets) - if err != nil { - return nil, resp, err - } - - return secrets, resp, nil -} - -// ListRepoSecrets lists all Dependabot secrets available in a repository -// without revealing their encrypted values. 
-// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#list-repository-secrets -func (s *DependabotService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/dependabot/secrets", owner, repo) - return s.listSecrets(ctx, url, opts) -} - -// ListOrgSecrets lists all Dependabot secrets available in an organization -// without revealing their encrypted values. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#list-organization-secrets -func (s *DependabotService) ListOrgSecrets(ctx context.Context, org string, opts *ListOptions) (*Secrets, *Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets", org) - return s.listSecrets(ctx, url, opts) -} - -func (s *DependabotService) getSecret(ctx context.Context, url string) (*Secret, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - secret := new(Secret) - resp, err := s.client.Do(ctx, req, secret) - if err != nil { - return nil, resp, err - } - - return secret, resp, nil -} - -// GetRepoSecret gets a single repository Dependabot secret without revealing its encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-a-repository-secret -func (s *DependabotService) GetRepoSecret(ctx context.Context, owner, repo, name string) (*Secret, *Response, error) { - url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/%v", owner, repo, name) - return s.getSecret(ctx, url) -} - -// GetOrgSecret gets a single organization Dependabot secret without revealing its encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-an-organization-secret -func (s *DependabotService) GetOrgSecret(ctx context.Context, org, name string) (*Secret, *Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v", org, name) - return s.getSecret(ctx, url) -} - -// DependabotEncryptedSecret represents a secret that is encrypted using a public key for Dependabot. -// -// The value of EncryptedValue must be your secret, encrypted with -// LibSodium (see documentation here: https://libsodium.gitbook.io/doc/bindings_for_other_languages) -// using the public key retrieved using the GetPublicKey method. -type DependabotEncryptedSecret struct { - Name string `json:"-"` - KeyID string `json:"key_id"` - EncryptedValue string `json:"encrypted_value"` - Visibility string `json:"visibility,omitempty"` - SelectedRepositoryIDs DependabotSecretsSelectedRepoIDs `json:"selected_repository_ids,omitempty"` -} - -func (s *DependabotService) putSecret(ctx context.Context, url string, eSecret *DependabotEncryptedSecret) (*Response, error) { - req, err := s.client.NewRequest("PUT", url, eSecret) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// CreateOrUpdateRepoSecret creates or updates a repository Dependabot secret with an encrypted value. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#create-or-update-a-repository-secret -func (s *DependabotService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *DependabotEncryptedSecret) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/%v", owner, repo, eSecret.Name) - return s.putSecret(ctx, url, eSecret) -} - -// CreateOrUpdateOrgSecret creates or updates an organization Dependabot secret with an encrypted value. 
-// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#create-or-update-an-organization-secret -func (s *DependabotService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *DependabotEncryptedSecret) (*Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v", org, eSecret.Name) - return s.putSecret(ctx, url, eSecret) -} - -func (s *DependabotService) deleteSecret(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeleteRepoSecret deletes a Dependabot secret in a repository using the secret name. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#delete-a-repository-secret -func (s *DependabotService) DeleteRepoSecret(ctx context.Context, owner, repo, name string) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/%v", owner, repo, name) - return s.deleteSecret(ctx, url) -} - -// DeleteOrgSecret deletes a Dependabot secret in an organization using the secret name. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#delete-an-organization-secret -func (s *DependabotService) DeleteOrgSecret(ctx context.Context, org, name string) (*Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v", org, name) - return s.deleteSecret(ctx, url) -} - -// ListSelectedReposForOrgSecret lists all repositories that have access to a Dependabot secret. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#list-selected-repositories-for-an-organization-secret -func (s *DependabotService) ListSelectedReposForOrgSecret(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories", org, name) - u, err := addOptions(url, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - result := new(SelectedReposList) - resp, err := s.client.Do(ctx, req, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// DependabotSecretsSelectedRepoIDs are the repository IDs that have access to the dependabot secrets. -type DependabotSecretsSelectedRepoIDs []int64 - -// SetSelectedReposForOrgSecret sets the repositories that have access to a Dependabot secret. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#set-selected-repositories-for-an-organization-secret -func (s *DependabotService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids DependabotSecretsSelectedRepoIDs) (*Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories", org, name) - type repoIDs struct { - SelectedIDs DependabotSecretsSelectedRepoIDs `json:"selected_repository_ids"` - } - - req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// AddSelectedRepoToOrgSecret adds a repository to an organization Dependabot secret. 
-// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#add-selected-repository-to-an-organization-secret -func (s *DependabotService) AddSelectedRepoToOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories/%v", org, name, *repo.ID) - req, err := s.client.NewRequest("PUT", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveSelectedRepoFromOrgSecret removes a repository from an organization Dependabot secret. -// -// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#remove-selected-repository-from-an-organization-secret -func (s *DependabotService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { - url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories/%v", org, name, *repo.ID) - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/doc.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/doc.go deleted file mode 100644 index af2847111a..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/doc.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package github provides a client for using the GitHub API. - -Usage: - - import "github.com/google/go-github/v53/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) - import "github.com/google/go-github/github" // with go modules disabled - -Construct a new GitHub client, then use the various services on the client to -access different parts of the GitHub API. For example: - - client := github.NewClient(nil) - - // list all organizations for user "willnorris" - orgs, _, err := client.Organizations.List(ctx, "willnorris", nil) - -Some API methods have optional parameters that can be passed. For example: - - client := github.NewClient(nil) - - // list public repositories for org "github" - opt := &github.RepositoryListByOrgOptions{Type: "public"} - repos, _, err := client.Repositories.ListByOrg(ctx, "github", opt) - -The services of a client divide the API into logical chunks and correspond to -the structure of the GitHub API documentation at -https://docs.github.com/en/rest . - -NOTE: Using the https://godoc.org/context package, one can easily -pass cancelation signals and deadlines to various services of the client for -handling a request. In case there is no context available, then context.Background() -can be used as a starting point. - -For more sample code snippets, head over to the https://github.com/google/go-github/tree/master/example directory. - -# Authentication - -The go-github library does not directly handle authentication. Instead, when -creating a new client, pass an http.Client that can handle authentication for -you. The easiest and recommended way to do this is using the golang.org/x/oauth2 -library, but you can always use any other library that provides an http.Client. 
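As a quick sketch of that "any other library" route (everything here is illustrative: the tokenTransport type is hypothetical and not part of go-github), any RoundTripper that injects credentials yields an http.Client the library can use:

	import (
		"context"
		"net/http"

		"github.com/google/go-github/v53/github"
	)

	// tokenTransport is a hypothetical RoundTripper that adds an
	// Authorization header to every outgoing request.
	type tokenTransport struct {
		token string
		base  http.RoundTripper
	}

	func (t *tokenTransport) RoundTrip(req *http.Request) (*http.Response, error) {
		r := req.Clone(req.Context())
		r.Header.Set("Authorization", "Bearer "+t.token)
		return t.base.RoundTrip(r)
	}

	func main() {
		hc := &http.Client{Transport: &tokenTransport{token: "... your access token ...", base: http.DefaultTransport}}
		client := github.NewClient(hc)

		// Every call made through client is now authenticated.
		user, _, err := client.Users.Get(context.Background(), "")
		if err != nil {
			// Handle error.
		}
		_ = user
	}

The oauth2-based setup described next achieves the same thing with less code and is the recommended path.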
-If you have an OAuth2 access token (for example, a personal API token), you can -use it with the oauth2 library using: - - import "golang.org/x/oauth2" - - func main() { - ctx := context.Background() - ts := oauth2.StaticTokenSource( - &oauth2.Token{AccessToken: "... your access token ..."}, - ) - tc := oauth2.NewClient(ctx, ts) - - client := github.NewClient(tc) - - // list all repositories for the authenticated user - repos, _, err := client.Repositories.List(ctx, "", nil) - } - -Note that when using an authenticated Client, all calls made by the client will -include the specified OAuth token. Therefore, authenticated clients should -almost never be shared between different users. - -See the oauth2 docs for complete instructions on using that library. - -For API methods that require HTTP Basic Authentication, use the -BasicAuthTransport. - -GitHub Apps authentication can be provided by the -https://github.com/bradleyfalzon/ghinstallation package. -It supports both authentication as an installation, using an installation access token, -and as an app, using a JWT. - -To authenticate as an installation: - - import "github.com/bradleyfalzon/ghinstallation" - - func main() { - // Wrap the shared transport for use with the integration ID 1 authenticating with installation ID 99. - itr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, 1, 99, "2016-10-19.private-key.pem") - if err != nil { - // Handle error. - } - - // Use installation transport with client - client := github.NewClient(&http.Client{Transport: itr}) - - // Use client... - } - -To authenticate as an app, using a JWT: - - import "github.com/bradleyfalzon/ghinstallation" - - func main() { - // Wrap the shared transport for use with the application ID 1. - atr, err := ghinstallation.NewAppsTransportKeyFromFile(http.DefaultTransport, 1, "2016-10-19.private-key.pem") - if err != nil { - // Handle error. - } - - // Use app transport with client - client := github.NewClient(&http.Client{Transport: atr}) - - // Use client... - } - -# Rate Limiting - -GitHub imposes a rate limit on all API clients. Unauthenticated clients are -limited to 60 requests per hour, while authenticated clients can make up to -5,000 requests per hour. The Search API has a custom rate limit. Unauthenticated -clients are limited to 10 requests per minute, while authenticated clients -can make up to 30 requests per minute. To receive the higher rate limit when -making calls that are not issued on behalf of a user, -use UnauthenticatedRateLimitedTransport. - -The returned Response.Rate value contains the rate limit information -from the most recent API call. If a recent enough response isn't -available, you can use RateLimits to fetch the most up-to-date rate -limit data for the client. - -To detect an API rate limit error, you can check if its type is *github.RateLimitError. -For secondary rate limits, you can check if its type is *github.AbuseRateLimitError: - - repos, _, err := client.Repositories.List(ctx, "", nil) - if _, ok := err.(*github.RateLimitError); ok { - log.Println("hit rate limit") - } - if _, ok := err.(*github.AbuseRateLimitError); ok { - log.Println("hit secondary rate limit") - } - -Learn more about GitHub rate limiting at -https://docs.github.com/en/rest/rate-limit . - -# Accepted Status - -Some endpoints may return a 202 Accepted status code, meaning that the -information required is not yet ready and was scheduled to be gathered on -the GitHub side. Methods known to behave like this are documented specifying -this behavior. 
- -To detect this condition of error, you can check if its type is -*github.AcceptedError: - - stats, _, err := client.Repositories.ListContributorsStats(ctx, org, repo) - if _, ok := err.(*github.AcceptedError); ok { - log.Println("scheduled on GitHub side") - } - -# Conditional Requests - -The GitHub API has good support for conditional requests which will help -prevent you from burning through your rate limit, as well as help speed up your -application. go-github does not handle conditional requests directly, but is -instead designed to work with a caching http.Transport. We recommend using -https://github.com/gregjones/httpcache for that. - -Learn more about GitHub conditional requests at -https://docs.github.com/en/rest/overview/resources-in-the-rest-api#conditional-requests. - -# Creating and Updating Resources - -All structs for GitHub resources use pointer values for all non-repeated fields. -This allows distinguishing between unset fields and those set to a zero-value. -Helper functions have been provided to easily create these pointers for string, -bool, and int values. For example: - - // create a new private repository named "foo" - repo := &github.Repository{ - Name: github.String("foo"), - Private: github.Bool(true), - } - client.Repositories.Create(ctx, "", repo) - -Users who have worked with protocol buffers should find this pattern familiar. - -# Pagination - -All requests for resource collections (repos, pull requests, issues, etc.) -support pagination. Pagination options are described in the -github.ListOptions struct and passed to the list methods directly or as an -embedded type of a more specific list options struct (for example -github.PullRequestListOptions). Pages information is available via the -github.Response struct. - - client := github.NewClient(nil) - - opt := &github.RepositoryListByOrgOptions{ - ListOptions: github.ListOptions{PerPage: 10}, - } - // get all pages of results - var allRepos []*github.Repository - for { - repos, resp, err := client.Repositories.ListByOrg(ctx, "github", opt) - if err != nil { - return err - } - allRepos = append(allRepos, repos...) - if resp.NextPage == 0 { - break - } - opt.Page = resp.NextPage - } -*/ -package github diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise.go deleted file mode 100644 index 1c9b069566..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// EnterpriseService provides access to the enterprise related functions -// in the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/ -type EnterpriseService service diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go deleted file mode 100644 index daafc5e628..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
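The Conditional Requests section of the deleted doc.go above recommends a caching http.Transport but gives no example. A minimal sketch, assuming the gregjones/httpcache package it suggests (using its NewMemoryCacheTransport constructor and Transport.Client helper; the repeated lookup is purely illustrative):

	import (
		"context"
		"fmt"

		"github.com/google/go-github/v53/github"
		"github.com/gregjones/httpcache"
	)

	func main() {
		// Responses are cached in memory together with their ETags; repeated
		// calls become conditional requests, and 304 replies are served from
		// the cache instead of counting as fresh responses.
		client := github.NewClient(httpcache.NewMemoryCacheTransport().Client())

		ctx := context.Background()
		for i := 0; i < 2; i++ {
			repo, _, err := client.Repositories.Get(ctx, "google", "go-github")
			if err != nil {
				// Handle error.
				return
			}
			fmt.Println(repo.GetFullName(), i)
		}
	}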
- -package github - -import ( - "context" - "fmt" -) - -// ListRunnerApplicationDownloads lists self-hosted runner application binaries that can be downloaded and run. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-runner-applications-for-an-enterprise -func (s *EnterpriseService) ListRunnerApplicationDownloads(ctx context.Context, enterprise string) ([]*RunnerApplicationDownload, *Response, error) { - u := fmt.Sprintf("enterprises/%v/actions/runners/downloads", enterprise) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rads []*RunnerApplicationDownload - resp, err := s.client.Do(ctx, req, &rads) - if err != nil { - return nil, resp, err - } - - return rads, resp, nil -} - -// CreateRegistrationToken creates a token that can be used to add a self-hosted runner. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-an-enterprise -func (s *EnterpriseService) CreateRegistrationToken(ctx context.Context, enterprise string) (*RegistrationToken, *Response, error) { - u := fmt.Sprintf("enterprises/%v/actions/runners/registration-token", enterprise) - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - registrationToken := new(RegistrationToken) - resp, err := s.client.Do(ctx, req, registrationToken) - if err != nil { - return nil, resp, err - } - - return registrationToken, resp, nil -} - -// ListRunners lists all the self-hosted runners for an enterprise. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-self-hosted-runners-for-an-enterprise -func (s *EnterpriseService) ListRunners(ctx context.Context, enterprise string, opts *ListOptions) (*Runners, *Response, error) { - u := fmt.Sprintf("enterprises/%v/actions/runners", enterprise) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - runners := &Runners{} - resp, err := s.client.Do(ctx, req, &runners) - if err != nil { - return nil, resp, err - } - - return runners, resp, nil -} - -// RemoveRunner forces the removal of a self-hosted runner from an enterprise using the runner ID. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-an-enterprise -func (s *EnterpriseService) RemoveRunner(ctx context.Context, enterprise string, runnerID int64) (*Response, error) { - u := fmt.Sprintf("enterprises/%v/actions/runners/%v", enterprise, runnerID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_audit_log.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_audit_log.go deleted file mode 100644 index 4064867338..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_audit_log.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GetAuditLog gets the audit-log entries for an enterprise.
-// -// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/audit-log#get-the-audit-log-for-an-enterprise -func (s *EnterpriseService) GetAuditLog(ctx context.Context, enterprise string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) { - u := fmt.Sprintf("enterprises/%v/audit-log", enterprise) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var auditEntries []*AuditEntry - resp, err := s.client.Do(ctx, req, &auditEntries) - if err != nil { - return nil, resp, err - } - - return auditEntries, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go deleted file mode 100644 index 3980a86aa4..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// EnterpriseSecurityAnalysisSettings represents security analysis settings for an enterprise. -type EnterpriseSecurityAnalysisSettings struct { - AdvancedSecurityEnabledForNewRepositories *bool `json:"advanced_security_enabled_for_new_repositories,omitempty"` - SecretScanningEnabledForNewRepositories *bool `json:"secret_scanning_enabled_for_new_repositories,omitempty"` - SecretScanningPushProtectionEnabledForNewRepositories *bool `json:"secret_scanning_push_protection_enabled_for_new_repositories,omitempty"` - SecretScanningPushProtectionCustomLink *string `json:"secret_scanning_push_protection_custom_link,omitempty"` -} - -// GetCodeSecurityAndAnalysis gets code security and analysis features for an enterprise. -// -// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/code-security-and-analysis?apiVersion=2022-11-28#get-code-security-and-analysis-features-for-an-enterprise -func (s *EnterpriseService) GetCodeSecurityAndAnalysis(ctx context.Context, enterprise string) (*EnterpriseSecurityAnalysisSettings, *Response, error) { - u := fmt.Sprintf("enterprises/%v/code_security_and_analysis", enterprise) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - settings := new(EnterpriseSecurityAnalysisSettings) - resp, err := s.client.Do(ctx, req, settings) - if err != nil { - return nil, resp, err - } - - return settings, resp, nil -} - -// UpdateCodeSecurityAndAnalysis updates code security and analysis features for new repositories in an enterprise. 
-// -// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/code-security-and-analysis?apiVersion=2022-11-28#update-code-security-and-analysis-features-for-an-enterprise -func (s *EnterpriseService) UpdateCodeSecurityAndAnalysis(ctx context.Context, enterprise string, settings *EnterpriseSecurityAnalysisSettings) (*Response, error) { - u := fmt.Sprintf("enterprises/%v/code_security_and_analysis", enterprise) - req, err := s.client.NewRequest("PATCH", u, settings) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// EnableDisableSecurityFeature enables or disables a security feature for all repositories in an enterprise. -// -// Valid values for securityProduct: "advanced_security", "secret_scanning", "secret_scanning_push_protection". -// Valid values for enablement: "enable_all", "disable_all". -// -// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/enterprise-admin/code-security-and-analysis?apiVersion=2022-11-28#enable-or-disable-a-security-feature -func (s *EnterpriseService) EnableDisableSecurityFeature(ctx context.Context, enterprise, securityProduct, enablement string) (*Response, error) { - u := fmt.Sprintf("enterprises/%v/%v/%v", enterprise, securityProduct, enablement) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/event.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/event.go deleted file mode 100644 index 4ee25603a8..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/event.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "encoding/json" -) - -// Event represents a GitHub event. -type Event struct { - Type *string `json:"type,omitempty"` - Public *bool `json:"public,omitempty"` - RawPayload *json.RawMessage `json:"payload,omitempty"` - Repo *Repository `json:"repo,omitempty"` - Actor *User `json:"actor,omitempty"` - Org *Organization `json:"org,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - ID *string `json:"id,omitempty"` -} - -func (e Event) String() string { - return Stringify(e) -} - -// ParsePayload parses the event payload. For recognized event types, -// a value of the corresponding struct type will be returned. 
-func (e *Event) ParsePayload() (payload interface{}, err error) { - switch *e.Type { - case "BranchProtectionRuleEvent": - payload = &BranchProtectionRuleEvent{} - case "CheckRunEvent": - payload = &CheckRunEvent{} - case "CheckSuiteEvent": - payload = &CheckSuiteEvent{} - case "CodeScanningAlertEvent": - payload = &CodeScanningAlertEvent{} - case "CommitCommentEvent": - payload = &CommitCommentEvent{} - case "ContentReferenceEvent": - payload = &ContentReferenceEvent{} - case "CreateEvent": - payload = &CreateEvent{} - case "DeleteEvent": - payload = &DeleteEvent{} - case "DeployKeyEvent": - payload = &DeployKeyEvent{} - case "DeploymentEvent": - payload = &DeploymentEvent{} - case "DeploymentProtectionRuleEvent": - payload = &DeploymentProtectionRuleEvent{} - case "DeploymentStatusEvent": - payload = &DeploymentStatusEvent{} - case "DiscussionEvent": - payload = &DiscussionEvent{} - case "DiscussionCommentEvent": - payload = &DiscussionCommentEvent{} - case "ForkEvent": - payload = &ForkEvent{} - case "GitHubAppAuthorizationEvent": - payload = &GitHubAppAuthorizationEvent{} - case "GollumEvent": - payload = &GollumEvent{} - case "InstallationEvent": - payload = &InstallationEvent{} - case "InstallationRepositoriesEvent": - payload = &InstallationRepositoriesEvent{} - case "IssueCommentEvent": - payload = &IssueCommentEvent{} - case "IssuesEvent": - payload = &IssuesEvent{} - case "LabelEvent": - payload = &LabelEvent{} - case "MarketplacePurchaseEvent": - payload = &MarketplacePurchaseEvent{} - case "MemberEvent": - payload = &MemberEvent{} - case "MembershipEvent": - payload = &MembershipEvent{} - case "MergeGroupEvent": - payload = &MergeGroupEvent{} - case "MetaEvent": - payload = &MetaEvent{} - case "MilestoneEvent": - payload = &MilestoneEvent{} - case "OrganizationEvent": - payload = &OrganizationEvent{} - case "OrgBlockEvent": - payload = &OrgBlockEvent{} - case "PackageEvent": - payload = &PackageEvent{} - case "PageBuildEvent": - payload = &PageBuildEvent{} - case "PingEvent": - payload = &PingEvent{} - case "ProjectEvent": - payload = &ProjectEvent{} - case "ProjectCardEvent": - payload = &ProjectCardEvent{} - case "ProjectColumnEvent": - payload = &ProjectColumnEvent{} - case "PublicEvent": - payload = &PublicEvent{} - case "PullRequestEvent": - payload = &PullRequestEvent{} - case "PullRequestReviewEvent": - payload = &PullRequestReviewEvent{} - case "PullRequestReviewCommentEvent": - payload = &PullRequestReviewCommentEvent{} - case "PullRequestReviewThreadEvent": - payload = &PullRequestReviewThreadEvent{} - case "PullRequestTargetEvent": - payload = &PullRequestTargetEvent{} - case "PushEvent": - payload = &PushEvent{} - case "ReleaseEvent": - payload = &ReleaseEvent{} - case "RepositoryEvent": - payload = &RepositoryEvent{} - case "RepositoryDispatchEvent": - payload = &RepositoryDispatchEvent{} - case "RepositoryImportEvent": - payload = &RepositoryImportEvent{} - case "RepositoryVulnerabilityAlertEvent": - payload = &RepositoryVulnerabilityAlertEvent{} - case "SecretScanningAlertEvent": - payload = &SecretScanningAlertEvent{} - case "SecurityAdvisoryEvent": - payload = &SecurityAdvisoryEvent{} - case "StarEvent": - payload = &StarEvent{} - case "StatusEvent": - payload = &StatusEvent{} - case "TeamEvent": - payload = &TeamEvent{} - case "TeamAddEvent": - payload = &TeamAddEvent{} - case "UserEvent": - payload = &UserEvent{} - case "WatchEvent": - payload = &WatchEvent{} - case "WorkflowDispatchEvent": - payload = &WorkflowDispatchEvent{} - case "WorkflowJobEvent": - 
payload = &WorkflowJobEvent{} - case "WorkflowRunEvent": - payload = &WorkflowRunEvent{} - } - err = json.Unmarshal(*e.RawPayload, &payload) - return payload, err -} - -// Payload returns the parsed event payload. For recognized event types, -// a value of the corresponding struct type will be returned. -// -// Deprecated: Use ParsePayload instead, which returns an error -// rather than panicking if JSON unmarshaling of the raw payload fails. -func (e *Event) Payload() (payload interface{}) { - var err error - payload, err = e.ParsePayload() - if err != nil { - panic(err) - } - return payload -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/event_types.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/event_types.go deleted file mode 100644 index 6a13b286bd..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/event_types.go +++ /dev/null @@ -1,1460 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// These event types are shared between the Events API and used as Webhook payloads. - -package github - -import "encoding/json" - -// RequestedAction is included in a CheckRunEvent when a user has invoked an action, -// i.e. when the CheckRunEvent's Action field is "requested_action". -type RequestedAction struct { - Identifier string `json:"identifier"` // The integrator reference of the action requested by the user. -} - -// BranchProtectionRuleEvent is triggered when a branch protection rule is "created", "edited", or "deleted". -// The Webhook event name is "branch_protection_rule". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#branch_protection_rule -type BranchProtectionRuleEvent struct { - Action *string `json:"action,omitempty"` - Rule *BranchProtectionRule `json:"rule,omitempty"` - Changes *ProtectionChanges `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// CheckRunEvent is triggered when a check run is "created", "completed", or "rerequested". -// The Webhook event name is "check_run". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#check_run -type CheckRunEvent struct { - CheckRun *CheckRun `json:"check_run,omitempty"` - // The action performed. Possible values are: "created", "completed", "rerequested" or "requested_action". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - - // The action requested by the user. Populated when the Action is "requested_action". - RequestedAction *RequestedAction `json:"requested_action,omitempty"` -} - -// CheckSuiteEvent is triggered when a check suite is "completed", "requested", or "rerequested". -// The Webhook event name is "check_suite". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#check_suite -type CheckSuiteEvent struct { - CheckSuite *CheckSuite `json:"check_suite,omitempty"` - // The action performed.
Possible values are: "completed", "requested" or "rerequested". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// CommitCommentEvent is triggered when a commit comment is created. -// The Webhook event name is "commit_comment". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#commit_comment -type CommitCommentEvent struct { - Comment *RepositoryComment `json:"comment,omitempty"` - - // The following fields are only populated by Webhook events. - Action *string `json:"action,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// ContentReferenceEvent is triggered when the body or comment of an issue or -// pull request includes a URL that matches a configured content reference -// domain. -// The Webhook event name is "content_reference". -// -// GitHub API docs: https://developer.github.com/webhooks/event-payloads/#content_reference -type ContentReferenceEvent struct { - Action *string `json:"action,omitempty"` - ContentReference *ContentReference `json:"content_reference,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// CreateEvent represents a created repository, branch, or tag. -// The Webhook event name is "create". -// -// Note: webhooks will not receive this event for created repositories. -// Additionally, webhooks will not receive this event for tags if more -// than three tags are pushed at once. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/github-event-types#createevent -type CreateEvent struct { - Ref *string `json:"ref,omitempty"` - // RefType is the object that was created. Possible values are: "repository", "branch", "tag". - RefType *string `json:"ref_type,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - Description *string `json:"description,omitempty"` - PusherType *string `json:"pusher_type,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeleteEvent represents a deleted branch or tag. -// The Webhook event name is "delete". -// -// Note: webhooks will not receive this event for tags if more than three tags -// are deleted at once. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/github-event-types#deleteevent -type DeleteEvent struct { - Ref *string `json:"ref,omitempty"` - // RefType is the object that was deleted. Possible values are: "branch", "tag". - RefType *string `json:"ref_type,omitempty"` - - // The following fields are only populated by Webhook events. - PusherType *string `json:"pusher_type,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeployKeyEvent is triggered when a deploy key is added or removed from a repository. 
-// The Webhook event name is "deploy_key". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#deploy_key -type DeployKeyEvent struct { - // Action is the action that was performed. Possible values are: - // "created" or "deleted". - Action *string `json:"action,omitempty"` - - // The deploy key resource. - Key *Key `json:"key,omitempty"` - - // The Repository where the event occurred - Repo *Repository `json:"repository,omitempty"` - - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` - - // The following fields are only populated by Webhook events. - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeploymentEvent represents a deployment. -// The Webhook event name is "deployment". -// -// Events of this type are not visible in timelines, they are only used to trigger hooks. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#deployment -type DeploymentEvent struct { - Deployment *Deployment `json:"deployment,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeploymentProtectionRuleEvent represents a deployment protection rule event. -// The Webhook event name is "deployment_protection_rule". -// -// GitHub API docs: https://docs.github.com/webhooks-and-events/webhooks/webhook-events-and-payloads#deployment_protection_rule -type DeploymentProtectionRuleEvent struct { - Action *string `json:"action,omitempty"` - Environment *string `json:"environment,omitempty"` - Event *string `json:"event,omitempty"` - - // The URL Github provides for a third-party to use in order to pass/fail a deployment gate - DeploymentCallbackURL *string `json:"deployment_callback_url,omitempty"` - Deployment *Deployment `json:"deployment,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Organization *Organization `json:"organization,omitempty"` - PullRequests []*PullRequest `json:"pull_requests,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DeploymentStatusEvent represents a deployment status. -// The Webhook event name is "deployment_status". -// -// Events of this type are not visible in timelines, they are only used to trigger hooks. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#deployment_status -type DeploymentStatusEvent struct { - Deployment *Deployment `json:"deployment,omitempty"` - DeploymentStatus *DeploymentStatus `json:"deployment_status,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// DiscussionCommentEvent represents a webhook event for a comment on discussion. -// The Webhook event name is "discussion_comment". -// -// GitHub API docs: https://docs.github.com/webhooks-and-events/webhooks/webhook-events-and-payloads#discussion_comment -type DiscussionCommentEvent struct { - // Action is the action that was performed on the comment. 
- // Possible values are: "created", "edited", "deleted". - Action *string `json:"action,omitempty"` - Discussion *Discussion `json:"discussion,omitempty"` - Comment *CommentDiscussion `json:"comment,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// CommentDiscussion represents a comment in a GitHub DiscussionCommentEvent. -type CommentDiscussion struct { - AuthorAssociation *string `json:"author_association,omitempty"` - Body *string `json:"body,omitempty"` - ChildCommentCount *int `json:"child_comment_count,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - DiscussionID *int64 `json:"discussion_id,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - ParentID *int64 `json:"parent_id,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - User *User `json:"user,omitempty"` -} - -// DiscussionEvent represents a webhook event for a discussion. -// The Webhook event name is "discussion". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#discussion -type DiscussionEvent struct { - // Action is the action that was performed. Possible values are: - // created, edited, deleted, pinned, unpinned, locked, unlocked, - // transferred, category_changed, answered, or unanswered. - Action *string `json:"action,omitempty"` - Discussion *Discussion `json:"discussion,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// Discussion represents a discussion in a GitHub DiscussionEvent. -type Discussion struct { - RepositoryURL *string `json:"repository_url,omitempty"` - DiscussionCategory *DiscussionCategory `json:"category,omitempty"` - AnswerHTMLURL *string `json:"answer_html_url,omitempty"` - AnswerChosenAt *Timestamp `json:"answer_chosen_at,omitempty"` - AnswerChosenBy *string `json:"answer_chosen_by,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Number *int `json:"number,omitempty"` - Title *string `json:"title,omitempty"` - User *User `json:"user,omitempty"` - State *string `json:"state,omitempty"` - Locked *bool `json:"locked,omitempty"` - Comments *int `json:"comments,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - AuthorAssociation *string `json:"author_association,omitempty"` - ActiveLockReason *string `json:"active_lock_reason,omitempty"` - Body *string `json:"body,omitempty"` -} - -// DiscussionCategory represents a discussion category in a GitHub DiscussionEvent.
-type DiscussionCategory struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - RepositoryID *int64 `json:"repository_id,omitempty"` - Emoji *string `json:"emoji,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Slug *string `json:"slug,omitempty"` - IsAnswerable *bool `json:"is_answerable,omitempty"` -} - -// ForkEvent is triggered when a user forks a repository. -// The Webhook event name is "fork". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#fork -type ForkEvent struct { - // Forkee is the created repository. - Forkee *Repository `json:"forkee,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// GitHubAppAuthorizationEvent is triggered when a user's authorization for a -// GitHub Application is revoked. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#github_app_authorization -type GitHubAppAuthorizationEvent struct { - // The action performed. Possible value is: "revoked". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// Page represents a single Wiki page. -type Page struct { - PageName *string `json:"page_name,omitempty"` - Title *string `json:"title,omitempty"` - Summary *string `json:"summary,omitempty"` - Action *string `json:"action,omitempty"` - SHA *string `json:"sha,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` -} - -// GollumEvent is triggered when a Wiki page is created or updated. -// The Webhook event name is "gollum". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#gollum -type GollumEvent struct { - Pages []*Page `json:"pages,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// EditChange represents the changes when an issue, pull request, comment, -// or repository has been edited. -type EditChange struct { - Title *EditTitle `json:"title,omitempty"` - Body *EditBody `json:"body,omitempty"` - Base *EditBase `json:"base,omitempty"` - Repo *EditRepo `json:"repository,omitempty"` - Owner *EditOwner `json:"owner,omitempty"` -} - -// EditTitle represents a pull-request title change. -type EditTitle struct { - From *string `json:"from,omitempty"` -} - -// EditBody represents a change of pull-request body. -type EditBody struct { - From *string `json:"from,omitempty"` -} - -// EditBase represents the change of a pull-request base branch. -type EditBase struct { - Ref *EditRef `json:"ref,omitempty"` - SHA *EditSHA `json:"sha,omitempty"` -} - -// EditRef represents a ref change of a pull-request. -type EditRef struct { - From *string `json:"from,omitempty"` -} - -// EditRepo represents a change of repository name. -type EditRepo struct { - Name *RepoName `json:"name,omitempty"` -} - -// EditOwner represents a change of repository ownership. 
-type EditOwner struct { - OwnerInfo *OwnerInfo `json:"from,omitempty"` -} - -// OwnerInfo represents the account info of the owner of the repo (could be User or Organization but both are User structs). -type OwnerInfo struct { - User *User `json:"user,omitempty"` - Org *User `json:"organization,omitempty"` -} - -// RepoName represents a change of repository name. -type RepoName struct { - From *string `json:"from,omitempty"` -} - -// EditSHA represents a sha change of a pull-request. -type EditSHA struct { - From *string `json:"from,omitempty"` -} - -// ProjectChange represents the changes when a project has been edited. -type ProjectChange struct { - Name *ProjectName `json:"name,omitempty"` - Body *ProjectBody `json:"body,omitempty"` -} - -// ProjectName represents a project name change. -type ProjectName struct { - From *string `json:"from,omitempty"` -} - -// ProjectBody represents a project body change. -type ProjectBody struct { - From *string `json:"from,omitempty"` -} - -// ProjectCardChange represents the changes when a project card has been edited. -type ProjectCardChange struct { - Note *ProjectCardNote `json:"note,omitempty"` -} - -// ProjectCardNote represents a change of a note of a project card. -type ProjectCardNote struct { - From *string `json:"from,omitempty"` -} - -// ProjectColumnChange represents the changes when a project column has been edited. -type ProjectColumnChange struct { - Name *ProjectColumnName `json:"name,omitempty"` -} - -// ProjectColumnName represents a project column name change. -type ProjectColumnName struct { - From *string `json:"from,omitempty"` -} - -// TeamChange represents the changes when a team has been edited. -type TeamChange struct { - Description *TeamDescription `json:"description,omitempty"` - Name *TeamName `json:"name,omitempty"` - Privacy *TeamPrivacy `json:"privacy,omitempty"` - Repository *TeamRepository `json:"repository,omitempty"` -} - -// TeamDescription represents a team description change. -type TeamDescription struct { - From *string `json:"from,omitempty"` -} - -// TeamName represents a team name change. -type TeamName struct { - From *string `json:"from,omitempty"` -} - -// TeamPrivacy represents a team privacy change. -type TeamPrivacy struct { - From *string `json:"from,omitempty"` -} - -// TeamRepository represents a team repository permission change. -type TeamRepository struct { - Permissions *TeamPermissions `json:"permissions,omitempty"` -} - -// TeamPermissions represents a team permission change. -type TeamPermissions struct { - From *TeamPermissionsFrom `json:"from,omitempty"` -} - -// TeamPermissionsFrom represents a team permission change. -type TeamPermissionsFrom struct { - Admin *bool `json:"admin,omitempty"` - Pull *bool `json:"pull,omitempty"` - Push *bool `json:"push,omitempty"` -} - -// InstallationEvent is triggered when a GitHub App has been installed, uninstalled, suspended, unsuspended, -// or new permissions have been accepted. -// The Webhook event name is "installation". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#installation -type InstallationEvent struct { - // The action that was performed. Can be either "created", "deleted", "suspend", "unsuspend" or "new_permissions_accepted".
- Action *string `json:"action,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - Requester *User `json:"requester,omitempty"` -} - -// InstallationRepositoriesEvent is triggered when a repository is added or -// removed from an installation. The Webhook event name is "installation_repositories". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#installation_repositories -type InstallationRepositoriesEvent struct { - // The action that was performed. Can be either "added" or "removed". - Action *string `json:"action,omitempty"` - RepositoriesAdded []*Repository `json:"repositories_added,omitempty"` - RepositoriesRemoved []*Repository `json:"repositories_removed,omitempty"` - RepositorySelection *string `json:"repository_selection,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// IssueCommentEvent is triggered when an issue comment is created on an issue -// or pull request. -// The Webhook event name is "issue_comment". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#issue_comment -type IssueCommentEvent struct { - // Action is the action that was performed on the comment. - // Possible values are: "created", "edited", "deleted". - Action *string `json:"action,omitempty"` - Issue *Issue `json:"issue,omitempty"` - Comment *IssueComment `json:"comment,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` -} - -// IssuesEvent is triggered when an issue is opened, edited, deleted, transferred, -// pinned, unpinned, closed, reopened, assigned, unassigned, labeled, unlabeled, -// locked, unlocked, milestoned, or demilestoned. -// The Webhook event name is "issues". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#issues -type IssuesEvent struct { - // Action is the action that was performed. Possible values are: "opened", - // "edited", "deleted", "transferred", "pinned", "unpinned", "closed", "reopened", - // "assigned", "unassigned", "labeled", "unlabeled", "locked", "unlocked", - // "milestoned", or "demilestoned". - Action *string `json:"action,omitempty"` - Issue *Issue `json:"issue,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Label *Label `json:"label,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` -} - -// LabelEvent is triggered when a repository's label is created, edited, or deleted. -// The Webhook event name is "label" -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#label -type LabelEvent struct { - // Action is the action that was performed. 
Possible values are: - // "created", "edited", "deleted" - Action *string `json:"action,omitempty"` - Label *Label `json:"label,omitempty"` - Changes *EditChange `json:"changes,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MarketplacePurchaseEvent is triggered when a user purchases, cancels, or changes -// their GitHub Marketplace plan. -// Webhook event name "marketplace_purchase". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#marketplace_purchase -type MarketplacePurchaseEvent struct { - // Action is the action that was performed. Possible values are: - // "purchased", "cancelled", "pending_change", "pending_change_cancelled", "changed". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - EffectiveDate *Timestamp `json:"effective_date,omitempty"` - MarketplacePurchase *MarketplacePurchase `json:"marketplace_purchase,omitempty"` - PreviousMarketplacePurchase *MarketplacePurchase `json:"previous_marketplace_purchase,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MemberEvent is triggered when a user is added as a collaborator to a repository. -// The Webhook event name is "member". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#member -type MemberEvent struct { - // Action is the action that was performed. Possible value is: "added". - Action *string `json:"action,omitempty"` - Member *User `json:"member,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MembershipEvent is triggered when a user is added or removed from a team. -// The Webhook event name is "membership". -// -// Events of this type are not visible in timelines, they are only used to -// trigger organization webhooks. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#membership -type MembershipEvent struct { - // Action is the action that was performed. Possible values are: "added", "removed". - Action *string `json:"action,omitempty"` - // Scope is the scope of the membership. Possible value is: "team". - Scope *string `json:"scope,omitempty"` - Member *User `json:"member,omitempty"` - Team *Team `json:"team,omitempty"` - - // The following fields are only populated by Webhook events. - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MergeGroup represents the merge group in a merge queue. -type MergeGroup struct { - // The SHA of the merge group. - HeadSHA *string `json:"head_sha,omitempty"` - // The full ref of the merge group. - HeadRef *string `json:"head_ref,omitempty"` - // The SHA of the merge group's parent commit. - BaseSHA *string `json:"base_sha,omitempty"` - // The full ref of the branch the merge group will be merged into. - BaseRef *string `json:"base_ref,omitempty"` - // An expanded representation of the head_sha commit. 
- HeadCommit *Commit `json:"head_commit,omitempty"` -} - -// MergeGroupEvent represents activity related to merge groups in a merge queue. The type of activity is specified -// in the action property of the payload object. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#merge_group -type MergeGroupEvent struct { - // The action that was performed. Currently, can only be checks_requested. - Action *string `json:"action,omitempty"` - // The merge group. - MergeGroup *MergeGroup `json:"merge_group,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Installation *Installation `json:"installation,omitempty"` - Sender *User `json:"sender,omitempty"` -} - -// MetaEvent is triggered when the webhook that this event is configured on is deleted. -// This event will only listen for changes to the particular hook the event is installed on. -// Therefore, it must be selected for each hook that you'd like to receive meta events for. -// The Webhook event name is "meta". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#meta -type MetaEvent struct { - // Action is the action that was performed. Possible value is: "deleted". - Action *string `json:"action,omitempty"` - // The ID of the modified webhook. - HookID *int64 `json:"hook_id,omitempty"` - // The modified webhook. - // This will contain different keys based on the type of webhook it is: repository, - // organization, business, app, or GitHub Marketplace. - Hook *Hook `json:"hook,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// MilestoneEvent is triggered when a milestone is created, closed, opened, edited, or deleted. -// The Webhook event name is "milestone". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#milestone -type MilestoneEvent struct { - // Action is the action that was performed. Possible values are: - // "created", "closed", "opened", "edited", "deleted" - Action *string `json:"action,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Org *Organization `json:"organization,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// OrganizationEvent is triggered when an organization is deleted and renamed, and when a user is added, -// removed, or invited to an organization. -// Events of this type are not visible in timelines. These events are only used to trigger organization hooks. -// Webhook event name is "organization". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#organization -type OrganizationEvent struct { - // Action is the action that was performed. - // Possible values are: "deleted", "renamed", "member_added", "member_removed", or "member_invited". 
- Action *string `json:"action,omitempty"` - - // Invitation is the invitation for the user or email if the action is "member_invited". - Invitation *Invitation `json:"invitation,omitempty"` - - // Membership is the membership between the user and the organization. - // Not present when the action is "member_invited". - Membership *Membership `json:"membership,omitempty"` - - Organization *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// OrgBlockEvent is triggered when an organization blocks or unblocks a user. -// The Webhook event name is "org_block". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#org_block -type OrgBlockEvent struct { - // Action is the action that was performed. - // Can be "blocked" or "unblocked". - Action *string `json:"action,omitempty"` - BlockedUser *User `json:"blocked_user,omitempty"` - Organization *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - - // The following fields are only populated by Webhook events. - Installation *Installation `json:"installation,omitempty"` -} - -// PackageEvent represents activity related to GitHub Packages. -// The Webhook event name is "package". -// -// This event is triggered when a GitHub Package is published or updated. -// -// GitHub API docs: https://developer.github.com/webhooks/event-payloads/#package -type PackageEvent struct { - // Action is the action that was performed. - // Can be "published" or "updated". - Action *string `json:"action,omitempty"` - Package *Package `json:"package,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - - // The following fields are only populated by Webhook events. - Installation *Installation `json:"installation,omitempty"` -} - -// PageBuildEvent represents an attempted build of a GitHub Pages site, whether -// successful or not. -// The Webhook event name is "page_build". -// -// This event is triggered on push to a GitHub Pages enabled branch (gh-pages -// for project pages, master for user and organization pages). -// -// Events of this type are not visible in timelines, they are only used to trigger hooks. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#page_build -type PageBuildEvent struct { - Build *PagesBuild `json:"build,omitempty"` - - // The following fields are only populated by Webhook events. - ID *int64 `json:"id,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// PingEvent is triggered when a Webhook is added to GitHub. -// -// GitHub API docs: https://developer.github.com/webhooks/#ping-event -type PingEvent struct { - // Random string of GitHub zen. - Zen *string `json:"zen,omitempty"` - // The ID of the webhook that triggered the ping. - HookID *int64 `json:"hook_id,omitempty"` - // The webhook configuration. - Hook *Hook `json:"hook,omitempty"` - - // The following fields are only populated by Webhook events. 
- Repo *Repository `json:"repository,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// ProjectEvent is triggered when a project is created, modified, or deleted.
-// The webhook event name is "project".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#project
-type ProjectEvent struct {
- Action *string `json:"action,omitempty"`
- Changes *ProjectChange `json:"changes,omitempty"`
- Project *Project `json:"project,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// ProjectCardEvent is triggered when a project card is created, updated, moved, converted to an issue, or deleted.
-// The webhook event name is "project_card".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#project_card
-type ProjectCardEvent struct {
- Action *string `json:"action,omitempty"`
- Changes *ProjectCardChange `json:"changes,omitempty"`
- AfterID *int64 `json:"after_id,omitempty"`
- ProjectCard *ProjectCard `json:"project_card,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// ProjectColumnEvent is triggered when a project column is created, updated, moved, or deleted.
-// The webhook event name is "project_column".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#project_column
-type ProjectColumnEvent struct {
- Action *string `json:"action,omitempty"`
- Changes *ProjectColumnChange `json:"changes,omitempty"`
- AfterID *int64 `json:"after_id,omitempty"`
- ProjectColumn *ProjectColumn `json:"project_column,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// PublicEvent is triggered when a private repository is open sourced.
-// According to GitHub: "Without a doubt: the best GitHub event."
-// The Webhook event name is "public".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#public
-type PublicEvent struct {
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// PullRequestEvent is triggered when a pull request is assigned, unassigned, labeled,
-// unlabeled, opened, edited, closed, reopened, synchronize, ready_for_review,
-// locked, unlocked, a pull request review is requested, or a review request is removed.
-// The Webhook event name is "pull_request".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/github-event-types#pullrequestevent
-type PullRequestEvent struct {
- // Action is the action that was performed.
Possible values are: - // "assigned", "unassigned", "review_requested", "review_request_removed", "labeled", "unlabeled", - // "opened", "edited", "closed", "ready_for_review", "locked", "unlocked", or "reopened". - // If the action is "closed" and the "merged" key is "false", the pull request was closed with unmerged commits. - // If the action is "closed" and the "merged" key is "true", the pull request was merged. - // While webhooks are also triggered when a pull request is synchronized, Events API timelines - // don't include pull request events with the "synchronize" action. - Action *string `json:"action,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Number *int `json:"number,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - // RequestedReviewer is populated in "review_requested", "review_request_removed" event deliveries. - // A request affecting multiple reviewers at once is split into multiple - // such event deliveries, each with a single, different RequestedReviewer. - RequestedReviewer *User `json:"requested_reviewer,omitempty"` - // In the event that a team is requested instead of a user, "requested_team" gets sent in place of - // "requested_user" with the same delivery behavior. - RequestedTeam *Team `json:"requested_team,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - Label *Label `json:"label,omitempty"` // Populated in "labeled" event deliveries. - - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` - - // The following fields are only populated when the Action is "synchronize". - Before *string `json:"before,omitempty"` - After *string `json:"after,omitempty"` -} - -// PullRequestReviewEvent is triggered when a review is submitted on a pull -// request. -// The Webhook event name is "pull_request_review". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request_review -type PullRequestReviewEvent struct { - // Action is always "submitted". - Action *string `json:"action,omitempty"` - Review *PullRequestReview `json:"review,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` -} - -// PullRequestReviewCommentEvent is triggered when a comment is created on a -// portion of the unified diff of a pull request. -// The Webhook event name is "pull_request_review_comment". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request_review_comment -type PullRequestReviewCommentEvent struct { - // Action is the action that was performed on the comment. - // Possible values are: "created", "edited", "deleted". 
- Action *string `json:"action,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - Comment *PullRequestComment `json:"comment,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// PullRequestReviewThreadEvent is triggered when a comment made as part of a -// review of a pull request is marked resolved or unresolved. -// The Webhook event name is "pull_request_review_thread". -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request_review_thread -type PullRequestReviewThreadEvent struct { - // Action is the action that was performed on the comment. - // Possible values are: "resolved", "unresolved". - Action *string `json:"action,omitempty"` - Thread *PullRequestThread `json:"thread,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// PullRequestTargetEvent is triggered when a pull request is assigned, unassigned, labeled, -// unlabeled, opened, edited, closed, reopened, synchronize, ready_for_review, -// locked, unlocked, a pull request review is requested, or a review request is removed. -// The Webhook event name is "pull_request_target". -// -// GitHub API docs: https://docs.github.com/en/actions/events-that-trigger-workflows#pull_request_target -type PullRequestTargetEvent struct { - // Action is the action that was performed. Possible values are: - // "assigned", "unassigned", "labeled", "unlabeled", "opened", "edited", "closed", "reopened", - // "ready_for_review", "locked", "unlocked", "review_requested" or "review_request_removed". - // If the action is "closed" and the "merged" key is "false", the pull request was closed with unmerged commits. - // If the action is "closed" and the "merged" key is "true", the pull request was merged. - // While webhooks are also triggered when a pull request is synchronized, Events API timelines - // don't include pull request events with the "synchronize" action. - Action *string `json:"action,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Number *int `json:"number,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - - // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` - // RequestedReviewer is populated in "review_requested", "review_request_removed" event deliveries. - // A request affecting multiple reviewers at once is split into multiple - // such event deliveries, each with a single, different RequestedReviewer. - RequestedReviewer *User `json:"requested_reviewer,omitempty"` - // In the event that a team is requested instead of a user, "requested_team" gets sent in place of - // "requested_user" with the same delivery behavior. - RequestedTeam *Team `json:"requested_team,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - Label *Label `json:"label,omitempty"` // Populated in "labeled" event deliveries. 
- - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` - - // The following fields are only populated when the Action is "synchronize". - Before *string `json:"before,omitempty"` - After *string `json:"after,omitempty"` -} - -// PushEvent represents a git push to a GitHub repository. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#push -type PushEvent struct { - PushID *int64 `json:"push_id,omitempty"` - Head *string `json:"head,omitempty"` - Ref *string `json:"ref,omitempty"` - Size *int `json:"size,omitempty"` - Commits []*HeadCommit `json:"commits,omitempty"` - Before *string `json:"before,omitempty"` - DistinctSize *int `json:"distinct_size,omitempty"` - - // The following fields are only populated by Webhook events. - Action *string `json:"action,omitempty"` - After *string `json:"after,omitempty"` - Created *bool `json:"created,omitempty"` - Deleted *bool `json:"deleted,omitempty"` - Forced *bool `json:"forced,omitempty"` - BaseRef *string `json:"base_ref,omitempty"` - Compare *string `json:"compare,omitempty"` - Repo *PushEventRepository `json:"repository,omitempty"` - HeadCommit *HeadCommit `json:"head_commit,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` - - // The following field is only present when the webhook is triggered on - // a repository belonging to an organization. - Organization *Organization `json:"organization,omitempty"` -} - -func (p PushEvent) String() string { - return Stringify(p) -} - -// HeadCommit represents a git commit in a GitHub PushEvent. -type HeadCommit struct { - Message *string `json:"message,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - URL *string `json:"url,omitempty"` - Distinct *bool `json:"distinct,omitempty"` - - // The following fields are only populated by Events API. - SHA *string `json:"sha,omitempty"` - - // The following fields are only populated by Webhook events. - ID *string `json:"id,omitempty"` - TreeID *string `json:"tree_id,omitempty"` - Timestamp *Timestamp `json:"timestamp,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Added []string `json:"added,omitempty"` - Removed []string `json:"removed,omitempty"` - Modified []string `json:"modified,omitempty"` -} - -func (h HeadCommit) String() string { - return Stringify(h) -} - -// PushEventRepository represents the repo object in a PushEvent payload. 
-type PushEventRepository struct {
- ID *int64 `json:"id,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- Name *string `json:"name,omitempty"`
- FullName *string `json:"full_name,omitempty"`
- Owner *User `json:"owner,omitempty"`
- Private *bool `json:"private,omitempty"`
- Description *string `json:"description,omitempty"`
- Fork *bool `json:"fork,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- PushedAt *Timestamp `json:"pushed_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Homepage *string `json:"homepage,omitempty"`
- PullsURL *string `json:"pulls_url,omitempty"`
- Size *int `json:"size,omitempty"`
- StargazersCount *int `json:"stargazers_count,omitempty"`
- WatchersCount *int `json:"watchers_count,omitempty"`
- Language *string `json:"language,omitempty"`
- HasIssues *bool `json:"has_issues,omitempty"`
- HasDownloads *bool `json:"has_downloads,omitempty"`
- HasWiki *bool `json:"has_wiki,omitempty"`
- HasPages *bool `json:"has_pages,omitempty"`
- ForksCount *int `json:"forks_count,omitempty"`
- Archived *bool `json:"archived,omitempty"`
- Disabled *bool `json:"disabled,omitempty"`
- OpenIssuesCount *int `json:"open_issues_count,omitempty"`
- DefaultBranch *string `json:"default_branch,omitempty"`
- MasterBranch *string `json:"master_branch,omitempty"`
- Organization *string `json:"organization,omitempty"`
- URL *string `json:"url,omitempty"`
- ArchiveURL *string `json:"archive_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- StatusesURL *string `json:"statuses_url,omitempty"`
- GitURL *string `json:"git_url,omitempty"`
- SSHURL *string `json:"ssh_url,omitempty"`
- CloneURL *string `json:"clone_url,omitempty"`
- SVNURL *string `json:"svn_url,omitempty"`
- Topics []string `json:"topics,omitempty"`
-}
-
-// PushEventRepoOwner is a basic representation of user/org in a PushEvent payload.
-type PushEventRepoOwner struct {
- Name *string `json:"name,omitempty"`
- Email *string `json:"email,omitempty"`
-}
-
-// ReleaseEvent is triggered when a release is published, unpublished, created,
-// edited, deleted, or prereleased.
-// The Webhook event name is "release".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#release
-type ReleaseEvent struct {
- // Action is the action that was performed. Possible values are: "published", "unpublished",
- // "created", "edited", "deleted", or "prereleased".
- Action *string `json:"action,omitempty"`
- Release *RepositoryRelease `json:"release,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
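A note on the shape of these event structs: every field is a pointer tagged omitempty, so a consumer can tell an absent key in the webhook JSON apart from a zero value, and go-github generates nil-safe Get* accessors for each field (in the vendored accessors.go, which this patch also removes). A minimal sketch of the consumer side, assuming the v53 API vendored here; describePush is a hypothetical helper, not part of this patch:

package example

import (
	"fmt"

	"github.com/google/go-github/v53/github"
)

// describePush builds a one-line summary of a push delivery. Each Get*
// accessor returns its zero value when any pointer in the chain is nil,
// so a sparse payload cannot cause a nil-pointer panic here.
func describePush(e *github.PushEvent) string {
	return fmt.Sprintf("%s pushed %d commit(s) to %s in %s",
		e.GetPusher().GetName(),
		len(e.Commits),
		e.GetRef(),
		e.GetRepo().GetFullName())
}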
-// RepositoryEvent is triggered when a repository is created, archived, unarchived,
-// renamed, edited, transferred, made public, or made private. Organization hooks are
-// also triggered when a repository is deleted.
-// The Webhook event name is "repository".
-//
-// Events of this type are not visible in timelines, they are only used to
-// trigger organization webhooks.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#repository
-type RepositoryEvent struct {
- // Action is the action that was performed. Possible values are: "created",
- // "deleted" (organization hooks only), "archived", "unarchived", "edited", "renamed",
- // "transferred", "publicized", or "privatized".
- Action *string `json:"action,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Changes *EditChange `json:"changes,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// RepositoryDispatchEvent is triggered when a client sends a POST request to the repository dispatch event endpoint.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#repository_dispatch
-type RepositoryDispatchEvent struct {
- // Action is the event_type that was submitted with the repository dispatch payload. Value can be any string.
- Action *string `json:"action,omitempty"`
- Branch *string `json:"branch,omitempty"`
- ClientPayload json.RawMessage `json:"client_payload,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// RepositoryImportEvent represents the activity related to a repository being imported to GitHub.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#repository_import
-type RepositoryImportEvent struct {
- // Status represents the final state of the import. This can be one of "success", "cancelled", or "failure".
- Status *string `json:"status,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// RepositoryVulnerabilityAlertEvent is triggered when a security alert is created, dismissed, or resolved.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#repository_vulnerability_alert
-type RepositoryVulnerabilityAlertEvent struct {
- // Action is the action that was performed. Possible values are: "create", "dismiss", "resolve".
- Action *string `json:"action,omitempty"`
-
- // The security alert of the vulnerable dependency.
- Alert *RepositoryVulnerabilityAlert `json:"alert,omitempty"`
-
- // The repository of the vulnerable dependency.
- Repository *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Installation *Installation `json:"installation,omitempty"`
-
- // The user that triggered the event.
- Sender *User `json:"sender,omitempty"`
-}
-
-// RepositoryVulnerabilityAlert represents a repository security alert.
-type RepositoryVulnerabilityAlert struct {
- ID *int64 `json:"id,omitempty"`
- AffectedRange *string `json:"affected_range,omitempty"`
- AffectedPackageName *string `json:"affected_package_name,omitempty"`
- ExternalReference *string `json:"external_reference,omitempty"`
- ExternalIdentifier *string `json:"external_identifier,omitempty"`
- GitHubSecurityAdvisoryID *string `json:"ghsa_id,omitempty"`
- Severity *string `json:"severity,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- FixedIn *string `json:"fixed_in,omitempty"`
- Dismisser *User `json:"dismisser,omitempty"`
- DismissReason *string `json:"dismiss_reason,omitempty"`
- DismissedAt *Timestamp `json:"dismissed_at,omitempty"`
-}
-
-// SecretScanningAlertEvent is triggered when a secret scanning alert occurs in a repository.
-// The Webhook event name is "secret_scanning_alert".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#secret_scanning_alert
-type SecretScanningAlertEvent struct {
- // Action is the action that was performed. Possible values are: "created", "resolved", or "reopened".
- Action *string `json:"action,omitempty"`
-
- // Alert is the secret scanning alert involved in the event.
- Alert *SecretScanningAlert `json:"alert,omitempty"`
-
- // Only populated by the "resolved" and "reopened" actions.
- Sender *User `json:"sender,omitempty"`
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Organization *Organization `json:"organization,omitempty"`
- Enterprise *Enterprise `json:"enterprise,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// StarEvent is triggered when a star is added or removed from a repository.
-// The Webhook event name is "star".
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#star
-type StarEvent struct {
- // Action is the action that was performed. Possible values are: "created" or "deleted".
- Action *string `json:"action,omitempty"`
-
- // StarredAt is the time the star was created. It will be null for the "deleted" action.
- StarredAt *Timestamp `json:"starred_at,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Org *Organization `json:"organization,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// StatusEvent is triggered when the status of a Git commit changes.
-// The Webhook event name is "status".
-//
-// Events of this type are not visible in timelines, they are only used to
-// trigger hooks.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#status
-type StatusEvent struct {
- SHA *string `json:"sha,omitempty"`
- // State is the new state. Possible values are: "pending", "success", "failure", "error".
- State *string `json:"state,omitempty"`
- Description *string `json:"description,omitempty"`
- TargetURL *string `json:"target_url,omitempty"`
- Branches []*Branch `json:"branches,omitempty"`
-
- // The following fields are only populated by Webhook events.
- ID *int64 `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- Context *string `json:"context,omitempty"`
- Commit *RepositoryCommit `json:"commit,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// TeamEvent is triggered when an organization's team is created, modified or deleted.
-// The Webhook event name is "team".
-//
-// Events of this type are not visible in timelines. These events are only used
-// to trigger hooks.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#team
-type TeamEvent struct {
- Action *string `json:"action,omitempty"`
- Team *Team `json:"team,omitempty"`
- Changes *TeamChange `json:"changes,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// TeamAddEvent is triggered when a repository is added to a team. -// The Webhook event name is "team_add". -// -// Events of this type are not visible in timelines. These events are only used -// to trigger hooks. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#team_add -type TeamAddEvent struct { - Team *Team `json:"team,omitempty"` - Repo *Repository `json:"repository,omitempty"` - - // The following fields are only populated by Webhook events. - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// UserEvent is triggered when a user is created or deleted. -// The Webhook event name is "user". -// -// Only global webhooks can subscribe to this event type. -// -// GitHub API docs: https://developer.github.com/enterprise/v3/activity/events/types/#userevent-enterprise -type UserEvent struct { - User *User `json:"user,omitempty"` - // The action performed. Possible values are: "created" or "deleted". - Action *string `json:"action,omitempty"` - Enterprise *Enterprise `json:"enterprise,omitempty"` - Sender *User `json:"sender,omitempty"` - - // The following fields are only populated by Webhook events. - Installation *Installation `json:"installation,omitempty"` -} - -// WatchEvent is related to starring a repository, not watching. See this API -// blog post for an explanation: https://developer.github.com/changes/2012-09-05-watcher-api/ -// -// The event’s actor is the user who starred a repository, and the event’s -// repository is the repository that was starred. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#watch -type WatchEvent struct { - // Action is the action that was performed. Possible value is: "started". - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// WorkflowDispatchEvent is triggered when someone triggers a workflow run on GitHub or -// sends a POST request to the create a workflow dispatch event endpoint. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#workflow_dispatch -type WorkflowDispatchEvent struct { - Inputs json.RawMessage `json:"inputs,omitempty"` - Ref *string `json:"ref,omitempty"` - Workflow *string `json:"workflow,omitempty"` - - // The following fields are only populated by Webhook events. - Repo *Repository `json:"repository,omitempty"` - Org *Organization `json:"organization,omitempty"` - Sender *User `json:"sender,omitempty"` - Installation *Installation `json:"installation,omitempty"` -} - -// WorkflowJobEvent is triggered when a job is queued, started or completed. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job -type WorkflowJobEvent struct { - WorkflowJob *WorkflowJob `json:"workflow_job,omitempty"` - - Action *string `json:"action,omitempty"` - - // The following fields are only populated by Webhook events. 
-
- // Org is not nil when the webhook is configured for an organization or the event
- // occurs from activity in a repository owned by an organization.
- Org *Organization `json:"organization,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// WorkflowRunEvent is triggered when a GitHub Actions workflow run is requested or completed.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#workflow_run
-type WorkflowRunEvent struct {
- Action *string `json:"action,omitempty"`
- Workflow *Workflow `json:"workflow,omitempty"`
- WorkflowRun *WorkflowRun `json:"workflow_run,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Org *Organization `json:"organization,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
- Installation *Installation `json:"installation,omitempty"`
-}
-
-// SecurityAdvisory represents the advisory object in SecurityAdvisoryEvent payload.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#security_advisory
-type SecurityAdvisory struct {
- GHSAID *string `json:"ghsa_id,omitempty"`
- Summary *string `json:"summary,omitempty"`
- Description *string `json:"description,omitempty"`
- Severity *string `json:"severity,omitempty"`
- Identifiers []*AdvisoryIdentifier `json:"identifiers,omitempty"`
- References []*AdvisoryReference `json:"references,omitempty"`
- PublishedAt *Timestamp `json:"published_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- WithdrawnAt *Timestamp `json:"withdrawn_at,omitempty"`
- Vulnerabilities []*AdvisoryVulnerability `json:"vulnerabilities,omitempty"`
-}
-
-// AdvisoryIdentifier represents the identifier for a Security Advisory.
-type AdvisoryIdentifier struct {
- Value *string `json:"value,omitempty"`
- Type *string `json:"type,omitempty"`
-}
-
-// AdvisoryReference represents the reference url for the security advisory.
-type AdvisoryReference struct {
- URL *string `json:"url,omitempty"`
-}
-
-// AdvisoryVulnerability represents the vulnerability object for a Security Advisory.
-type AdvisoryVulnerability struct {
- Package *VulnerabilityPackage `json:"package,omitempty"`
- Severity *string `json:"severity,omitempty"`
- VulnerableVersionRange *string `json:"vulnerable_version_range,omitempty"`
- FirstPatchedVersion *FirstPatchedVersion `json:"first_patched_version,omitempty"`
-}
-
-// VulnerabilityPackage represents the package object for an Advisory Vulnerability.
-type VulnerabilityPackage struct {
- Ecosystem *string `json:"ecosystem,omitempty"`
- Name *string `json:"name,omitempty"`
-}
-
-// FirstPatchedVersion represents the identifier for the first patched version of that vulnerability.
-type FirstPatchedVersion struct {
- Identifier *string `json:"identifier,omitempty"`
-}
-
-// SecurityAdvisoryEvent is triggered when a security-related vulnerability is found in software on GitHub.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#security_advisory
-type SecurityAdvisoryEvent struct {
- Action *string `json:"action,omitempty"`
- SecurityAdvisory *SecurityAdvisory `json:"security_advisory,omitempty"`
-}
-
-// CodeScanningAlertEvent is triggered when code scanning finds a potential vulnerability or error in your code.
-//
-// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#code_scanning_alert
-type CodeScanningAlertEvent struct {
- Action *string `json:"action,omitempty"`
- Alert *Alert `json:"alert,omitempty"`
- Ref *string `json:"ref,omitempty"`
- // CommitOID is the commit SHA of the code scanning alert
- CommitOID *string `json:"commit_oid,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
-
- Installation *Installation `json:"installation,omitempty"`
-}
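The event types above are what github.ParseWebHook returns, one concrete struct per X-GitHub-Event header value. A minimal receiver sketch, assuming go-github v53's ValidatePayload, WebHookType, and ParseWebHook as vendored in this tree; the secret and the handler wiring are placeholders:

package example

import (
	"log"
	"net/http"

	"github.com/google/go-github/v53/github"
)

// handleWebhook checks the delivery signature against the shared secret,
// decodes the payload into its concrete event type, and dispatches on it.
func handleWebhook(secret []byte) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		payload, err := github.ValidatePayload(r, secret)
		if err != nil {
			http.Error(w, "invalid signature", http.StatusForbidden)
			return
		}
		event, err := github.ParseWebHook(github.WebHookType(r), payload)
		if err != nil {
			http.Error(w, "unrecognized payload", http.StatusBadRequest)
			return
		}
		switch e := event.(type) {
		case *github.PushEvent:
			log.Printf("push to %s", e.GetRef())
		case *github.CodeScanningAlertEvent:
			log.Printf("code scanning alert %s on %s", e.GetAction(), e.GetRepo().GetFullName())
		default:
			log.Printf("ignoring %T", e)
		}
	}
}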
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/gists.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/gists.go
deleted file mode 100644
index 80961fcb90..0000000000
--- a/openshift/tools/vendor/github.com/google/go-github/v53/github/gists.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
- "time"
-)
-
-// GistsService handles communication with the Gist related
-// methods of the GitHub API.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists
-type GistsService service
-
-// Gist represents a GitHub gist.
-type Gist struct {
- ID *string `json:"id,omitempty"`
- Description *string `json:"description,omitempty"`
- Public *bool `json:"public,omitempty"`
- Owner *User `json:"owner,omitempty"`
- Files map[GistFilename]GistFile `json:"files,omitempty"`
- Comments *int `json:"comments,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- GitPullURL *string `json:"git_pull_url,omitempty"`
- GitPushURL *string `json:"git_push_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
-}
-
-func (g Gist) String() string {
- return Stringify(g)
-}
-
-// GistFilename represents filename on a gist.
-type GistFilename string
-
-// GistFile represents a file on a gist.
-type GistFile struct {
- Size *int `json:"size,omitempty"`
- Filename *string `json:"filename,omitempty"`
- Language *string `json:"language,omitempty"`
- Type *string `json:"type,omitempty"`
- RawURL *string `json:"raw_url,omitempty"`
- Content *string `json:"content,omitempty"`
-}
-
-func (g GistFile) String() string {
- return Stringify(g)
-}
-
-// GistCommit represents a commit on a gist.
-type GistCommit struct {
- URL *string `json:"url,omitempty"`
- Version *string `json:"version,omitempty"`
- User *User `json:"user,omitempty"`
- ChangeStatus *CommitStats `json:"change_status,omitempty"`
- CommittedAt *Timestamp `json:"committed_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
-}
-
-func (gc GistCommit) String() string {
- return Stringify(gc)
-}
-
-// GistFork represents a fork of a gist.
-type GistFork struct {
- URL *string `json:"url,omitempty"`
- User *User `json:"user,omitempty"`
- ID *string `json:"id,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
-}
-
-func (gf GistFork) String() string {
- return Stringify(gf)
-}
-
-// GistListOptions specifies the optional parameters to the
-// GistsService.List, GistsService.ListAll, and GistsService.ListStarred methods.
-type GistListOptions struct {
- // Since filters Gists by time.
- Since time.Time `url:"since,omitempty"`
-
- ListOptions
-}
-
-// List gists for a user. Passing the empty string will list
-// all public gists if called anonymously. However, if the call
-// is authenticated, it will return all gists for the authenticated
-// user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gists-for-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gists-for-a-user
-func (s *GistsService) List(ctx context.Context, user string, opts *GistListOptions) ([]*Gist, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/gists", user)
- } else {
- u = "gists"
- }
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gists []*Gist
- resp, err := s.client.Do(ctx, req, &gists)
- if err != nil {
- return nil, resp, err
- }
-
- return gists, resp, nil
-}
-
-// ListAll lists all public gists.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-public-gists
-func (s *GistsService) ListAll(ctx context.Context, opts *GistListOptions) ([]*Gist, *Response, error) {
- u, err := addOptions("gists/public", opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gists []*Gist
- resp, err := s.client.Do(ctx, req, &gists)
- if err != nil {
- return nil, resp, err
- }
-
- return gists, resp, nil
-}
-
-// ListStarred lists starred gists of the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-starred-gists
-func (s *GistsService) ListStarred(ctx context.Context, opts *GistListOptions) ([]*Gist, *Response, error) {
- u, err := addOptions("gists/starred", opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gists []*Gist
- resp, err := s.client.Do(ctx, req, &gists)
- if err != nil {
- return nil, resp, err
- }
-
- return gists, resp, nil
-}
-
-// Get a single gist.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#get-a-gist
-func (s *GistsService) Get(ctx context.Context, id string) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gist := new(Gist)
- resp, err := s.client.Do(ctx, req, gist)
- if err != nil {
- return nil, resp, err
- }
-
- return gist, resp, nil
-}
-
-// GetRevision gets a specific revision of a gist.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#get-a-gist-revision
-func (s *GistsService) GetRevision(ctx context.Context, id, sha string) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v/%v", id, sha)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gist := new(Gist)
- resp, err := s.client.Do(ctx, req, gist)
- if err != nil {
- return nil, resp, err
- }
-
- return gist, resp, nil
-}
-
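List, ListAll, and ListStarred all take GistListOptions, so time filtering and pagination compose in the usual go-github way: feed Response.NextPage back into ListOptions.Page until it is zero. A short sketch, assuming a constructed *github.Client; listRecentGists is a hypothetical helper:

package example

import (
	"context"
	"time"

	"github.com/google/go-github/v53/github"
)

// listRecentGists pages through all of a user's gists updated in the
// last seven days, 50 per page.
func listRecentGists(ctx context.Context, client *github.Client, user string) ([]*github.Gist, error) {
	opts := &github.GistListOptions{
		Since:       time.Now().AddDate(0, 0, -7),
		ListOptions: github.ListOptions{PerPage: 50},
	}
	var all []*github.Gist
	for {
		gists, resp, err := client.Gists.List(ctx, user, opts)
		if err != nil {
			return nil, err
		}
		all = append(all, gists...)
		if resp.NextPage == 0 {
			return all, nil
		}
		opts.Page = resp.NextPage
	}
}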
-// Create a gist for the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#create-a-gist
-func (s *GistsService) Create(ctx context.Context, gist *Gist) (*Gist, *Response, error) {
- u := "gists"
- req, err := s.client.NewRequest("POST", u, gist)
- if err != nil {
- return nil, nil, err
- }
-
- g := new(Gist)
- resp, err := s.client.Do(ctx, req, g)
- if err != nil {
- return nil, resp, err
- }
-
- return g, resp, nil
-}
-
-// Edit a gist.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#update-a-gist
-func (s *GistsService) Edit(ctx context.Context, id string, gist *Gist) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("PATCH", u, gist)
- if err != nil {
- return nil, nil, err
- }
-
- g := new(Gist)
- resp, err := s.client.Do(ctx, req, g)
- if err != nil {
- return nil, resp, err
- }
-
- return g, resp, nil
-}
-
-// ListCommits lists commits of a gist.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gist-commits
-func (s *GistsService) ListCommits(ctx context.Context, id string, opts *ListOptions) ([]*GistCommit, *Response, error) {
- u := fmt.Sprintf("gists/%v/commits", id)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var gistCommits []*GistCommit
- resp, err := s.client.Do(ctx, req, &gistCommits)
- if err != nil {
- return nil, resp, err
- }
-
- return gistCommits, resp, nil
-}
-
-// Delete a gist.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#delete-a-gist
-func (s *GistsService) Delete(ctx context.Context, id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// Star a gist on behalf of the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#star-a-gist
-func (s *GistsService) Star(ctx context.Context, id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// Unstar a gist on behalf of the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#unstar-a-gist
-func (s *GistsService) Unstar(ctx context.Context, id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// IsStarred checks if a gist is starred by the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/gists/gists#check-if-a-gist-is-starred
-func (s *GistsService) IsStarred(ctx context.Context, id string) (bool, *Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
-
- resp, err := s.client.Do(ctx, req, nil)
- starred, err := parseBoolResponse(err)
- return starred, resp, err
-}
-
-// Fork a gist.
-// -// GitHub API docs: https://docs.github.com/en/rest/gists/gists#fork-a-gist -func (s *GistsService) Fork(ctx context.Context, id string) (*Gist, *Response, error) { - u := fmt.Sprintf("gists/%v/forks", id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - g := new(Gist) - resp, err := s.client.Do(ctx, req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// ListForks lists forks of a gist. -// -// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gist-forks -func (s *GistsService) ListForks(ctx context.Context, id string, opts *ListOptions) ([]*GistFork, *Response, error) { - u := fmt.Sprintf("gists/%v/forks", id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var gistForks []*GistFork - resp, err := s.client.Do(ctx, req, &gistForks) - if err != nil { - return nil, resp, err - } - - return gistForks, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/gists_comments.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/gists_comments.go deleted file mode 100644 index ee0fbfa45f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/gists_comments.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GistComment represents a Gist comment. -type GistComment struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` -} - -func (g GistComment) String() string { - return Stringify(g) -} - -// ListComments lists all comments for a gist. -// -// GitHub API docs: https://docs.github.com/en/rest/gists/comments#list-gist-comments -func (s *GistsService) ListComments(ctx context.Context, gistID string, opts *ListOptions) ([]*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments", gistID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var comments []*GistComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetComment retrieves a single comment from a gist. -// -// GitHub API docs: https://docs.github.com/en/rest/gists/comments#get-a-gist-comment -func (s *GistsService) GetComment(ctx context.Context, gistID string, commentID int64) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// CreateComment creates a comment for a gist. 
-// -// GitHub API docs: https://docs.github.com/en/rest/gists/comments#create-a-gist-comment -func (s *GistsService) CreateComment(ctx context.Context, gistID string, comment *GistComment) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments", gistID) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// EditComment edits an existing gist comment. -// -// GitHub API docs: https://docs.github.com/en/rest/gists/comments#update-a-gist-comment -func (s *GistsService) EditComment(ctx context.Context, gistID string, commentID int64, comment *GistComment) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes a gist comment. -// -// GitHub API docs: https://docs.github.com/en/rest/gists/comments#delete-a-gist-comment -func (s *GistsService) DeleteComment(ctx context.Context, gistID string, commentID int64) (*Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/git.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/git.go deleted file mode 100644 index 8960de7b14..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/git.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// GitService handles communication with the git data related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/git/ -type GitService service diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_blobs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/git_blobs.go deleted file mode 100644 index da0485ccbe..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_blobs.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" -) - -// Blob represents a blob object. -type Blob struct { - Content *string `json:"content,omitempty"` - Encoding *string `json:"encoding,omitempty"` - SHA *string `json:"sha,omitempty"` - Size *int `json:"size,omitempty"` - URL *string `json:"url,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// GetBlob fetches a blob from a repo given a SHA. 
-// -// GitHub API docs: https://docs.github.com/en/rest/git/blobs#get-a-blob -func (s *GitService) GetBlob(ctx context.Context, owner string, repo string, sha string) (*Blob, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - blob := new(Blob) - resp, err := s.client.Do(ctx, req, blob) - if err != nil { - return nil, resp, err - } - - return blob, resp, nil -} - -// GetBlobRaw fetches a blob's contents from a repo. -// Unlike GetBlob, it returns the raw bytes rather than the base64-encoded data. -// -// GitHub API docs: https://docs.github.com/en/rest/git/blobs#get-a-blob -func (s *GitService) GetBlobRaw(ctx context.Context, owner, repo, sha string) ([]byte, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", "application/vnd.github.v3.raw") - - var buf bytes.Buffer - resp, err := s.client.Do(ctx, req, &buf) - if err != nil { - return nil, resp, err - } - - return buf.Bytes(), resp, nil -} - -// CreateBlob creates a blob object. -// -// GitHub API docs: https://docs.github.com/en/rest/git/blobs#create-a-blob -func (s *GitService) CreateBlob(ctx context.Context, owner string, repo string, blob *Blob) (*Blob, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs", owner, repo) - req, err := s.client.NewRequest("POST", u, blob) - if err != nil { - return nil, nil, err - } - - t := new(Blob) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_commits.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/git_commits.go deleted file mode 100644 index 862837c0d5..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_commits.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "errors" - "fmt" - "strings" - - "github.com/ProtonMail/go-crypto/openpgp" -) - -// SignatureVerification represents GPG signature verification. -type SignatureVerification struct { - Verified *bool `json:"verified,omitempty"` - Reason *string `json:"reason,omitempty"` - Signature *string `json:"signature,omitempty"` - Payload *string `json:"payload,omitempty"` -} - -// Commit represents a GitHub commit. -type Commit struct { - SHA *string `json:"sha,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Message *string `json:"message,omitempty"` - Tree *Tree `json:"tree,omitempty"` - Parents []*Commit `json:"parents,omitempty"` - Stats *CommitStats `json:"stats,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - Verification *SignatureVerification `json:"verification,omitempty"` - NodeID *string `json:"node_id,omitempty"` - - // CommentCount is the number of GitHub comments on the commit. This - // is only populated for requests that fetch GitHub data like - // Pulls.ListCommits, Repositories.ListCommits, etc. - CommentCount *int `json:"comment_count,omitempty"` - - // SigningKey denotes a key to sign the commit with. 
If not nil this key will - // be used to sign the commit. The private key must be present and already - // decrypted. Ignored if Verification.Signature is defined. - SigningKey *openpgp.Entity `json:"-"` -} - -func (c Commit) String() string { - return Stringify(c) -} - -// CommitAuthor represents the author or committer of a commit. The commit -// author may not correspond to a GitHub User. -type CommitAuthor struct { - Date *Timestamp `json:"date,omitempty"` - Name *string `json:"name,omitempty"` - Email *string `json:"email,omitempty"` - - // The following fields are only populated by Webhook events. - Login *string `json:"username,omitempty"` // Renamed for go-github consistency. -} - -func (c CommitAuthor) String() string { - return Stringify(c) -} - -// GetCommit fetches the Commit object for a given SHA. -// -// GitHub API docs: https://docs.github.com/en/rest/git/commits#get-a-commit -func (s *GitService) GetCommit(ctx context.Context, owner string, repo string, sha string) (*Commit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/commits/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// createCommit represents the body of a CreateCommit request. -type createCommit struct { - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Message *string `json:"message,omitempty"` - Tree *string `json:"tree,omitempty"` - Parents []string `json:"parents,omitempty"` - Signature *string `json:"signature,omitempty"` -} - -// CreateCommit creates a new commit in a repository. -// commit must not be nil. -// -// The commit.Committer is optional and will be filled with the commit.Author -// data if omitted. If the commit.Author is omitted, it will be filled in with -// the authenticated user’s information and the current date. 
-// -// GitHub API docs: https://docs.github.com/en/rest/git/commits#create-a-commit -func (s *GitService) CreateCommit(ctx context.Context, owner string, repo string, commit *Commit) (*Commit, *Response, error) { - if commit == nil { - return nil, nil, fmt.Errorf("commit must be provided") - } - - u := fmt.Sprintf("repos/%v/%v/git/commits", owner, repo) - - parents := make([]string, len(commit.Parents)) - for i, parent := range commit.Parents { - parents[i] = *parent.SHA - } - - body := &createCommit{ - Author: commit.Author, - Committer: commit.Committer, - Message: commit.Message, - Parents: parents, - } - if commit.Tree != nil { - body.Tree = commit.Tree.SHA - } - if commit.SigningKey != nil { - signature, err := createSignature(commit.SigningKey, body) - if err != nil { - return nil, nil, err - } - body.Signature = &signature - } - if commit.Verification != nil { - body.Signature = commit.Verification.Signature - } - - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -func createSignature(signingKey *openpgp.Entity, commit *createCommit) (string, error) { - if signingKey == nil || commit == nil { - return "", errors.New("createSignature: invalid parameters") - } - - message, err := createSignatureMessage(commit) - if err != nil { - return "", err - } - - writer := new(bytes.Buffer) - reader := bytes.NewReader([]byte(message)) - if err := openpgp.ArmoredDetachSign(writer, signingKey, reader, nil); err != nil { - return "", err - } - - return writer.String(), nil -} - -func createSignatureMessage(commit *createCommit) (string, error) { - if commit == nil || commit.Message == nil || *commit.Message == "" || commit.Author == nil { - return "", errors.New("createSignatureMessage: invalid parameters") - } - - var message []string - - if commit.Tree != nil { - message = append(message, fmt.Sprintf("tree %s", *commit.Tree)) - } - - for _, parent := range commit.Parents { - message = append(message, fmt.Sprintf("parent %s", parent)) - } - - message = append(message, fmt.Sprintf("author %s <%s> %d %s", commit.Author.GetName(), commit.Author.GetEmail(), commit.Author.GetDate().Unix(), commit.Author.GetDate().Format("-0700"))) - - committer := commit.Committer - if committer == nil { - committer = commit.Author - } - - // There needs to be a double newline after committer - message = append(message, fmt.Sprintf("committer %s <%s> %d %s\n", committer.GetName(), committer.GetEmail(), committer.GetDate().Unix(), committer.GetDate().Format("-0700"))) - message = append(message, *commit.Message) - - return strings.Join(message, "\n"), nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_refs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/git_refs.go deleted file mode 100644 index e839c30f66..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_refs.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/url" - "strings" -) - -// Reference represents a GitHub reference. 
-type Reference struct { - Ref *string `json:"ref"` - URL *string `json:"url"` - Object *GitObject `json:"object"` - NodeID *string `json:"node_id,omitempty"` -} - -func (r Reference) String() string { - return Stringify(r) -} - -// GitObject represents a Git object. -type GitObject struct { - Type *string `json:"type"` - SHA *string `json:"sha"` - URL *string `json:"url"` -} - -func (o GitObject) String() string { - return Stringify(o) -} - -// createRefRequest represents the payload for creating a reference. -type createRefRequest struct { - Ref *string `json:"ref"` - SHA *string `json:"sha"` -} - -// updateRefRequest represents the payload for updating a reference. -type updateRefRequest struct { - SHA *string `json:"sha"` - Force *bool `json:"force"` -} - -// GetRef fetches a single reference in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/git/refs#get-a-reference -func (s *GitService) GetRef(ctx context.Context, owner string, repo string, ref string) (*Reference, *Response, error) { - ref = strings.TrimPrefix(ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/ref/%v", owner, repo, refURLEscape(ref)) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// refURLEscape escapes every path segment of the given ref. Those must -// not contain escaped "/" - as "%2F" - or github will not recognize it. -func refURLEscape(ref string) string { - parts := strings.Split(ref, "/") - for i, s := range parts { - parts[i] = url.PathEscape(s) - } - return strings.Join(parts, "/") -} - -// ReferenceListOptions specifies optional parameters to the -// GitService.ListMatchingRefs method. -type ReferenceListOptions struct { - Ref string `url:"-"` - - ListOptions -} - -// ListMatchingRefs lists references in a repository that match a supplied ref. -// Use an empty ref to list all references. -// -// GitHub API docs: https://docs.github.com/en/rest/git/refs#list-matching-references -func (s *GitService) ListMatchingRefs(ctx context.Context, owner, repo string, opts *ReferenceListOptions) ([]*Reference, *Response, error) { - var ref string - if opts != nil { - ref = strings.TrimPrefix(opts.Ref, "refs/") - } - u := fmt.Sprintf("repos/%v/%v/git/matching-refs/%v", owner, repo, refURLEscape(ref)) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rs []*Reference - resp, err := s.client.Do(ctx, req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// CreateRef creates a new ref in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/git/refs#create-a-reference -func (s *GitService) CreateRef(ctx context.Context, owner string, repo string, ref *Reference) (*Reference, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/refs", owner, repo) - req, err := s.client.NewRequest("POST", u, &createRefRequest{ - // back-compat with previous behavior that didn't require 'refs/' prefix - Ref: String("refs/" + strings.TrimPrefix(*ref.Ref, "refs/")), - SHA: ref.Object.SHA, - }) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// UpdateRef updates an existing ref in a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/git/refs#update-a-reference -func (s *GitService) UpdateRef(ctx context.Context, owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) { - refPath := strings.TrimPrefix(*ref.Ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refURLEscape(refPath)) - req, err := s.client.NewRequest("PATCH", u, &updateRefRequest{ - SHA: ref.Object.SHA, - Force: &force, - }) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// DeleteRef deletes a ref from a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/git/refs#delete-a-reference -func (s *GitService) DeleteRef(ctx context.Context, owner string, repo string, ref string) (*Response, error) { - ref = strings.TrimPrefix(ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refURLEscape(ref)) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_tags.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/git_tags.go deleted file mode 100644 index 30d7b2c2d2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_tags.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Tag represents a tag object. -type Tag struct { - Tag *string `json:"tag,omitempty"` - SHA *string `json:"sha,omitempty"` - URL *string `json:"url,omitempty"` - Message *string `json:"message,omitempty"` - Tagger *CommitAuthor `json:"tagger,omitempty"` - Object *GitObject `json:"object,omitempty"` - Verification *SignatureVerification `json:"verification,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// createTagRequest represents the body of a CreateTag request. This is mostly -// identical to Tag with the exception that the object SHA and Type are -// top-level fields, rather than being nested inside a JSON object. -type createTagRequest struct { - Tag *string `json:"tag,omitempty"` - Message *string `json:"message,omitempty"` - Object *string `json:"object,omitempty"` - Type *string `json:"type,omitempty"` - Tagger *CommitAuthor `json:"tagger,omitempty"` -} - -// GetTag fetches a tag from a repo given a SHA. -// -// GitHub API docs: https://docs.github.com/en/rest/git/tags#get-a-tag -func (s *GitService) GetTag(ctx context.Context, owner string, repo string, sha string) (*Tag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/tags/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - tag := new(Tag) - resp, err := s.client.Do(ctx, req, tag) - if err != nil { - return nil, resp, err - } - - return tag, resp, nil -} - -// CreateTag creates a tag object. 
-// -// GitHub API docs: https://docs.github.com/en/rest/git/tags#create-a-tag-object -func (s *GitService) CreateTag(ctx context.Context, owner string, repo string, tag *Tag) (*Tag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/tags", owner, repo) - - // convert Tag into a createTagRequest - tagRequest := &createTagRequest{ - Tag: tag.Tag, - Message: tag.Message, - Tagger: tag.Tagger, - } - if tag.Object != nil { - tagRequest.Object = tag.Object.SHA - tagRequest.Type = tag.Object.Type - } - - req, err := s.client.NewRequest("POST", u, tagRequest) - if err != nil { - return nil, nil, err - } - - t := new(Tag) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_trees.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/git_trees.go deleted file mode 100644 index db28976e03..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/git_trees.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "fmt" -) - -// Tree represents a GitHub tree. -type Tree struct { - SHA *string `json:"sha,omitempty"` - Entries []*TreeEntry `json:"tree,omitempty"` - - // Truncated is true if the number of items in the tree - // exceeded GitHub's maximum limit and the Entries were truncated - // in the response. Only populated for requests that fetch - // trees like Git.GetTree. - Truncated *bool `json:"truncated,omitempty"` -} - -func (t Tree) String() string { - return Stringify(t) -} - -// TreeEntry represents the contents of a tree structure. TreeEntry can -// represent either a blob, a commit (in the case of a submodule), or another -// tree. -type TreeEntry struct { - SHA *string `json:"sha,omitempty"` - Path *string `json:"path,omitempty"` - Mode *string `json:"mode,omitempty"` - Type *string `json:"type,omitempty"` - Size *int `json:"size,omitempty"` - Content *string `json:"content,omitempty"` - URL *string `json:"url,omitempty"` -} - -func (t TreeEntry) String() string { - return Stringify(t) -} - -// treeEntryWithFileDelete is used internally to delete a file whose -// Content and SHA fields are empty. It does this by removing the "omitempty" -// tag modifier on the SHA field which causes the GitHub API to receive -// {"sha":null} and thereby delete the file. 
-type treeEntryWithFileDelete struct { - SHA *string `json:"sha"` - Path *string `json:"path,omitempty"` - Mode *string `json:"mode,omitempty"` - Type *string `json:"type,omitempty"` - Size *int `json:"size,omitempty"` - Content *string `json:"content,omitempty"` - URL *string `json:"url,omitempty"` -} - -func (t *TreeEntry) MarshalJSON() ([]byte, error) { - if t.SHA == nil && t.Content == nil { - return json.Marshal(struct { - SHA *string `json:"sha"` - Path *string `json:"path,omitempty"` - Mode *string `json:"mode,omitempty"` - Type *string `json:"type,omitempty"` - }{ - nil, - t.Path, - t.Mode, - t.Type, - }) - } - return json.Marshal(struct { - SHA *string `json:"sha,omitempty"` - Path *string `json:"path,omitempty"` - Mode *string `json:"mode,omitempty"` - Type *string `json:"type,omitempty"` - Size *int `json:"size,omitempty"` - Content *string `json:"content,omitempty"` - URL *string `json:"url,omitempty"` - }{ - SHA: t.SHA, - Path: t.Path, - Mode: t.Mode, - Type: t.Type, - Size: t.Size, - Content: t.Content, - URL: t.URL, - }) -} - -// GetTree fetches the Tree object for a given sha hash from a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/git/trees#get-a-tree -func (s *GitService) GetTree(ctx context.Context, owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/trees/%v", owner, repo, sha) - if recursive { - u += "?recursive=1" - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - t := new(Tree) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// createTree represents the body of a CreateTree request. -type createTree struct { - BaseTree string `json:"base_tree,omitempty"` - Entries []interface{} `json:"tree"` -} - -// CreateTree creates a new tree in a repository. If both a tree and a nested -// path modifying that tree are specified, it will overwrite the contents of -// that tree with the new path contents and write a new tree out. -// -// GitHub API docs: https://docs.github.com/en/rest/git/trees#create-a-tree -func (s *GitService) CreateTree(ctx context.Context, owner string, repo string, baseTree string, entries []*TreeEntry) (*Tree, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/trees", owner, repo) - - newEntries := make([]interface{}, 0, len(entries)) - for _, entry := range entries { - if entry.Content == nil && entry.SHA == nil { - newEntries = append(newEntries, treeEntryWithFileDelete{ - Path: entry.Path, - Mode: entry.Mode, - Type: entry.Type, - Size: entry.Size, - URL: entry.URL, - }) - continue - } - newEntries = append(newEntries, entry) - } - - body := &createTree{ - BaseTree: baseTree, - Entries: newEntries, - } - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - t := new(Tree) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/github-accessors.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/github-accessors.go deleted file mode 100644 index 8acb72b042..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/github-accessors.go +++ /dev/null @@ -1,23255 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by gen-accessors; DO NOT EDIT. -// Instead, please run "go generate ./..." as described here: -// https://github.com/google/go-github/blob/master/CONTRIBUTING.md#submitting-a-patch - -package github - -import ( - "encoding/json" - "time" -) - -// GetRetryAfter returns the RetryAfter field if it's non-nil, zero value otherwise. -func (a *AbuseRateLimitError) GetRetryAfter() time.Duration { - if a == nil || a.RetryAfter == nil { - return 0 - } - return *a.RetryAfter -} - -// GetGithubOwnedAllowed returns the GithubOwnedAllowed field if it's non-nil, zero value otherwise. -func (a *ActionsAllowed) GetGithubOwnedAllowed() bool { - if a == nil || a.GithubOwnedAllowed == nil { - return false - } - return *a.GithubOwnedAllowed -} - -// GetVerifiedAllowed returns the VerifiedAllowed field if it's non-nil, zero value otherwise. -func (a *ActionsAllowed) GetVerifiedAllowed() bool { - if a == nil || a.VerifiedAllowed == nil { - return false - } - return *a.VerifiedAllowed -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *ActionsCache) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *ActionsCache) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (a *ActionsCache) GetKey() string { - if a == nil || a.Key == nil { - return "" - } - return *a.Key -} - -// GetLastAccessedAt returns the LastAccessedAt field if it's non-nil, zero value otherwise. -func (a *ActionsCache) GetLastAccessedAt() Timestamp { - if a == nil || a.LastAccessedAt == nil { - return Timestamp{} - } - return *a.LastAccessedAt -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (a *ActionsCache) GetRef() string { - if a == nil || a.Ref == nil { - return "" - } - return *a.Ref -} - -// GetSizeInBytes returns the SizeInBytes field if it's non-nil, zero value otherwise. -func (a *ActionsCache) GetSizeInBytes() int64 { - if a == nil || a.SizeInBytes == nil { - return 0 - } - return *a.SizeInBytes -} - -// GetVersion returns the Version field if it's non-nil, zero value otherwise. -func (a *ActionsCache) GetVersion() string { - if a == nil || a.Version == nil { - return "" - } - return *a.Version -} - -// GetDirection returns the Direction field if it's non-nil, zero value otherwise. -func (a *ActionsCacheListOptions) GetDirection() string { - if a == nil || a.Direction == nil { - return "" - } - return *a.Direction -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (a *ActionsCacheListOptions) GetKey() string { - if a == nil || a.Key == nil { - return "" - } - return *a.Key -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (a *ActionsCacheListOptions) GetRef() string { - if a == nil || a.Ref == nil { - return "" - } - return *a.Ref -} - -// GetSort returns the Sort field if it's non-nil, zero value otherwise. -func (a *ActionsCacheListOptions) GetSort() string { - if a == nil || a.Sort == nil { - return "" - } - return *a.Sort -} - -// GetAllowedActions returns the AllowedActions field if it's non-nil, zero value otherwise. 
-func (a *ActionsPermissions) GetAllowedActions() string { - if a == nil || a.AllowedActions == nil { - return "" - } - return *a.AllowedActions -} - -// GetEnabledRepositories returns the EnabledRepositories field if it's non-nil, zero value otherwise. -func (a *ActionsPermissions) GetEnabledRepositories() string { - if a == nil || a.EnabledRepositories == nil { - return "" - } - return *a.EnabledRepositories -} - -// GetSelectedActionsURL returns the SelectedActionsURL field if it's non-nil, zero value otherwise. -func (a *ActionsPermissions) GetSelectedActionsURL() string { - if a == nil || a.SelectedActionsURL == nil { - return "" - } - return *a.SelectedActionsURL -} - -// GetAllowedActions returns the AllowedActions field if it's non-nil, zero value otherwise. -func (a *ActionsPermissionsRepository) GetAllowedActions() string { - if a == nil || a.AllowedActions == nil { - return "" - } - return *a.AllowedActions -} - -// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. -func (a *ActionsPermissionsRepository) GetEnabled() bool { - if a == nil || a.Enabled == nil { - return false - } - return *a.Enabled -} - -// GetSelectedActionsURL returns the SelectedActionsURL field if it's non-nil, zero value otherwise. -func (a *ActionsPermissionsRepository) GetSelectedActionsURL() string { - if a == nil || a.SelectedActionsURL == nil { - return "" - } - return *a.SelectedActionsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *ActionsVariable) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetSelectedRepositoriesURL returns the SelectedRepositoriesURL field if it's non-nil, zero value otherwise. -func (a *ActionsVariable) GetSelectedRepositoriesURL() string { - if a == nil || a.SelectedRepositoriesURL == nil { - return "" - } - return *a.SelectedRepositoriesURL -} - -// GetSelectedRepositoryIDs returns the SelectedRepositoryIDs field. -func (a *ActionsVariable) GetSelectedRepositoryIDs() *SelectedRepoIDs { - if a == nil { - return nil - } - return a.SelectedRepositoryIDs -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *ActionsVariable) GetUpdatedAt() Timestamp { - if a == nil || a.UpdatedAt == nil { - return Timestamp{} - } - return *a.UpdatedAt -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (a *ActionsVariable) GetVisibility() string { - if a == nil || a.Visibility == nil { - return "" - } - return *a.Visibility -} - -// GetCountryCode returns the CountryCode field if it's non-nil, zero value otherwise. -func (a *ActorLocation) GetCountryCode() string { - if a == nil || a.CountryCode == nil { - return "" - } - return *a.CountryCode -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (a *AdminEnforcedChanges) GetFrom() bool { - if a == nil || a.From == nil { - return false - } - return *a.From -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *AdminEnforcement) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetComments returns the Comments field. -func (a *AdminStats) GetComments() *CommentStats { - if a == nil { - return nil - } - return a.Comments -} - -// GetGists returns the Gists field. -func (a *AdminStats) GetGists() *GistStats { - if a == nil { - return nil - } - return a.Gists -} - -// GetHooks returns the Hooks field. 
-func (a *AdminStats) GetHooks() *HookStats { - if a == nil { - return nil - } - return a.Hooks -} - -// GetIssues returns the Issues field. -func (a *AdminStats) GetIssues() *IssueStats { - if a == nil { - return nil - } - return a.Issues -} - -// GetMilestones returns the Milestones field. -func (a *AdminStats) GetMilestones() *MilestoneStats { - if a == nil { - return nil - } - return a.Milestones -} - -// GetOrgs returns the Orgs field. -func (a *AdminStats) GetOrgs() *OrgStats { - if a == nil { - return nil - } - return a.Orgs -} - -// GetPages returns the Pages field. -func (a *AdminStats) GetPages() *PageStats { - if a == nil { - return nil - } - return a.Pages -} - -// GetPulls returns the Pulls field. -func (a *AdminStats) GetPulls() *PullStats { - if a == nil { - return nil - } - return a.Pulls -} - -// GetRepos returns the Repos field. -func (a *AdminStats) GetRepos() *RepoStats { - if a == nil { - return nil - } - return a.Repos -} - -// GetUsers returns the Users field. -func (a *AdminStats) GetUsers() *UserStats { - if a == nil { - return nil - } - return a.Users -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (a *AdvancedSecurity) GetStatus() string { - if a == nil || a.Status == nil { - return "" - } - return *a.Status -} - -// GetLastPushedDate returns the LastPushedDate field if it's non-nil, zero value otherwise. -func (a *AdvancedSecurityCommittersBreakdown) GetLastPushedDate() string { - if a == nil || a.LastPushedDate == nil { - return "" - } - return *a.LastPushedDate -} - -// GetUserLogin returns the UserLogin field if it's non-nil, zero value otherwise. -func (a *AdvancedSecurityCommittersBreakdown) GetUserLogin() string { - if a == nil || a.UserLogin == nil { - return "" - } - return *a.UserLogin -} - -// GetScore returns the Score field. -func (a *AdvisoryCVSs) GetScore() *float64 { - if a == nil { - return nil - } - return a.Score -} - -// GetVectorString returns the VectorString field if it's non-nil, zero value otherwise. -func (a *AdvisoryCVSs) GetVectorString() string { - if a == nil || a.VectorString == nil { - return "" - } - return *a.VectorString -} - -// GetCWEID returns the CWEID field if it's non-nil, zero value otherwise. -func (a *AdvisoryCWEs) GetCWEID() string { - if a == nil || a.CWEID == nil { - return "" - } - return *a.CWEID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *AdvisoryCWEs) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (a *AdvisoryIdentifier) GetType() string { - if a == nil || a.Type == nil { - return "" - } - return *a.Type -} - -// GetValue returns the Value field if it's non-nil, zero value otherwise. -func (a *AdvisoryIdentifier) GetValue() string { - if a == nil || a.Value == nil { - return "" - } - return *a.Value -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *AdvisoryReference) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetFirstPatchedVersion returns the FirstPatchedVersion field. -func (a *AdvisoryVulnerability) GetFirstPatchedVersion() *FirstPatchedVersion { - if a == nil { - return nil - } - return a.FirstPatchedVersion -} - -// GetPackage returns the Package field. 
-func (a *AdvisoryVulnerability) GetPackage() *VulnerabilityPackage { - if a == nil { - return nil - } - return a.Package -} - -// GetSeverity returns the Severity field if it's non-nil, zero value otherwise. -func (a *AdvisoryVulnerability) GetSeverity() string { - if a == nil || a.Severity == nil { - return "" - } - return *a.Severity -} - -// GetVulnerableVersionRange returns the VulnerableVersionRange field if it's non-nil, zero value otherwise. -func (a *AdvisoryVulnerability) GetVulnerableVersionRange() string { - if a == nil || a.VulnerableVersionRange == nil { - return "" - } - return *a.VulnerableVersionRange -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (a *Alert) GetClosedAt() Timestamp { - if a == nil || a.ClosedAt == nil { - return Timestamp{} - } - return *a.ClosedAt -} - -// GetClosedBy returns the ClosedBy field. -func (a *Alert) GetClosedBy() *User { - if a == nil { - return nil - } - return a.ClosedBy -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *Alert) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetDismissedAt returns the DismissedAt field if it's non-nil, zero value otherwise. -func (a *Alert) GetDismissedAt() Timestamp { - if a == nil || a.DismissedAt == nil { - return Timestamp{} - } - return *a.DismissedAt -} - -// GetDismissedBy returns the DismissedBy field. -func (a *Alert) GetDismissedBy() *User { - if a == nil { - return nil - } - return a.DismissedBy -} - -// GetDismissedComment returns the DismissedComment field if it's non-nil, zero value otherwise. -func (a *Alert) GetDismissedComment() string { - if a == nil || a.DismissedComment == nil { - return "" - } - return *a.DismissedComment -} - -// GetDismissedReason returns the DismissedReason field if it's non-nil, zero value otherwise. -func (a *Alert) GetDismissedReason() string { - if a == nil || a.DismissedReason == nil { - return "" - } - return *a.DismissedReason -} - -// GetFixedAt returns the FixedAt field if it's non-nil, zero value otherwise. -func (a *Alert) GetFixedAt() Timestamp { - if a == nil || a.FixedAt == nil { - return Timestamp{} - } - return *a.FixedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (a *Alert) GetHTMLURL() string { - if a == nil || a.HTMLURL == nil { - return "" - } - return *a.HTMLURL -} - -// GetInstancesURL returns the InstancesURL field if it's non-nil, zero value otherwise. -func (a *Alert) GetInstancesURL() string { - if a == nil || a.InstancesURL == nil { - return "" - } - return *a.InstancesURL -} - -// GetMostRecentInstance returns the MostRecentInstance field. -func (a *Alert) GetMostRecentInstance() *MostRecentInstance { - if a == nil { - return nil - } - return a.MostRecentInstance -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (a *Alert) GetNumber() int { - if a == nil || a.Number == nil { - return 0 - } - return *a.Number -} - -// GetRepository returns the Repository field. -func (a *Alert) GetRepository() *Repository { - if a == nil { - return nil - } - return a.Repository -} - -// GetRule returns the Rule field. -func (a *Alert) GetRule() *Rule { - if a == nil { - return nil - } - return a.Rule -} - -// GetRuleDescription returns the RuleDescription field if it's non-nil, zero value otherwise. 
-func (a *Alert) GetRuleDescription() string { - if a == nil || a.RuleDescription == nil { - return "" - } - return *a.RuleDescription -} - -// GetRuleID returns the RuleID field if it's non-nil, zero value otherwise. -func (a *Alert) GetRuleID() string { - if a == nil || a.RuleID == nil { - return "" - } - return *a.RuleID -} - -// GetRuleSeverity returns the RuleSeverity field if it's non-nil, zero value otherwise. -func (a *Alert) GetRuleSeverity() string { - if a == nil || a.RuleSeverity == nil { - return "" - } - return *a.RuleSeverity -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (a *Alert) GetState() string { - if a == nil || a.State == nil { - return "" - } - return *a.State -} - -// GetTool returns the Tool field. -func (a *Alert) GetTool() *Tool { - if a == nil { - return nil - } - return a.Tool -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *Alert) GetUpdatedAt() Timestamp { - if a == nil || a.UpdatedAt == nil { - return Timestamp{} - } - return *a.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *Alert) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (a *AllowDeletionsEnforcementLevelChanges) GetFrom() string { - if a == nil || a.From == nil { - return "" - } - return *a.From -} - -// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. -func (a *AllowForkSyncing) GetEnabled() bool { - if a == nil || a.Enabled == nil { - return false - } - return *a.Enabled -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (a *AnalysesListOptions) GetRef() string { - if a == nil || a.Ref == nil { - return "" - } - return *a.Ref -} - -// GetSarifID returns the SarifID field if it's non-nil, zero value otherwise. -func (a *AnalysesListOptions) GetSarifID() string { - if a == nil || a.SarifID == nil { - return "" - } - return *a.SarifID -} - -// GetSSHKeyFingerprints returns the SSHKeyFingerprints map if it's non-nil, an empty map otherwise. -func (a *APIMeta) GetSSHKeyFingerprints() map[string]string { - if a == nil || a.SSHKeyFingerprints == nil { - return map[string]string{} - } - return a.SSHKeyFingerprints -} - -// GetVerifiablePasswordAuthentication returns the VerifiablePasswordAuthentication field if it's non-nil, zero value otherwise. -func (a *APIMeta) GetVerifiablePasswordAuthentication() bool { - if a == nil || a.VerifiablePasswordAuthentication == nil { - return false - } - return *a.VerifiablePasswordAuthentication -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *App) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (a *App) GetDescription() string { - if a == nil || a.Description == nil { - return "" - } - return *a.Description -} - -// GetExternalURL returns the ExternalURL field if it's non-nil, zero value otherwise. -func (a *App) GetExternalURL() string { - if a == nil || a.ExternalURL == nil { - return "" - } - return *a.ExternalURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. 
-func (a *App) GetHTMLURL() string { - if a == nil || a.HTMLURL == nil { - return "" - } - return *a.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *App) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetInstallationsCount returns the InstallationsCount field if it's non-nil, zero value otherwise. -func (a *App) GetInstallationsCount() int { - if a == nil || a.InstallationsCount == nil { - return 0 - } - return *a.InstallationsCount -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *App) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (a *App) GetNodeID() string { - if a == nil || a.NodeID == nil { - return "" - } - return *a.NodeID -} - -// GetOwner returns the Owner field. -func (a *App) GetOwner() *User { - if a == nil { - return nil - } - return a.Owner -} - -// GetPermissions returns the Permissions field. -func (a *App) GetPermissions() *InstallationPermissions { - if a == nil { - return nil - } - return a.Permissions -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (a *App) GetSlug() string { - if a == nil || a.Slug == nil { - return "" - } - return *a.Slug -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *App) GetUpdatedAt() Timestamp { - if a == nil || a.UpdatedAt == nil { - return Timestamp{} - } - return *a.UpdatedAt -} - -// GetClientID returns the ClientID field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetClientID() string { - if a == nil || a.ClientID == nil { - return "" - } - return *a.ClientID -} - -// GetClientSecret returns the ClientSecret field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetClientSecret() string { - if a == nil || a.ClientSecret == nil { - return "" - } - return *a.ClientSecret -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetDescription() string { - if a == nil || a.Description == nil { - return "" - } - return *a.Description -} - -// GetExternalURL returns the ExternalURL field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetExternalURL() string { - if a == nil || a.ExternalURL == nil { - return "" - } - return *a.ExternalURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetHTMLURL() string { - if a == nil || a.HTMLURL == nil { - return "" - } - return *a.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetNodeID() string { - if a == nil || a.NodeID == nil { - return "" - } - return *a.NodeID -} - -// GetOwner returns the Owner field. 
-func (a *AppConfig) GetOwner() *User { - if a == nil { - return nil - } - return a.Owner -} - -// GetPEM returns the PEM field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetPEM() string { - if a == nil || a.PEM == nil { - return "" - } - return *a.PEM -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetSlug() string { - if a == nil || a.Slug == nil { - return "" - } - return *a.Slug -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetUpdatedAt() Timestamp { - if a == nil || a.UpdatedAt == nil { - return Timestamp{} - } - return *a.UpdatedAt -} - -// GetWebhookSecret returns the WebhookSecret field if it's non-nil, zero value otherwise. -func (a *AppConfig) GetWebhookSecret() string { - if a == nil || a.WebhookSecret == nil { - return "" - } - return *a.WebhookSecret -} - -// GetArchiveDownloadURL returns the ArchiveDownloadURL field if it's non-nil, zero value otherwise. -func (a *Artifact) GetArchiveDownloadURL() string { - if a == nil || a.ArchiveDownloadURL == nil { - return "" - } - return *a.ArchiveDownloadURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *Artifact) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetExpired returns the Expired field if it's non-nil, zero value otherwise. -func (a *Artifact) GetExpired() bool { - if a == nil || a.Expired == nil { - return false - } - return *a.Expired -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (a *Artifact) GetExpiresAt() Timestamp { - if a == nil || a.ExpiresAt == nil { - return Timestamp{} - } - return *a.ExpiresAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *Artifact) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *Artifact) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (a *Artifact) GetNodeID() string { - if a == nil || a.NodeID == nil { - return "" - } - return *a.NodeID -} - -// GetSizeInBytes returns the SizeInBytes field if it's non-nil, zero value otherwise. -func (a *Artifact) GetSizeInBytes() int64 { - if a == nil || a.SizeInBytes == nil { - return 0 - } - return *a.SizeInBytes -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *Artifact) GetUpdatedAt() Timestamp { - if a == nil || a.UpdatedAt == nil { - return Timestamp{} - } - return *a.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *Artifact) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetWorkflowRun returns the WorkflowRun field. -func (a *Artifact) GetWorkflowRun() *ArtifactWorkflowRun { - if a == nil { - return nil - } - return a.WorkflowRun -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (a *ArtifactList) GetTotalCount() int64 { - if a == nil || a.TotalCount == nil { - return 0 - } - return *a.TotalCount -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. 
-func (a *ArtifactWorkflowRun) GetHeadBranch() string { - if a == nil || a.HeadBranch == nil { - return "" - } - return *a.HeadBranch -} - -// GetHeadRepositoryID returns the HeadRepositoryID field if it's non-nil, zero value otherwise. -func (a *ArtifactWorkflowRun) GetHeadRepositoryID() int64 { - if a == nil || a.HeadRepositoryID == nil { - return 0 - } - return *a.HeadRepositoryID -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (a *ArtifactWorkflowRun) GetHeadSHA() string { - if a == nil || a.HeadSHA == nil { - return "" - } - return *a.HeadSHA -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *ArtifactWorkflowRun) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise. -func (a *ArtifactWorkflowRun) GetRepositoryID() int64 { - if a == nil || a.RepositoryID == nil { - return 0 - } - return *a.RepositoryID -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (a *Attachment) GetBody() string { - if a == nil || a.Body == nil { - return "" - } - return *a.Body -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *Attachment) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (a *Attachment) GetTitle() string { - if a == nil || a.Title == nil { - return "" - } - return *a.Title -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetAction() string { - if a == nil || a.Action == nil { - return "" - } - return *a.Action -} - -// GetActive returns the Active field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetActive() bool { - if a == nil || a.Active == nil { - return false - } - return *a.Active -} - -// GetActiveWas returns the ActiveWas field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetActiveWas() bool { - if a == nil || a.ActiveWas == nil { - return false - } - return *a.ActiveWas -} - -// GetActor returns the Actor field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetActor() string { - if a == nil || a.Actor == nil { - return "" - } - return *a.Actor -} - -// GetActorIP returns the ActorIP field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetActorIP() string { - if a == nil || a.ActorIP == nil { - return "" - } - return *a.ActorIP -} - -// GetActorLocation returns the ActorLocation field. -func (a *AuditEntry) GetActorLocation() *ActorLocation { - if a == nil { - return nil - } - return a.ActorLocation -} - -// GetBlockedUser returns the BlockedUser field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetBlockedUser() string { - if a == nil || a.BlockedUser == nil { - return "" - } - return *a.BlockedUser -} - -// GetBusiness returns the Business field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetBusiness() string { - if a == nil || a.Business == nil { - return "" - } - return *a.Business -} - -// GetCancelledAt returns the CancelledAt field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetCancelledAt() Timestamp { - if a == nil || a.CancelledAt == nil { - return Timestamp{} - } - return *a.CancelledAt -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. 
-func (a *AuditEntry) GetCompletedAt() Timestamp { - if a == nil || a.CompletedAt == nil { - return Timestamp{} - } - return *a.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetConclusion() string { - if a == nil || a.Conclusion == nil { - return "" - } - return *a.Conclusion -} - -// GetConfig returns the Config field. -func (a *AuditEntry) GetConfig() *HookConfig { - if a == nil { - return nil - } - return a.Config -} - -// GetConfigWas returns the ConfigWas field. -func (a *AuditEntry) GetConfigWas() *HookConfig { - if a == nil { - return nil - } - return a.ConfigWas -} - -// GetContentType returns the ContentType field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetContentType() string { - if a == nil || a.ContentType == nil { - return "" - } - return *a.ContentType -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetDeployKeyFingerprint returns the DeployKeyFingerprint field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetDeployKeyFingerprint() string { - if a == nil || a.DeployKeyFingerprint == nil { - return "" - } - return *a.DeployKeyFingerprint -} - -// GetDocumentID returns the DocumentID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetDocumentID() string { - if a == nil || a.DocumentID == nil { - return "" - } - return *a.DocumentID -} - -// GetEmoji returns the Emoji field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetEmoji() string { - if a == nil || a.Emoji == nil { - return "" - } - return *a.Emoji -} - -// GetEnvironmentName returns the EnvironmentName field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetEnvironmentName() string { - if a == nil || a.EnvironmentName == nil { - return "" - } - return *a.EnvironmentName -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetEvent() string { - if a == nil || a.Event == nil { - return "" - } - return *a.Event -} - -// GetExplanation returns the Explanation field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetExplanation() string { - if a == nil || a.Explanation == nil { - return "" - } - return *a.Explanation -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetFingerprint() string { - if a == nil || a.Fingerprint == nil { - return "" - } - return *a.Fingerprint -} - -// GetHashedToken returns the HashedToken field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetHashedToken() string { - if a == nil || a.HashedToken == nil { - return "" - } - return *a.HashedToken -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetHeadBranch() string { - if a == nil || a.HeadBranch == nil { - return "" - } - return *a.HeadBranch -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetHeadSHA() string { - if a == nil || a.HeadSHA == nil { - return "" - } - return *a.HeadSHA -} - -// GetHookID returns the HookID field if it's non-nil, zero value otherwise. 
-func (a *AuditEntry) GetHookID() int64 { - if a == nil || a.HookID == nil { - return 0 - } - return *a.HookID -} - -// GetIsHostedRunner returns the IsHostedRunner field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetIsHostedRunner() bool { - if a == nil || a.IsHostedRunner == nil { - return false - } - return *a.IsHostedRunner -} - -// GetJobName returns the JobName field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetJobName() string { - if a == nil || a.JobName == nil { - return "" - } - return *a.JobName -} - -// GetJobWorkflowRef returns the JobWorkflowRef field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetJobWorkflowRef() string { - if a == nil || a.JobWorkflowRef == nil { - return "" - } - return *a.JobWorkflowRef -} - -// GetLimitedAvailability returns the LimitedAvailability field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetLimitedAvailability() bool { - if a == nil || a.LimitedAvailability == nil { - return false - } - return *a.LimitedAvailability -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetMessage() string { - if a == nil || a.Message == nil { - return "" - } - return *a.Message -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetOAuthApplicationID returns the OAuthApplicationID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetOAuthApplicationID() int64 { - if a == nil || a.OAuthApplicationID == nil { - return 0 - } - return *a.OAuthApplicationID -} - -// GetOldPermission returns the OldPermission field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetOldPermission() string { - if a == nil || a.OldPermission == nil { - return "" - } - return *a.OldPermission -} - -// GetOldUser returns the OldUser field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetOldUser() string { - if a == nil || a.OldUser == nil { - return "" - } - return *a.OldUser -} - -// GetOpenSSHPublicKey returns the OpenSSHPublicKey field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetOpenSSHPublicKey() string { - if a == nil || a.OpenSSHPublicKey == nil { - return "" - } - return *a.OpenSSHPublicKey -} - -// GetOperationType returns the OperationType field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetOperationType() string { - if a == nil || a.OperationType == nil { - return "" - } - return *a.OperationType -} - -// GetOrg returns the Org field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetOrg() string { - if a == nil || a.Org == nil { - return "" - } - return *a.Org -} - -// GetOrgID returns the OrgID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetOrgID() int64 { - if a == nil || a.OrgID == nil { - return 0 - } - return *a.OrgID -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetPermission() string { - if a == nil || a.Permission == nil { - return "" - } - return *a.Permission -} - -// GetPreviousVisibility returns the PreviousVisibility field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetPreviousVisibility() string { - if a == nil || a.PreviousVisibility == nil { - return "" - } - return *a.PreviousVisibility -} - -// GetProgrammaticAccessType returns the ProgrammaticAccessType field if it's non-nil, zero value otherwise. 
-func (a *AuditEntry) GetProgrammaticAccessType() string { - if a == nil || a.ProgrammaticAccessType == nil { - return "" - } - return *a.ProgrammaticAccessType -} - -// GetPullRequestID returns the PullRequestID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetPullRequestID() int64 { - if a == nil || a.PullRequestID == nil { - return 0 - } - return *a.PullRequestID -} - -// GetPullRequestTitle returns the PullRequestTitle field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetPullRequestTitle() string { - if a == nil || a.PullRequestTitle == nil { - return "" - } - return *a.PullRequestTitle -} - -// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetPullRequestURL() string { - if a == nil || a.PullRequestURL == nil { - return "" - } - return *a.PullRequestURL -} - -// GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetReadOnly() string { - if a == nil || a.ReadOnly == nil { - return "" - } - return *a.ReadOnly -} - -// GetRepo returns the Repo field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRepo() string { - if a == nil || a.Repo == nil { - return "" - } - return *a.Repo -} - -// GetRepository returns the Repository field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRepository() string { - if a == nil || a.Repository == nil { - return "" - } - return *a.Repository -} - -// GetRepositoryPublic returns the RepositoryPublic field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRepositoryPublic() bool { - if a == nil || a.RepositoryPublic == nil { - return false - } - return *a.RepositoryPublic -} - -// GetRunAttempt returns the RunAttempt field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRunAttempt() int64 { - if a == nil || a.RunAttempt == nil { - return 0 - } - return *a.RunAttempt -} - -// GetRunnerGroupID returns the RunnerGroupID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRunnerGroupID() int64 { - if a == nil || a.RunnerGroupID == nil { - return 0 - } - return *a.RunnerGroupID -} - -// GetRunnerGroupName returns the RunnerGroupName field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRunnerGroupName() string { - if a == nil || a.RunnerGroupName == nil { - return "" - } - return *a.RunnerGroupName -} - -// GetRunnerID returns the RunnerID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRunnerID() int64 { - if a == nil || a.RunnerID == nil { - return 0 - } - return *a.RunnerID -} - -// GetRunnerName returns the RunnerName field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRunnerName() string { - if a == nil || a.RunnerName == nil { - return "" - } - return *a.RunnerName -} - -// GetRunNumber returns the RunNumber field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetRunNumber() int64 { - if a == nil || a.RunNumber == nil { - return 0 - } - return *a.RunNumber -} - -// GetSourceVersion returns the SourceVersion field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetSourceVersion() string { - if a == nil || a.SourceVersion == nil { - return "" - } - return *a.SourceVersion -} - -// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise. 
-func (a *AuditEntry) GetStartedAt() Timestamp { - if a == nil || a.StartedAt == nil { - return Timestamp{} - } - return *a.StartedAt -} - -// GetTargetLogin returns the TargetLogin field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTargetLogin() string { - if a == nil || a.TargetLogin == nil { - return "" - } - return *a.TargetLogin -} - -// GetTargetVersion returns the TargetVersion field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTargetVersion() string { - if a == nil || a.TargetVersion == nil { - return "" - } - return *a.TargetVersion -} - -// GetTeam returns the Team field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTeam() string { - if a == nil || a.Team == nil { - return "" - } - return *a.Team -} - -// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTimestamp() Timestamp { - if a == nil || a.Timestamp == nil { - return Timestamp{} - } - return *a.Timestamp -} - -// GetTokenID returns the TokenID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTokenID() int64 { - if a == nil || a.TokenID == nil { - return 0 - } - return *a.TokenID -} - -// GetTokenScopes returns the TokenScopes field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTokenScopes() string { - if a == nil || a.TokenScopes == nil { - return "" - } - return *a.TokenScopes -} - -// GetTopic returns the Topic field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTopic() string { - if a == nil || a.Topic == nil { - return "" - } - return *a.Topic -} - -// GetTransportProtocol returns the TransportProtocol field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTransportProtocol() int { - if a == nil || a.TransportProtocol == nil { - return 0 - } - return *a.TransportProtocol -} - -// GetTransportProtocolName returns the TransportProtocolName field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTransportProtocolName() string { - if a == nil || a.TransportProtocolName == nil { - return "" - } - return *a.TransportProtocolName -} - -// GetTriggerID returns the TriggerID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetTriggerID() int64 { - if a == nil || a.TriggerID == nil { - return 0 - } - return *a.TriggerID -} - -// GetUser returns the User field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetUser() string { - if a == nil || a.User == nil { - return "" - } - return *a.User -} - -// GetUserAgent returns the UserAgent field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetUserAgent() string { - if a == nil || a.UserAgent == nil { - return "" - } - return *a.UserAgent -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetVisibility() string { - if a == nil || a.Visibility == nil { - return "" - } - return *a.Visibility -} - -// GetWorkflowID returns the WorkflowID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetWorkflowID() int64 { - if a == nil || a.WorkflowID == nil { - return 0 - } - return *a.WorkflowID -} - -// GetWorkflowRunID returns the WorkflowRunID field if it's non-nil, zero value otherwise. -func (a *AuditEntry) GetWorkflowRunID() int64 { - if a == nil || a.WorkflowRunID == nil { - return 0 - } - return *a.WorkflowRunID -} - -// GetApp returns the App field. 
-func (a *Authorization) GetApp() *AuthorizationApp { - if a == nil { - return nil - } - return a.App -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (a *Authorization) GetCreatedAt() Timestamp { - if a == nil || a.CreatedAt == nil { - return Timestamp{} - } - return *a.CreatedAt -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. -func (a *Authorization) GetFingerprint() string { - if a == nil || a.Fingerprint == nil { - return "" - } - return *a.Fingerprint -} - -// GetHashedToken returns the HashedToken field if it's non-nil, zero value otherwise. -func (a *Authorization) GetHashedToken() string { - if a == nil || a.HashedToken == nil { - return "" - } - return *a.HashedToken -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *Authorization) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (a *Authorization) GetNote() string { - if a == nil || a.Note == nil { - return "" - } - return *a.Note -} - -// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise. -func (a *Authorization) GetNoteURL() string { - if a == nil || a.NoteURL == nil { - return "" - } - return *a.NoteURL -} - -// GetToken returns the Token field if it's non-nil, zero value otherwise. -func (a *Authorization) GetToken() string { - if a == nil || a.Token == nil { - return "" - } - return *a.Token -} - -// GetTokenLastEight returns the TokenLastEight field if it's non-nil, zero value otherwise. -func (a *Authorization) GetTokenLastEight() string { - if a == nil || a.TokenLastEight == nil { - return "" - } - return *a.TokenLastEight -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (a *Authorization) GetUpdatedAt() Timestamp { - if a == nil || a.UpdatedAt == nil { - return Timestamp{} - } - return *a.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *Authorization) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetUser returns the User field. -func (a *Authorization) GetUser() *User { - if a == nil { - return nil - } - return a.User -} - -// GetClientID returns the ClientID field if it's non-nil, zero value otherwise. -func (a *AuthorizationApp) GetClientID() string { - if a == nil || a.ClientID == nil { - return "" - } - return *a.ClientID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (a *AuthorizationApp) GetName() string { - if a == nil || a.Name == nil { - return "" - } - return *a.Name -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (a *AuthorizationApp) GetURL() string { - if a == nil || a.URL == nil { - return "" - } - return *a.URL -} - -// GetClientID returns the ClientID field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetClientID() string { - if a == nil || a.ClientID == nil { - return "" - } - return *a.ClientID -} - -// GetClientSecret returns the ClientSecret field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetClientSecret() string { - if a == nil || a.ClientSecret == nil { - return "" - } - return *a.ClientSecret -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. 
-func (a *AuthorizationRequest) GetFingerprint() string { - if a == nil || a.Fingerprint == nil { - return "" - } - return *a.Fingerprint -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetNote() string { - if a == nil || a.Note == nil { - return "" - } - return *a.Note -} - -// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise. -func (a *AuthorizationRequest) GetNoteURL() string { - if a == nil || a.NoteURL == nil { - return "" - } - return *a.NoteURL -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. -func (a *AuthorizationUpdateRequest) GetFingerprint() string { - if a == nil || a.Fingerprint == nil { - return "" - } - return *a.Fingerprint -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (a *AuthorizationUpdateRequest) GetNote() string { - if a == nil || a.Note == nil { - return "" - } - return *a.Note -} - -// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise. -func (a *AuthorizationUpdateRequest) GetNoteURL() string { - if a == nil || a.NoteURL == nil { - return "" - } - return *a.NoteURL -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (a *AuthorizedActorsOnly) GetFrom() bool { - if a == nil || a.From == nil { - return false - } - return *a.From -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (a *AuthorizedDismissalActorsOnlyChanges) GetFrom() bool { - if a == nil || a.From == nil { - return false - } - return *a.From -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (a *Autolink) GetID() int64 { - if a == nil || a.ID == nil { - return 0 - } - return *a.ID -} - -// GetIsAlphanumeric returns the IsAlphanumeric field if it's non-nil, zero value otherwise. -func (a *Autolink) GetIsAlphanumeric() bool { - if a == nil || a.IsAlphanumeric == nil { - return false - } - return *a.IsAlphanumeric -} - -// GetKeyPrefix returns the KeyPrefix field if it's non-nil, zero value otherwise. -func (a *Autolink) GetKeyPrefix() string { - if a == nil || a.KeyPrefix == nil { - return "" - } - return *a.KeyPrefix -} - -// GetURLTemplate returns the URLTemplate field if it's non-nil, zero value otherwise. -func (a *Autolink) GetURLTemplate() string { - if a == nil || a.URLTemplate == nil { - return "" - } - return *a.URLTemplate -} - -// GetIsAlphanumeric returns the IsAlphanumeric field if it's non-nil, zero value otherwise. -func (a *AutolinkOptions) GetIsAlphanumeric() bool { - if a == nil || a.IsAlphanumeric == nil { - return false - } - return *a.IsAlphanumeric -} - -// GetKeyPrefix returns the KeyPrefix field if it's non-nil, zero value otherwise. -func (a *AutolinkOptions) GetKeyPrefix() string { - if a == nil || a.KeyPrefix == nil { - return "" - } - return *a.KeyPrefix -} - -// GetURLTemplate returns the URLTemplate field if it's non-nil, zero value otherwise. -func (a *AutolinkOptions) GetURLTemplate() string { - if a == nil || a.URLTemplate == nil { - return "" - } - return *a.URLTemplate -} - -// GetAppID returns the AppID field if it's non-nil, zero value otherwise. -func (a *AutoTriggerCheck) GetAppID() int64 { - if a == nil || a.AppID == nil { - return 0 - } - return *a.AppID -} - -// GetSetting returns the Setting field if it's non-nil, zero value otherwise. 
-func (a *AutoTriggerCheck) GetSetting() bool { - if a == nil || a.Setting == nil { - return false - } - return *a.Setting -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (b *Blob) GetContent() string { - if b == nil || b.Content == nil { - return "" - } - return *b.Content -} - -// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise. -func (b *Blob) GetEncoding() string { - if b == nil || b.Encoding == nil { - return "" - } - return *b.Encoding -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (b *Blob) GetNodeID() string { - if b == nil || b.NodeID == nil { - return "" - } - return *b.NodeID -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (b *Blob) GetSHA() string { - if b == nil || b.SHA == nil { - return "" - } - return *b.SHA -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (b *Blob) GetSize() int { - if b == nil || b.Size == nil { - return 0 - } - return *b.Size -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (b *Blob) GetURL() string { - if b == nil || b.URL == nil { - return "" - } - return *b.URL -} - -// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. -func (b *BlockCreations) GetEnabled() bool { - if b == nil || b.Enabled == nil { - return false - } - return *b.Enabled -} - -// GetCommit returns the Commit field. -func (b *Branch) GetCommit() *RepositoryCommit { - if b == nil { - return nil - } - return b.Commit -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (b *Branch) GetName() string { - if b == nil || b.Name == nil { - return "" - } - return *b.Name -} - -// GetProtected returns the Protected field if it's non-nil, zero value otherwise. -func (b *Branch) GetProtected() bool { - if b == nil || b.Protected == nil { - return false - } - return *b.Protected -} - -// GetCommit returns the Commit field. -func (b *BranchCommit) GetCommit() *Commit { - if b == nil { - return nil - } - return b.Commit -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (b *BranchCommit) GetName() string { - if b == nil || b.Name == nil { - return "" - } - return *b.Name -} - -// GetProtected returns the Protected field if it's non-nil, zero value otherwise. -func (b *BranchCommit) GetProtected() bool { - if b == nil || b.Protected == nil { - return false - } - return *b.Protected -} - -// GetProtected returns the Protected field if it's non-nil, zero value otherwise. -func (b *BranchListOptions) GetProtected() bool { - if b == nil || b.Protected == nil { - return false - } - return *b.Protected -} - -// GetCustomBranchPolicies returns the CustomBranchPolicies field if it's non-nil, zero value otherwise. -func (b *BranchPolicy) GetCustomBranchPolicies() bool { - if b == nil || b.CustomBranchPolicies == nil { - return false - } - return *b.CustomBranchPolicies -} - -// GetProtectedBranches returns the ProtectedBranches field if it's non-nil, zero value otherwise. -func (b *BranchPolicy) GetProtectedBranches() bool { - if b == nil || b.ProtectedBranches == nil { - return false - } - return *b.ProtectedBranches -} - -// GetAdminEnforced returns the AdminEnforced field if it's non-nil, zero value otherwise. 
-func (b *BranchProtectionRule) GetAdminEnforced() bool { - if b == nil || b.AdminEnforced == nil { - return false - } - return *b.AdminEnforced -} - -// GetAllowDeletionsEnforcementLevel returns the AllowDeletionsEnforcementLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetAllowDeletionsEnforcementLevel() string { - if b == nil || b.AllowDeletionsEnforcementLevel == nil { - return "" - } - return *b.AllowDeletionsEnforcementLevel -} - -// GetAllowForcePushesEnforcementLevel returns the AllowForcePushesEnforcementLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetAllowForcePushesEnforcementLevel() string { - if b == nil || b.AllowForcePushesEnforcementLevel == nil { - return "" - } - return *b.AllowForcePushesEnforcementLevel -} - -// GetAuthorizedActorsOnly returns the AuthorizedActorsOnly field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetAuthorizedActorsOnly() bool { - if b == nil || b.AuthorizedActorsOnly == nil { - return false - } - return *b.AuthorizedActorsOnly -} - -// GetAuthorizedDismissalActorsOnly returns the AuthorizedDismissalActorsOnly field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetAuthorizedDismissalActorsOnly() bool { - if b == nil || b.AuthorizedDismissalActorsOnly == nil { - return false - } - return *b.AuthorizedDismissalActorsOnly -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetCreatedAt() Timestamp { - if b == nil || b.CreatedAt == nil { - return Timestamp{} - } - return *b.CreatedAt -} - -// GetDismissStaleReviewsOnPush returns the DismissStaleReviewsOnPush field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetDismissStaleReviewsOnPush() bool { - if b == nil || b.DismissStaleReviewsOnPush == nil { - return false - } - return *b.DismissStaleReviewsOnPush -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetID() int64 { - if b == nil || b.ID == nil { - return 0 - } - return *b.ID -} - -// GetIgnoreApprovalsFromContributors returns the IgnoreApprovalsFromContributors field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetIgnoreApprovalsFromContributors() bool { - if b == nil || b.IgnoreApprovalsFromContributors == nil { - return false - } - return *b.IgnoreApprovalsFromContributors -} - -// GetLinearHistoryRequirementEnforcementLevel returns the LinearHistoryRequirementEnforcementLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetLinearHistoryRequirementEnforcementLevel() string { - if b == nil || b.LinearHistoryRequirementEnforcementLevel == nil { - return "" - } - return *b.LinearHistoryRequirementEnforcementLevel -} - -// GetMergeQueueEnforcementLevel returns the MergeQueueEnforcementLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetMergeQueueEnforcementLevel() string { - if b == nil || b.MergeQueueEnforcementLevel == nil { - return "" - } - return *b.MergeQueueEnforcementLevel -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetName() string { - if b == nil || b.Name == nil { - return "" - } - return *b.Name -} - -// GetPullRequestReviewsEnforcementLevel returns the PullRequestReviewsEnforcementLevel field if it's non-nil, zero value otherwise. 
-func (b *BranchProtectionRule) GetPullRequestReviewsEnforcementLevel() string { - if b == nil || b.PullRequestReviewsEnforcementLevel == nil { - return "" - } - return *b.PullRequestReviewsEnforcementLevel -} - -// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetRepositoryID() int64 { - if b == nil || b.RepositoryID == nil { - return 0 - } - return *b.RepositoryID -} - -// GetRequireCodeOwnerReview returns the RequireCodeOwnerReview field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetRequireCodeOwnerReview() bool { - if b == nil || b.RequireCodeOwnerReview == nil { - return false - } - return *b.RequireCodeOwnerReview -} - -// GetRequiredApprovingReviewCount returns the RequiredApprovingReviewCount field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetRequiredApprovingReviewCount() int { - if b == nil || b.RequiredApprovingReviewCount == nil { - return 0 - } - return *b.RequiredApprovingReviewCount -} - -// GetRequiredConversationResolutionLevel returns the RequiredConversationResolutionLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetRequiredConversationResolutionLevel() string { - if b == nil || b.RequiredConversationResolutionLevel == nil { - return "" - } - return *b.RequiredConversationResolutionLevel -} - -// GetRequiredDeploymentsEnforcementLevel returns the RequiredDeploymentsEnforcementLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetRequiredDeploymentsEnforcementLevel() string { - if b == nil || b.RequiredDeploymentsEnforcementLevel == nil { - return "" - } - return *b.RequiredDeploymentsEnforcementLevel -} - -// GetRequiredStatusChecksEnforcementLevel returns the RequiredStatusChecksEnforcementLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetRequiredStatusChecksEnforcementLevel() string { - if b == nil || b.RequiredStatusChecksEnforcementLevel == nil { - return "" - } - return *b.RequiredStatusChecksEnforcementLevel -} - -// GetSignatureRequirementEnforcementLevel returns the SignatureRequirementEnforcementLevel field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetSignatureRequirementEnforcementLevel() string { - if b == nil || b.SignatureRequirementEnforcementLevel == nil { - return "" - } - return *b.SignatureRequirementEnforcementLevel -} - -// GetStrictRequiredStatusChecksPolicy returns the StrictRequiredStatusChecksPolicy field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetStrictRequiredStatusChecksPolicy() bool { - if b == nil || b.StrictRequiredStatusChecksPolicy == nil { - return false - } - return *b.StrictRequiredStatusChecksPolicy -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRule) GetUpdatedAt() Timestamp { - if b == nil || b.UpdatedAt == nil { - return Timestamp{} - } - return *b.UpdatedAt -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (b *BranchProtectionRuleEvent) GetAction() string { - if b == nil || b.Action == nil { - return "" - } - return *b.Action -} - -// GetChanges returns the Changes field. -func (b *BranchProtectionRuleEvent) GetChanges() *ProtectionChanges { - if b == nil { - return nil - } - return b.Changes -} - -// GetInstallation returns the Installation field. 
-func (b *BranchProtectionRuleEvent) GetInstallation() *Installation { - if b == nil { - return nil - } - return b.Installation -} - -// GetOrg returns the Org field. -func (b *BranchProtectionRuleEvent) GetOrg() *Organization { - if b == nil { - return nil - } - return b.Org -} - -// GetRepo returns the Repo field. -func (b *BranchProtectionRuleEvent) GetRepo() *Repository { - if b == nil { - return nil - } - return b.Repo -} - -// GetRule returns the Rule field. -func (b *BranchProtectionRuleEvent) GetRule() *BranchProtectionRule { - if b == nil { - return nil - } - return b.Rule -} - -// GetSender returns the Sender field. -func (b *BranchProtectionRuleEvent) GetSender() *User { - if b == nil { - return nil - } - return b.Sender -} - -// GetActorID returns the ActorID field if it's non-nil, zero value otherwise. -func (b *BypassActor) GetActorID() int64 { - if b == nil || b.ActorID == nil { - return 0 - } - return *b.ActorID -} - -// GetActorType returns the ActorType field if it's non-nil, zero value otherwise. -func (b *BypassActor) GetActorType() string { - if b == nil || b.ActorType == nil { - return "" - } - return *b.ActorType -} - -// GetApp returns the App field. -func (c *CheckRun) GetApp() *App { - if c == nil { - return nil - } - return c.App -} - -// GetCheckSuite returns the CheckSuite field. -func (c *CheckRun) GetCheckSuite() *CheckSuite { - if c == nil { - return nil - } - return c.CheckSuite -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetCompletedAt() Timestamp { - if c == nil || c.CompletedAt == nil { - return Timestamp{} - } - return *c.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetConclusion() string { - if c == nil || c.Conclusion == nil { - return "" - } - return *c.Conclusion -} - -// GetDetailsURL returns the DetailsURL field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetDetailsURL() string { - if c == nil || c.DetailsURL == nil { - return "" - } - return *c.DetailsURL -} - -// GetExternalID returns the ExternalID field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetExternalID() string { - if c == nil || c.ExternalID == nil { - return "" - } - return *c.ExternalID -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetHeadSHA() string { - if c == nil || c.HeadSHA == nil { - return "" - } - return *c.HeadSHA -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetOutput returns the Output field. -func (c *CheckRun) GetOutput() *CheckRunOutput { - if c == nil { - return nil - } - return c.Output -} - -// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise. 
-func (c *CheckRun) GetStartedAt() Timestamp { - if c == nil || c.StartedAt == nil { - return Timestamp{} - } - return *c.StartedAt -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CheckRun) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAnnotationLevel returns the AnnotationLevel field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetAnnotationLevel() string { - if c == nil || c.AnnotationLevel == nil { - return "" - } - return *c.AnnotationLevel -} - -// GetEndColumn returns the EndColumn field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetEndColumn() int { - if c == nil || c.EndColumn == nil { - return 0 - } - return *c.EndColumn -} - -// GetEndLine returns the EndLine field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetEndLine() int { - if c == nil || c.EndLine == nil { - return 0 - } - return *c.EndLine -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetMessage() string { - if c == nil || c.Message == nil { - return "" - } - return *c.Message -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetPath() string { - if c == nil || c.Path == nil { - return "" - } - return *c.Path -} - -// GetRawDetails returns the RawDetails field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetRawDetails() string { - if c == nil || c.RawDetails == nil { - return "" - } - return *c.RawDetails -} - -// GetStartColumn returns the StartColumn field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetStartColumn() int { - if c == nil || c.StartColumn == nil { - return 0 - } - return *c.StartColumn -} - -// GetStartLine returns the StartLine field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetStartLine() int { - if c == nil || c.StartLine == nil { - return 0 - } - return *c.StartLine -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (c *CheckRunAnnotation) GetTitle() string { - if c == nil || c.Title == nil { - return "" - } - return *c.Title -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *CheckRunEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetCheckRun returns the CheckRun field. -func (c *CheckRunEvent) GetCheckRun() *CheckRun { - if c == nil { - return nil - } - return c.CheckRun -} - -// GetInstallation returns the Installation field. -func (c *CheckRunEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetOrg returns the Org field. -func (c *CheckRunEvent) GetOrg() *Organization { - if c == nil { - return nil - } - return c.Org -} - -// GetRepo returns the Repo field. -func (c *CheckRunEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetRequestedAction returns the RequestedAction field. -func (c *CheckRunEvent) GetRequestedAction() *RequestedAction { - if c == nil { - return nil - } - return c.RequestedAction -} - -// GetSender returns the Sender field. 
-func (c *CheckRunEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetAlt returns the Alt field if it's non-nil, zero value otherwise. -func (c *CheckRunImage) GetAlt() string { - if c == nil || c.Alt == nil { - return "" - } - return *c.Alt -} - -// GetCaption returns the Caption field if it's non-nil, zero value otherwise. -func (c *CheckRunImage) GetCaption() string { - if c == nil || c.Caption == nil { - return "" - } - return *c.Caption -} - -// GetImageURL returns the ImageURL field if it's non-nil, zero value otherwise. -func (c *CheckRunImage) GetImageURL() string { - if c == nil || c.ImageURL == nil { - return "" - } - return *c.ImageURL -} - -// GetAnnotationsCount returns the AnnotationsCount field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetAnnotationsCount() int { - if c == nil || c.AnnotationsCount == nil { - return 0 - } - return *c.AnnotationsCount -} - -// GetAnnotationsURL returns the AnnotationsURL field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetAnnotationsURL() string { - if c == nil || c.AnnotationsURL == nil { - return "" - } - return *c.AnnotationsURL -} - -// GetSummary returns the Summary field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetSummary() string { - if c == nil || c.Summary == nil { - return "" - } - return *c.Summary -} - -// GetText returns the Text field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetText() string { - if c == nil || c.Text == nil { - return "" - } - return *c.Text -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (c *CheckRunOutput) GetTitle() string { - if c == nil || c.Title == nil { - return "" - } - return *c.Title -} - -// GetAfterSHA returns the AfterSHA field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetAfterSHA() string { - if c == nil || c.AfterSHA == nil { - return "" - } - return *c.AfterSHA -} - -// GetApp returns the App field. -func (c *CheckSuite) GetApp() *App { - if c == nil { - return nil - } - return c.App -} - -// GetBeforeSHA returns the BeforeSHA field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetBeforeSHA() string { - if c == nil || c.BeforeSHA == nil { - return "" - } - return *c.BeforeSHA -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetConclusion() string { - if c == nil || c.Conclusion == nil { - return "" - } - return *c.Conclusion -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetCreatedAt() Timestamp { - if c == nil || c.CreatedAt == nil { - return Timestamp{} - } - return *c.CreatedAt -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetHeadBranch() string { - if c == nil || c.HeadBranch == nil { - return "" - } - return *c.HeadBranch -} - -// GetHeadCommit returns the HeadCommit field. -func (c *CheckSuite) GetHeadCommit() *Commit { - if c == nil { - return nil - } - return c.HeadCommit -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetHeadSHA() string { - if c == nil || c.HeadSHA == nil { - return "" - } - return *c.HeadSHA -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. 
-func (c *CheckSuite) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetRepository returns the Repository field. -func (c *CheckSuite) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetUpdatedAt() Timestamp { - if c == nil || c.UpdatedAt == nil { - return Timestamp{} - } - return *c.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CheckSuite) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *CheckSuiteEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetCheckSuite returns the CheckSuite field. -func (c *CheckSuiteEvent) GetCheckSuite() *CheckSuite { - if c == nil { - return nil - } - return c.CheckSuite -} - -// GetInstallation returns the Installation field. -func (c *CheckSuiteEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetOrg returns the Org field. -func (c *CheckSuiteEvent) GetOrg() *Organization { - if c == nil { - return nil - } - return c.Org -} - -// GetRepo returns the Repo field. -func (c *CheckSuiteEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *CheckSuiteEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetPreferences returns the Preferences field. -func (c *CheckSuitePreferenceResults) GetPreferences() *PreferenceList { - if c == nil { - return nil - } - return c.Preferences -} - -// GetRepository returns the Repository field. -func (c *CheckSuitePreferenceResults) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetBody() string { - if c == nil || c.Body == nil { - return "" - } - return *c.Body -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetKey() string { - if c == nil || c.Key == nil { - return "" - } - return *c.Key -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CodeOfConduct) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetSuggestion returns the Suggestion field if it's non-nil, zero value otherwise. -func (c *CodeownersError) GetSuggestion() string { - if c == nil || c.Suggestion == nil { - return "" - } - return *c.Suggestion -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. 
-func (c *CodeResult) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CodeResult) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (c *CodeResult) GetPath() string { - if c == nil || c.Path == nil { - return "" - } - return *c.Path -} - -// GetRepository returns the Repository field. -func (c *CodeResult) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CodeResult) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *CodeScanningAlertEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetAlert returns the Alert field. -func (c *CodeScanningAlertEvent) GetAlert() *Alert { - if c == nil { - return nil - } - return c.Alert -} - -// GetCommitOID returns the CommitOID field if it's non-nil, zero value otherwise. -func (c *CodeScanningAlertEvent) GetCommitOID() string { - if c == nil || c.CommitOID == nil { - return "" - } - return *c.CommitOID -} - -// GetInstallation returns the Installation field. -func (c *CodeScanningAlertEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetOrg returns the Org field. -func (c *CodeScanningAlertEvent) GetOrg() *Organization { - if c == nil { - return nil - } - return c.Org -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (c *CodeScanningAlertEvent) GetRef() string { - if c == nil || c.Ref == nil { - return "" - } - return *c.Ref -} - -// GetRepo returns the Repo field. -func (c *CodeScanningAlertEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *CodeScanningAlertEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetDismissedComment returns the DismissedComment field if it's non-nil, zero value otherwise. -func (c *CodeScanningAlertState) GetDismissedComment() string { - if c == nil || c.DismissedComment == nil { - return "" - } - return *c.DismissedComment -} - -// GetDismissedReason returns the DismissedReason field if it's non-nil, zero value otherwise. -func (c *CodeScanningAlertState) GetDismissedReason() string { - if c == nil || c.DismissedReason == nil { - return "" - } - return *c.DismissedReason -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (c *CodeSearchResult) GetIncompleteResults() bool { - if c == nil || c.IncompleteResults == nil { - return false - } - return *c.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *CodeSearchResult) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetBillableOwner returns the BillableOwner field. -func (c *Codespace) GetBillableOwner() *User { - if c == nil { - return nil - } - return c.BillableOwner -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (c *Codespace) GetCreatedAt() Timestamp { - if c == nil || c.CreatedAt == nil { - return Timestamp{} - } - return *c.CreatedAt -} - -// GetDevcontainerPath returns the DevcontainerPath field if it's non-nil, zero value otherwise. -func (c *Codespace) GetDevcontainerPath() string { - if c == nil || c.DevcontainerPath == nil { - return "" - } - return *c.DevcontainerPath -} - -// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. -func (c *Codespace) GetDisplayName() string { - if c == nil || c.DisplayName == nil { - return "" - } - return *c.DisplayName -} - -// GetEnvironmentID returns the EnvironmentID field if it's non-nil, zero value otherwise. -func (c *Codespace) GetEnvironmentID() string { - if c == nil || c.EnvironmentID == nil { - return "" - } - return *c.EnvironmentID -} - -// GetGitStatus returns the GitStatus field. -func (c *Codespace) GetGitStatus() *CodespacesGitStatus { - if c == nil { - return nil - } - return c.GitStatus -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *Codespace) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetIdleTimeoutMinutes returns the IdleTimeoutMinutes field if it's non-nil, zero value otherwise. -func (c *Codespace) GetIdleTimeoutMinutes() int { - if c == nil || c.IdleTimeoutMinutes == nil { - return 0 - } - return *c.IdleTimeoutMinutes -} - -// GetIdleTimeoutNotice returns the IdleTimeoutNotice field if it's non-nil, zero value otherwise. -func (c *Codespace) GetIdleTimeoutNotice() string { - if c == nil || c.IdleTimeoutNotice == nil { - return "" - } - return *c.IdleTimeoutNotice -} - -// GetLastKnownStopNotice returns the LastKnownStopNotice field if it's non-nil, zero value otherwise. -func (c *Codespace) GetLastKnownStopNotice() string { - if c == nil || c.LastKnownStopNotice == nil { - return "" - } - return *c.LastKnownStopNotice -} - -// GetLastUsedAt returns the LastUsedAt field if it's non-nil, zero value otherwise. -func (c *Codespace) GetLastUsedAt() Timestamp { - if c == nil || c.LastUsedAt == nil { - return Timestamp{} - } - return *c.LastUsedAt -} - -// GetLocation returns the Location field if it's non-nil, zero value otherwise. -func (c *Codespace) GetLocation() string { - if c == nil || c.Location == nil { - return "" - } - return *c.Location -} - -// GetMachine returns the Machine field. -func (c *Codespace) GetMachine() *CodespacesMachine { - if c == nil { - return nil - } - return c.Machine -} - -// GetMachinesURL returns the MachinesURL field if it's non-nil, zero value otherwise. -func (c *Codespace) GetMachinesURL() string { - if c == nil || c.MachinesURL == nil { - return "" - } - return *c.MachinesURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *Codespace) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetOwner returns the Owner field. -func (c *Codespace) GetOwner() *User { - if c == nil { - return nil - } - return c.Owner -} - -// GetPendingOperation returns the PendingOperation field if it's non-nil, zero value otherwise. -func (c *Codespace) GetPendingOperation() bool { - if c == nil || c.PendingOperation == nil { - return false - } - return *c.PendingOperation -} - -// GetPendingOperationDisabledReason returns the PendingOperationDisabledReason field if it's non-nil, zero value otherwise. 
-func (c *Codespace) GetPendingOperationDisabledReason() string { - if c == nil || c.PendingOperationDisabledReason == nil { - return "" - } - return *c.PendingOperationDisabledReason -} - -// GetPrebuild returns the Prebuild field if it's non-nil, zero value otherwise. -func (c *Codespace) GetPrebuild() bool { - if c == nil || c.Prebuild == nil { - return false - } - return *c.Prebuild -} - -// GetPullsURL returns the PullsURL field if it's non-nil, zero value otherwise. -func (c *Codespace) GetPullsURL() string { - if c == nil || c.PullsURL == nil { - return "" - } - return *c.PullsURL -} - -// GetRepository returns the Repository field. -func (c *Codespace) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetRetentionExpiresAt returns the RetentionExpiresAt field if it's non-nil, zero value otherwise. -func (c *Codespace) GetRetentionExpiresAt() Timestamp { - if c == nil || c.RetentionExpiresAt == nil { - return Timestamp{} - } - return *c.RetentionExpiresAt -} - -// GetRetentionPeriodMinutes returns the RetentionPeriodMinutes field if it's non-nil, zero value otherwise. -func (c *Codespace) GetRetentionPeriodMinutes() int { - if c == nil || c.RetentionPeriodMinutes == nil { - return 0 - } - return *c.RetentionPeriodMinutes -} - -// GetRuntimeConstraints returns the RuntimeConstraints field. -func (c *Codespace) GetRuntimeConstraints() *CodespacesRuntimeConstraints { - if c == nil { - return nil - } - return c.RuntimeConstraints -} - -// GetStartURL returns the StartURL field if it's non-nil, zero value otherwise. -func (c *Codespace) GetStartURL() string { - if c == nil || c.StartURL == nil { - return "" - } - return *c.StartURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (c *Codespace) GetState() string { - if c == nil || c.State == nil { - return "" - } - return *c.State -} - -// GetStopURL returns the StopURL field if it's non-nil, zero value otherwise. -func (c *Codespace) GetStopURL() string { - if c == nil || c.StopURL == nil { - return "" - } - return *c.StopURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (c *Codespace) GetUpdatedAt() Timestamp { - if c == nil || c.UpdatedAt == nil { - return Timestamp{} - } - return *c.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *Codespace) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetWebURL returns the WebURL field if it's non-nil, zero value otherwise. -func (c *Codespace) GetWebURL() string { - if c == nil || c.WebURL == nil { - return "" - } - return *c.WebURL -} - -// GetAhead returns the Ahead field if it's non-nil, zero value otherwise. -func (c *CodespacesGitStatus) GetAhead() int { - if c == nil || c.Ahead == nil { - return 0 - } - return *c.Ahead -} - -// GetBehind returns the Behind field if it's non-nil, zero value otherwise. -func (c *CodespacesGitStatus) GetBehind() int { - if c == nil || c.Behind == nil { - return 0 - } - return *c.Behind -} - -// GetHasUncommittedChanges returns the HasUncommittedChanges field if it's non-nil, zero value otherwise. -func (c *CodespacesGitStatus) GetHasUncommittedChanges() bool { - if c == nil || c.HasUncommittedChanges == nil { - return false - } - return *c.HasUncommittedChanges -} - -// GetHasUnpushedChanges returns the HasUnpushedChanges field if it's non-nil, zero value otherwise. 
-func (c *CodespacesGitStatus) GetHasUnpushedChanges() bool { - if c == nil || c.HasUnpushedChanges == nil { - return false - } - return *c.HasUnpushedChanges -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (c *CodespacesGitStatus) GetRef() string { - if c == nil || c.Ref == nil { - return "" - } - return *c.Ref -} - -// GetCPUs returns the CPUs field if it's non-nil, zero value otherwise. -func (c *CodespacesMachine) GetCPUs() int { - if c == nil || c.CPUs == nil { - return 0 - } - return *c.CPUs -} - -// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. -func (c *CodespacesMachine) GetDisplayName() string { - if c == nil || c.DisplayName == nil { - return "" - } - return *c.DisplayName -} - -// GetMemoryInBytes returns the MemoryInBytes field if it's non-nil, zero value otherwise. -func (c *CodespacesMachine) GetMemoryInBytes() int64 { - if c == nil || c.MemoryInBytes == nil { - return 0 - } - return *c.MemoryInBytes -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CodespacesMachine) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetOperatingSystem returns the OperatingSystem field if it's non-nil, zero value otherwise. -func (c *CodespacesMachine) GetOperatingSystem() string { - if c == nil || c.OperatingSystem == nil { - return "" - } - return *c.OperatingSystem -} - -// GetPrebuildAvailability returns the PrebuildAvailability field if it's non-nil, zero value otherwise. -func (c *CodespacesMachine) GetPrebuildAvailability() string { - if c == nil || c.PrebuildAvailability == nil { - return "" - } - return *c.PrebuildAvailability -} - -// GetStorageInBytes returns the StorageInBytes field if it's non-nil, zero value otherwise. -func (c *CodespacesMachine) GetStorageInBytes() int64 { - if c == nil || c.StorageInBytes == nil { - return 0 - } - return *c.StorageInBytes -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (c *CollaboratorInvitation) GetCreatedAt() Timestamp { - if c == nil || c.CreatedAt == nil { - return Timestamp{} - } - return *c.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CollaboratorInvitation) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *CollaboratorInvitation) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetInvitee returns the Invitee field. -func (c *CollaboratorInvitation) GetInvitee() *User { - if c == nil { - return nil - } - return c.Invitee -} - -// GetInviter returns the Inviter field. -func (c *CollaboratorInvitation) GetInviter() *User { - if c == nil { - return nil - } - return c.Inviter -} - -// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise. -func (c *CollaboratorInvitation) GetPermissions() string { - if c == nil || c.Permissions == nil { - return "" - } - return *c.Permissions -} - -// GetRepo returns the Repo field. -func (c *CollaboratorInvitation) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (c *CollaboratorInvitation) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetCommitURL() string { - if c == nil || c.CommitURL == nil { - return "" - } - return *c.CommitURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetRepositoryURL() string { - if c == nil || c.RepositoryURL == nil { - return "" - } - return *c.RepositoryURL -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetState() string { - if c == nil || c.State == nil { - return "" - } - return *c.State -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (c *CombinedStatus) GetTotalCount() int { - if c == nil || c.TotalCount == nil { - return 0 - } - return *c.TotalCount -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (c *Comment) GetCreatedAt() Timestamp { - if c == nil || c.CreatedAt == nil { - return Timestamp{} - } - return *c.CreatedAt -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetAuthorAssociation() string { - if c == nil || c.AuthorAssociation == nil { - return "" - } - return *c.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetBody() string { - if c == nil || c.Body == nil { - return "" - } - return *c.Body -} - -// GetChildCommentCount returns the ChildCommentCount field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetChildCommentCount() int { - if c == nil || c.ChildCommentCount == nil { - return 0 - } - return *c.ChildCommentCount -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetCreatedAt() Timestamp { - if c == nil || c.CreatedAt == nil { - return Timestamp{} - } - return *c.CreatedAt -} - -// GetDiscussionID returns the DiscussionID field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetDiscussionID() int64 { - if c == nil || c.DiscussionID == nil { - return 0 - } - return *c.DiscussionID -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetParentID returns the ParentID field if it's non-nil, zero value otherwise. 
-func (c *CommentDiscussion) GetParentID() int64 { - if c == nil || c.ParentID == nil { - return 0 - } - return *c.ParentID -} - -// GetReactions returns the Reactions field. -func (c *CommentDiscussion) GetReactions() *Reactions { - if c == nil { - return nil - } - return c.Reactions -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetRepositoryURL() string { - if c == nil || c.RepositoryURL == nil { - return "" - } - return *c.RepositoryURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (c *CommentDiscussion) GetUpdatedAt() Timestamp { - if c == nil || c.UpdatedAt == nil { - return Timestamp{} - } - return *c.UpdatedAt -} - -// GetUser returns the User field. -func (c *CommentDiscussion) GetUser() *User { - if c == nil { - return nil - } - return c.User -} - -// GetTotalCommitComments returns the TotalCommitComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalCommitComments() int { - if c == nil || c.TotalCommitComments == nil { - return 0 - } - return *c.TotalCommitComments -} - -// GetTotalGistComments returns the TotalGistComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalGistComments() int { - if c == nil || c.TotalGistComments == nil { - return 0 - } - return *c.TotalGistComments -} - -// GetTotalIssueComments returns the TotalIssueComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalIssueComments() int { - if c == nil || c.TotalIssueComments == nil { - return 0 - } - return *c.TotalIssueComments -} - -// GetTotalPullRequestComments returns the TotalPullRequestComments field if it's non-nil, zero value otherwise. -func (c *CommentStats) GetTotalPullRequestComments() int { - if c == nil || c.TotalPullRequestComments == nil { - return 0 - } - return *c.TotalPullRequestComments -} - -// GetAuthor returns the Author field. -func (c *Commit) GetAuthor() *CommitAuthor { - if c == nil { - return nil - } - return c.Author -} - -// GetCommentCount returns the CommentCount field if it's non-nil, zero value otherwise. -func (c *Commit) GetCommentCount() int { - if c == nil || c.CommentCount == nil { - return 0 - } - return *c.CommentCount -} - -// GetCommitter returns the Committer field. -func (c *Commit) GetCommitter() *CommitAuthor { - if c == nil { - return nil - } - return c.Committer -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *Commit) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (c *Commit) GetMessage() string { - if c == nil || c.Message == nil { - return "" - } - return *c.Message -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *Commit) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *Commit) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetStats returns the Stats field. -func (c *Commit) GetStats() *CommitStats { - if c == nil { - return nil - } - return c.Stats -} - -// GetTree returns the Tree field. -func (c *Commit) GetTree() *Tree { - if c == nil { - return nil - } - return c.Tree -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (c *Commit) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetVerification returns the Verification field. -func (c *Commit) GetVerification() *SignatureVerification { - if c == nil { - return nil - } - return c.Verification -} - -// GetDate returns the Date field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetDate() Timestamp { - if c == nil || c.Date == nil { - return Timestamp{} - } - return *c.Date -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetEmail() string { - if c == nil || c.Email == nil { - return "" - } - return *c.Email -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetLogin() string { - if c == nil || c.Login == nil { - return "" - } - return *c.Login -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *CommitCommentEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetComment returns the Comment field. -func (c *CommitCommentEvent) GetComment() *RepositoryComment { - if c == nil { - return nil - } - return c.Comment -} - -// GetInstallation returns the Installation field. -func (c *CommitCommentEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetRepo returns the Repo field. -func (c *CommitCommentEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *CommitCommentEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetAdditions() int { - if c == nil || c.Additions == nil { - return 0 - } - return *c.Additions -} - -// GetBlobURL returns the BlobURL field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetBlobURL() string { - if c == nil || c.BlobURL == nil { - return "" - } - return *c.BlobURL -} - -// GetChanges returns the Changes field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetChanges() int { - if c == nil || c.Changes == nil { - return 0 - } - return *c.Changes -} - -// GetContentsURL returns the ContentsURL field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetContentsURL() string { - if c == nil || c.ContentsURL == nil { - return "" - } - return *c.ContentsURL -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetDeletions() int { - if c == nil || c.Deletions == nil { - return 0 - } - return *c.Deletions -} - -// GetFilename returns the Filename field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetFilename() string { - if c == nil || c.Filename == nil { - return "" - } - return *c.Filename -} - -// GetPatch returns the Patch field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetPatch() string { - if c == nil || c.Patch == nil { - return "" - } - return *c.Patch -} - -// GetPreviousFilename returns the PreviousFilename field if it's non-nil, zero value otherwise. 
-func (c *CommitFile) GetPreviousFilename() string { - if c == nil || c.PreviousFilename == nil { - return "" - } - return *c.PreviousFilename -} - -// GetRawURL returns the RawURL field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetRawURL() string { - if c == nil || c.RawURL == nil { - return "" - } - return *c.RawURL -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CommitFile) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetAuthor returns the Author field. -func (c *CommitResult) GetAuthor() *User { - if c == nil { - return nil - } - return c.Author -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetCommentsURL() string { - if c == nil || c.CommentsURL == nil { - return "" - } - return *c.CommentsURL -} - -// GetCommit returns the Commit field. -func (c *CommitResult) GetCommit() *Commit { - if c == nil { - return nil - } - return c.Commit -} - -// GetCommitter returns the Committer field. -func (c *CommitResult) GetCommitter() *User { - if c == nil { - return nil - } - return c.Committer -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetRepository returns the Repository field. -func (c *CommitResult) GetRepository() *Repository { - if c == nil { - return nil - } - return c.Repository -} - -// GetScore returns the Score field. -func (c *CommitResult) GetScore() *float64 { - if c == nil { - return nil - } - return c.Score -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetSHA() string { - if c == nil || c.SHA == nil { - return "" - } - return *c.SHA -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CommitResult) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAheadBy returns the AheadBy field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetAheadBy() int { - if c == nil || c.AheadBy == nil { - return 0 - } - return *c.AheadBy -} - -// GetBaseCommit returns the BaseCommit field. -func (c *CommitsComparison) GetBaseCommit() *RepositoryCommit { - if c == nil { - return nil - } - return c.BaseCommit -} - -// GetBehindBy returns the BehindBy field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetBehindBy() int { - if c == nil || c.BehindBy == nil { - return 0 - } - return *c.BehindBy -} - -// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetDiffURL() string { - if c == nil || c.DiffURL == nil { - return "" - } - return *c.DiffURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetMergeBaseCommit returns the MergeBaseCommit field. -func (c *CommitsComparison) GetMergeBaseCommit() *RepositoryCommit { - if c == nil { - return nil - } - return c.MergeBaseCommit -} - -// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise. 
-func (c *CommitsComparison) GetPatchURL() string { - if c == nil || c.PatchURL == nil { - return "" - } - return *c.PatchURL -} - -// GetPermalinkURL returns the PermalinkURL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetPermalinkURL() string { - if c == nil || c.PermalinkURL == nil { - return "" - } - return *c.PermalinkURL -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetTotalCommits returns the TotalCommits field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetTotalCommits() int { - if c == nil || c.TotalCommits == nil { - return 0 - } - return *c.TotalCommits -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *CommitsComparison) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (c *CommitsSearchResult) GetIncompleteResults() bool { - if c == nil || c.IncompleteResults == nil { - return false - } - return *c.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *CommitsSearchResult) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (c *CommitStats) GetAdditions() int { - if c == nil || c.Additions == nil { - return 0 - } - return *c.Additions -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (c *CommitStats) GetDeletions() int { - if c == nil || c.Deletions == nil { - return 0 - } - return *c.Deletions -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *CommitStats) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetCodeOfConduct returns the CodeOfConduct field. -func (c *CommunityHealthFiles) GetCodeOfConduct() *Metric { - if c == nil { - return nil - } - return c.CodeOfConduct -} - -// GetCodeOfConductFile returns the CodeOfConductFile field. -func (c *CommunityHealthFiles) GetCodeOfConductFile() *Metric { - if c == nil { - return nil - } - return c.CodeOfConductFile -} - -// GetContributing returns the Contributing field. -func (c *CommunityHealthFiles) GetContributing() *Metric { - if c == nil { - return nil - } - return c.Contributing -} - -// GetIssueTemplate returns the IssueTemplate field. -func (c *CommunityHealthFiles) GetIssueTemplate() *Metric { - if c == nil { - return nil - } - return c.IssueTemplate -} - -// GetLicense returns the License field. -func (c *CommunityHealthFiles) GetLicense() *Metric { - if c == nil { - return nil - } - return c.License -} - -// GetPullRequestTemplate returns the PullRequestTemplate field. -func (c *CommunityHealthFiles) GetPullRequestTemplate() *Metric { - if c == nil { - return nil - } - return c.PullRequestTemplate -} - -// GetReadme returns the Readme field. -func (c *CommunityHealthFiles) GetReadme() *Metric { - if c == nil { - return nil - } - return c.Readme -} - -// GetContentReportsEnabled returns the ContentReportsEnabled field if it's non-nil, zero value otherwise. 
-func (c *CommunityHealthMetrics) GetContentReportsEnabled() bool { - if c == nil || c.ContentReportsEnabled == nil { - return false - } - return *c.ContentReportsEnabled -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (c *CommunityHealthMetrics) GetDescription() string { - if c == nil || c.Description == nil { - return "" - } - return *c.Description -} - -// GetDocumentation returns the Documentation field if it's non-nil, zero value otherwise. -func (c *CommunityHealthMetrics) GetDocumentation() string { - if c == nil || c.Documentation == nil { - return "" - } - return *c.Documentation -} - -// GetFiles returns the Files field. -func (c *CommunityHealthMetrics) GetFiles() *CommunityHealthFiles { - if c == nil { - return nil - } - return c.Files -} - -// GetHealthPercentage returns the HealthPercentage field if it's non-nil, zero value otherwise. -func (c *CommunityHealthMetrics) GetHealthPercentage() int { - if c == nil || c.HealthPercentage == nil { - return 0 - } - return *c.HealthPercentage -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (c *CommunityHealthMetrics) GetUpdatedAt() Timestamp { - if c == nil || c.UpdatedAt == nil { - return Timestamp{} - } - return *c.UpdatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *ContentReference) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *ContentReference) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetReference returns the Reference field if it's non-nil, zero value otherwise. -func (c *ContentReference) GetReference() string { - if c == nil || c.Reference == nil { - return "" - } - return *c.Reference -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (c *ContentReferenceEvent) GetAction() string { - if c == nil || c.Action == nil { - return "" - } - return *c.Action -} - -// GetContentReference returns the ContentReference field. -func (c *ContentReferenceEvent) GetContentReference() *ContentReference { - if c == nil { - return nil - } - return c.ContentReference -} - -// GetInstallation returns the Installation field. -func (c *ContentReferenceEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetRepo returns the Repo field. -func (c *ContentReferenceEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *ContentReferenceEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetAvatarURL() string { - if c == nil || c.AvatarURL == nil { - return "" - } - return *c.AvatarURL -} - -// GetContributions returns the Contributions field if it's non-nil, zero value otherwise. -func (c *Contributor) GetContributions() int { - if c == nil || c.Contributions == nil { - return 0 - } - return *c.Contributions -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (c *Contributor) GetEmail() string { - if c == nil || c.Email == nil { - return "" - } - return *c.Email -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. 
-func (c *Contributor) GetEventsURL() string { - if c == nil || c.EventsURL == nil { - return "" - } - return *c.EventsURL -} - -// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetFollowersURL() string { - if c == nil || c.FollowersURL == nil { - return "" - } - return *c.FollowersURL -} - -// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetFollowingURL() string { - if c == nil || c.FollowingURL == nil { - return "" - } - return *c.FollowingURL -} - -// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetGistsURL() string { - if c == nil || c.GistsURL == nil { - return "" - } - return *c.GistsURL -} - -// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise. -func (c *Contributor) GetGravatarID() string { - if c == nil || c.GravatarID == nil { - return "" - } - return *c.GravatarID -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetHTMLURL() string { - if c == nil || c.HTMLURL == nil { - return "" - } - return *c.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *Contributor) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (c *Contributor) GetLogin() string { - if c == nil || c.Login == nil { - return "" - } - return *c.Login -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *Contributor) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (c *Contributor) GetNodeID() string { - if c == nil || c.NodeID == nil { - return "" - } - return *c.NodeID -} - -// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetOrganizationsURL() string { - if c == nil || c.OrganizationsURL == nil { - return "" - } - return *c.OrganizationsURL -} - -// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetReceivedEventsURL() string { - if c == nil || c.ReceivedEventsURL == nil { - return "" - } - return *c.ReceivedEventsURL -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetReposURL() string { - if c == nil || c.ReposURL == nil { - return "" - } - return *c.ReposURL -} - -// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise. -func (c *Contributor) GetSiteAdmin() bool { - if c == nil || c.SiteAdmin == nil { - return false - } - return *c.SiteAdmin -} - -// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetStarredURL() string { - if c == nil || c.StarredURL == nil { - return "" - } - return *c.StarredURL -} - -// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetSubscriptionsURL() string { - if c == nil || c.SubscriptionsURL == nil { - return "" - } - return *c.SubscriptionsURL -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. 
-func (c *Contributor) GetType() string { - if c == nil || c.Type == nil { - return "" - } - return *c.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (c *Contributor) GetURL() string { - if c == nil || c.URL == nil { - return "" - } - return *c.URL -} - -// GetAuthor returns the Author field. -func (c *ContributorStats) GetAuthor() *Contributor { - if c == nil { - return nil - } - return c.Author -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (c *ContributorStats) GetTotal() int { - if c == nil || c.Total == nil { - return 0 - } - return *c.Total -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetCompletedAt() Timestamp { - if c == nil || c.CompletedAt == nil { - return Timestamp{} - } - return *c.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetConclusion() string { - if c == nil || c.Conclusion == nil { - return "" - } - return *c.Conclusion -} - -// GetDetailsURL returns the DetailsURL field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetDetailsURL() string { - if c == nil || c.DetailsURL == nil { - return "" - } - return *c.DetailsURL -} - -// GetExternalID returns the ExternalID field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetExternalID() string { - if c == nil || c.ExternalID == nil { - return "" - } - return *c.ExternalID -} - -// GetOutput returns the Output field. -func (c *CreateCheckRunOptions) GetOutput() *CheckRunOutput { - if c == nil { - return nil - } - return c.Output -} - -// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetStartedAt() Timestamp { - if c == nil || c.StartedAt == nil { - return Timestamp{} - } - return *c.StartedAt -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (c *CreateCheckRunOptions) GetStatus() string { - if c == nil || c.Status == nil { - return "" - } - return *c.Status -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (c *CreateCheckSuiteOptions) GetHeadBranch() string { - if c == nil || c.HeadBranch == nil { - return "" - } - return *c.HeadBranch -} - -// GetClientIP returns the ClientIP field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetClientIP() string { - if c == nil || c.ClientIP == nil { - return "" - } - return *c.ClientIP -} - -// GetDevcontainerPath returns the DevcontainerPath field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetDevcontainerPath() string { - if c == nil || c.DevcontainerPath == nil { - return "" - } - return *c.DevcontainerPath -} - -// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetDisplayName() string { - if c == nil || c.DisplayName == nil { - return "" - } - return *c.DisplayName -} - -// GetGeo returns the Geo field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetGeo() string { - if c == nil || c.Geo == nil { - return "" - } - return *c.Geo -} - -// GetIdleTimeoutMinutes returns the IdleTimeoutMinutes field if it's non-nil, zero value otherwise. 
-func (c *CreateCodespaceOptions) GetIdleTimeoutMinutes() int { - if c == nil || c.IdleTimeoutMinutes == nil { - return 0 - } - return *c.IdleTimeoutMinutes -} - -// GetMachine returns the Machine field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetMachine() string { - if c == nil || c.Machine == nil { - return "" - } - return *c.Machine -} - -// GetMultiRepoPermissionsOptOut returns the MultiRepoPermissionsOptOut field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetMultiRepoPermissionsOptOut() bool { - if c == nil || c.MultiRepoPermissionsOptOut == nil { - return false - } - return *c.MultiRepoPermissionsOptOut -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetRef() string { - if c == nil || c.Ref == nil { - return "" - } - return *c.Ref -} - -// GetRetentionPeriodMinutes returns the RetentionPeriodMinutes field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetRetentionPeriodMinutes() int { - if c == nil || c.RetentionPeriodMinutes == nil { - return 0 - } - return *c.RetentionPeriodMinutes -} - -// GetWorkingDirectory returns the WorkingDirectory field if it's non-nil, zero value otherwise. -func (c *CreateCodespaceOptions) GetWorkingDirectory() string { - if c == nil || c.WorkingDirectory == nil { - return "" - } - return *c.WorkingDirectory -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetDescription() string { - if c == nil || c.Description == nil { - return "" - } - return *c.Description -} - -// GetInstallation returns the Installation field. -func (c *CreateEvent) GetInstallation() *Installation { - if c == nil { - return nil - } - return c.Installation -} - -// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetMasterBranch() string { - if c == nil || c.MasterBranch == nil { - return "" - } - return *c.MasterBranch -} - -// GetOrg returns the Org field. -func (c *CreateEvent) GetOrg() *Organization { - if c == nil { - return nil - } - return c.Org -} - -// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetPusherType() string { - if c == nil || c.PusherType == nil { - return "" - } - return *c.PusherType -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetRef() string { - if c == nil || c.Ref == nil { - return "" - } - return *c.Ref -} - -// GetRefType returns the RefType field if it's non-nil, zero value otherwise. -func (c *CreateEvent) GetRefType() string { - if c == nil || c.RefType == nil { - return "" - } - return *c.RefType -} - -// GetRepo returns the Repo field. -func (c *CreateEvent) GetRepo() *Repository { - if c == nil { - return nil - } - return c.Repo -} - -// GetSender returns the Sender field. -func (c *CreateEvent) GetSender() *User { - if c == nil { - return nil - } - return c.Sender -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (c *CreateOrgInvitationOptions) GetEmail() string { - if c == nil || c.Email == nil { - return "" - } - return *c.Email -} - -// GetInviteeID returns the InviteeID field if it's non-nil, zero value otherwise. 
-func (c *CreateOrgInvitationOptions) GetInviteeID() int64 { - if c == nil || c.InviteeID == nil { - return 0 - } - return *c.InviteeID -} - -// GetRole returns the Role field if it's non-nil, zero value otherwise. -func (c *CreateOrgInvitationOptions) GetRole() string { - if c == nil || c.Role == nil { - return "" - } - return *c.Role -} - -// GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. -func (c *CreateOrUpdateCustomRoleOptions) GetBaseRole() string { - if c == nil || c.BaseRole == nil { - return "" - } - return *c.BaseRole -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (c *CreateOrUpdateCustomRoleOptions) GetDescription() string { - if c == nil || c.Description == nil { - return "" - } - return *c.Description -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CreateOrUpdateCustomRoleOptions) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (c *CreateProtectedChanges) GetFrom() bool { - if c == nil || c.From == nil { - return false - } - return *c.From -} - -// GetAllowsPublicRepositories returns the AllowsPublicRepositories field if it's non-nil, zero value otherwise. -func (c *CreateRunnerGroupRequest) GetAllowsPublicRepositories() bool { - if c == nil || c.AllowsPublicRepositories == nil { - return false - } - return *c.AllowsPublicRepositories -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CreateRunnerGroupRequest) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetRestrictedToWorkflows returns the RestrictedToWorkflows field if it's non-nil, zero value otherwise. -func (c *CreateRunnerGroupRequest) GetRestrictedToWorkflows() bool { - if c == nil || c.RestrictedToWorkflows == nil { - return false - } - return *c.RestrictedToWorkflows -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (c *CreateRunnerGroupRequest) GetVisibility() string { - if c == nil || c.Visibility == nil { - return "" - } - return *c.Visibility -} - -// GetCanAdminsBypass returns the CanAdminsBypass field if it's non-nil, zero value otherwise. -func (c *CreateUpdateEnvironment) GetCanAdminsBypass() bool { - if c == nil || c.CanAdminsBypass == nil { - return false - } - return *c.CanAdminsBypass -} - -// GetDeploymentBranchPolicy returns the DeploymentBranchPolicy field. -func (c *CreateUpdateEnvironment) GetDeploymentBranchPolicy() *BranchPolicy { - if c == nil { - return nil - } - return c.DeploymentBranchPolicy -} - -// GetWaitTimer returns the WaitTimer field if it's non-nil, zero value otherwise. -func (c *CreateUpdateEnvironment) GetWaitTimer() int { - if c == nil || c.WaitTimer == nil { - return 0 - } - return *c.WaitTimer -} - -// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise. -func (c *CreateUpdateRequiredWorkflowOptions) GetRepositoryID() int64 { - if c == nil || c.RepositoryID == nil { - return 0 - } - return *c.RepositoryID -} - -// GetScope returns the Scope field if it's non-nil, zero value otherwise. -func (c *CreateUpdateRequiredWorkflowOptions) GetScope() string { - if c == nil || c.Scope == nil { - return "" - } - return *c.Scope -} - -// GetSelectedRepositoryIDs returns the SelectedRepositoryIDs field. 
-func (c *CreateUpdateRequiredWorkflowOptions) GetSelectedRepositoryIDs() *SelectedRepoIDs { - if c == nil { - return nil - } - return c.SelectedRepositoryIDs -} - -// GetWorkflowFilePath returns the WorkflowFilePath field if it's non-nil, zero value otherwise. -func (c *CreateUpdateRequiredWorkflowOptions) GetWorkflowFilePath() string { - if c == nil || c.WorkflowFilePath == nil { - return "" - } - return *c.WorkflowFilePath -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (c *CreateUserProjectOptions) GetBody() string { - if c == nil || c.Body == nil { - return "" - } - return *c.Body -} - -// GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. -func (c *CustomRepoRoles) GetBaseRole() string { - if c == nil || c.BaseRole == nil { - return "" - } - return *c.BaseRole -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (c *CustomRepoRoles) GetDescription() string { - if c == nil || c.Description == nil { - return "" - } - return *c.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *CustomRepoRoles) GetID() int64 { - if c == nil || c.ID == nil { - return 0 - } - return *c.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CustomRepoRoles) GetName() string { - if c == nil || c.Name == nil { - return "" - } - return *c.Name -} - -// GetQuerySuite returns the QuerySuite field if it's non-nil, zero value otherwise. -func (d *DefaultSetupConfiguration) GetQuerySuite() string { - if d == nil || d.QuerySuite == nil { - return "" - } - return *d.QuerySuite -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *DefaultSetupConfiguration) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DefaultSetupConfiguration) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetInstallation returns the Installation field. -func (d *DeleteEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetPusherType() string { - if d == nil || d.PusherType == nil { - return "" - } - return *d.PusherType -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetRef() string { - if d == nil || d.Ref == nil { - return "" - } - return *d.Ref -} - -// GetRefType returns the RefType field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetRefType() string { - if d == nil || d.RefType == nil { - return "" - } - return *d.RefType -} - -// GetRepo returns the Repo field. -func (d *DeleteEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DeleteEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetDependency returns the Dependency field. 
-func (d *DependabotAlert) GetDependency() *Dependency { - if d == nil { - return nil - } - return d.Dependency -} - -// GetDismissedAt returns the DismissedAt field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetDismissedAt() Timestamp { - if d == nil || d.DismissedAt == nil { - return Timestamp{} - } - return *d.DismissedAt -} - -// GetDismissedBy returns the DismissedBy field. -func (d *DependabotAlert) GetDismissedBy() *User { - if d == nil { - return nil - } - return d.DismissedBy -} - -// GetDismissedComment returns the DismissedComment field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetDismissedComment() string { - if d == nil || d.DismissedComment == nil { - return "" - } - return *d.DismissedComment -} - -// GetDismissedReason returns the DismissedReason field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetDismissedReason() string { - if d == nil || d.DismissedReason == nil { - return "" - } - return *d.DismissedReason -} - -// GetFixedAt returns the FixedAt field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetFixedAt() Timestamp { - if d == nil || d.FixedAt == nil { - return Timestamp{} - } - return *d.FixedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetHTMLURL() string { - if d == nil || d.HTMLURL == nil { - return "" - } - return *d.HTMLURL -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetNumber() int { - if d == nil || d.Number == nil { - return 0 - } - return *d.Number -} - -// GetRepository returns the Repository field. -func (d *DependabotAlert) GetRepository() *Repository { - if d == nil { - return nil - } - return d.Repository -} - -// GetSecurityAdvisory returns the SecurityAdvisory field. -func (d *DependabotAlert) GetSecurityAdvisory() *DependabotSecurityAdvisory { - if d == nil { - return nil - } - return d.SecurityAdvisory -} - -// GetSecurityVulnerability returns the SecurityVulnerability field. -func (d *DependabotAlert) GetSecurityVulnerability() *AdvisoryVulnerability { - if d == nil { - return nil - } - return d.SecurityVulnerability -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (d *DependabotAlert) GetURL() string { - if d == nil || d.URL == nil { - return "" - } - return *d.URL -} - -// GetCVEID returns the CVEID field if it's non-nil, zero value otherwise. -func (d *DependabotSecurityAdvisory) GetCVEID() string { - if d == nil || d.CVEID == nil { - return "" - } - return *d.CVEID -} - -// GetCVSs returns the CVSs field. -func (d *DependabotSecurityAdvisory) GetCVSs() *AdvisoryCVSs { - if d == nil { - return nil - } - return d.CVSs -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DependabotSecurityAdvisory) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetGHSAID returns the GHSAID field if it's non-nil, zero value otherwise. 
-func (d *DependabotSecurityAdvisory) GetGHSAID() string { - if d == nil || d.GHSAID == nil { - return "" - } - return *d.GHSAID -} - -// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise. -func (d *DependabotSecurityAdvisory) GetPublishedAt() Timestamp { - if d == nil || d.PublishedAt == nil { - return Timestamp{} - } - return *d.PublishedAt -} - -// GetSeverity returns the Severity field if it's non-nil, zero value otherwise. -func (d *DependabotSecurityAdvisory) GetSeverity() string { - if d == nil || d.Severity == nil { - return "" - } - return *d.Severity -} - -// GetSummary returns the Summary field if it's non-nil, zero value otherwise. -func (d *DependabotSecurityAdvisory) GetSummary() string { - if d == nil || d.Summary == nil { - return "" - } - return *d.Summary -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DependabotSecurityAdvisory) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetWithdrawnAt returns the WithdrawnAt field if it's non-nil, zero value otherwise. -func (d *DependabotSecurityAdvisory) GetWithdrawnAt() Timestamp { - if d == nil || d.WithdrawnAt == nil { - return Timestamp{} - } - return *d.WithdrawnAt -} - -// GetManifestPath returns the ManifestPath field if it's non-nil, zero value otherwise. -func (d *Dependency) GetManifestPath() string { - if d == nil || d.ManifestPath == nil { - return "" - } - return *d.ManifestPath -} - -// GetPackage returns the Package field. -func (d *Dependency) GetPackage() *VulnerabilityPackage { - if d == nil { - return nil - } - return d.Package -} - -// GetScope returns the Scope field if it's non-nil, zero value otherwise. -func (d *Dependency) GetScope() string { - if d == nil || d.Scope == nil { - return "" - } - return *d.Scope -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (d *DeployKeyEvent) GetAction() string { - if d == nil || d.Action == nil { - return "" - } - return *d.Action -} - -// GetInstallation returns the Installation field. -func (d *DeployKeyEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetKey returns the Key field. -func (d *DeployKeyEvent) GetKey() *Key { - if d == nil { - return nil - } - return d.Key -} - -// GetOrganization returns the Organization field. -func (d *DeployKeyEvent) GetOrganization() *Organization { - if d == nil { - return nil - } - return d.Organization -} - -// GetRepo returns the Repo field. -func (d *DeployKeyEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DeployKeyEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *Deployment) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetCreator returns the Creator field. -func (d *Deployment) GetCreator() *User { - if d == nil { - return nil - } - return d.Creator -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *Deployment) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. 
-func (d *Deployment) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (d *Deployment) GetID() int64 { - if d == nil || d.ID == nil { - return 0 - } - return *d.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *Deployment) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (d *Deployment) GetRef() string { - if d == nil || d.Ref == nil { - return "" - } - return *d.Ref -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (d *Deployment) GetRepositoryURL() string { - if d == nil || d.RepositoryURL == nil { - return "" - } - return *d.RepositoryURL -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (d *Deployment) GetSHA() string { - if d == nil || d.SHA == nil { - return "" - } - return *d.SHA -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (d *Deployment) GetStatusesURL() string { - if d == nil || d.StatusesURL == nil { - return "" - } - return *d.StatusesURL -} - -// GetTask returns the Task field if it's non-nil, zero value otherwise. -func (d *Deployment) GetTask() string { - if d == nil || d.Task == nil { - return "" - } - return *d.Task -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *Deployment) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (d *Deployment) GetURL() string { - if d == nil || d.URL == nil { - return "" - } - return *d.URL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (d *DeploymentBranchPolicy) GetID() int64 { - if d == nil || d.ID == nil { - return 0 - } - return *d.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (d *DeploymentBranchPolicy) GetName() string { - if d == nil || d.Name == nil { - return "" - } - return *d.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *DeploymentBranchPolicy) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (d *DeploymentBranchPolicyRequest) GetName() string { - if d == nil || d.Name == nil { - return "" - } - return *d.Name -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (d *DeploymentBranchPolicyResponse) GetTotalCount() int { - if d == nil || d.TotalCount == nil { - return 0 - } - return *d.TotalCount -} - -// GetDeployment returns the Deployment field. -func (d *DeploymentEvent) GetDeployment() *Deployment { - if d == nil { - return nil - } - return d.Deployment -} - -// GetInstallation returns the Installation field. -func (d *DeploymentEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetRepo returns the Repo field. -func (d *DeploymentEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. 
-func (d *DeploymentEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (d *DeploymentProtectionRuleEvent) GetAction() string { - if d == nil || d.Action == nil { - return "" - } - return *d.Action -} - -// GetDeployment returns the Deployment field. -func (d *DeploymentProtectionRuleEvent) GetDeployment() *Deployment { - if d == nil { - return nil - } - return d.Deployment -} - -// GetDeploymentCallbackURL returns the DeploymentCallbackURL field if it's non-nil, zero value otherwise. -func (d *DeploymentProtectionRuleEvent) GetDeploymentCallbackURL() string { - if d == nil || d.DeploymentCallbackURL == nil { - return "" - } - return *d.DeploymentCallbackURL -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (d *DeploymentProtectionRuleEvent) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (d *DeploymentProtectionRuleEvent) GetEvent() string { - if d == nil || d.Event == nil { - return "" - } - return *d.Event -} - -// GetInstallation returns the Installation field. -func (d *DeploymentProtectionRuleEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetOrganization returns the Organization field. -func (d *DeploymentProtectionRuleEvent) GetOrganization() *Organization { - if d == nil { - return nil - } - return d.Organization -} - -// GetRepo returns the Repo field. -func (d *DeploymentProtectionRuleEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DeploymentProtectionRuleEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetAutoMerge returns the AutoMerge field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetAutoMerge() bool { - if d == nil || d.AutoMerge == nil { - return false - } - return *d.AutoMerge -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetProductionEnvironment returns the ProductionEnvironment field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetProductionEnvironment() bool { - if d == nil || d.ProductionEnvironment == nil { - return false - } - return *d.ProductionEnvironment -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetRef() string { - if d == nil || d.Ref == nil { - return "" - } - return *d.Ref -} - -// GetRequiredContexts returns the RequiredContexts field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetRequiredContexts() []string { - if d == nil || d.RequiredContexts == nil { - return nil - } - return *d.RequiredContexts -} - -// GetTask returns the Task field if it's non-nil, zero value otherwise. 
-func (d *DeploymentRequest) GetTask() string { - if d == nil || d.Task == nil { - return "" - } - return *d.Task -} - -// GetTransientEnvironment returns the TransientEnvironment field if it's non-nil, zero value otherwise. -func (d *DeploymentRequest) GetTransientEnvironment() bool { - if d == nil || d.TransientEnvironment == nil { - return false - } - return *d.TransientEnvironment -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetCreator returns the Creator field. -func (d *DeploymentStatus) GetCreator() *User { - if d == nil { - return nil - } - return d.Creator -} - -// GetDeploymentURL returns the DeploymentURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetDeploymentURL() string { - if d == nil || d.DeploymentURL == nil { - return "" - } - return *d.DeploymentURL -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetEnvironmentURL returns the EnvironmentURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetEnvironmentURL() string { - if d == nil || d.EnvironmentURL == nil { - return "" - } - return *d.EnvironmentURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetID() int64 { - if d == nil || d.ID == nil { - return 0 - } - return *d.ID -} - -// GetLogURL returns the LogURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetLogURL() string { - if d == nil || d.LogURL == nil { - return "" - } - return *d.LogURL -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetRepositoryURL() string { - if d == nil || d.RepositoryURL == nil { - return "" - } - return *d.RepositoryURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetTargetURL() string { - if d == nil || d.TargetURL == nil { - return "" - } - return *d.TargetURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatus) GetURL() string { - if d == nil || d.URL == nil { - return "" - } - return *d.URL -} - -// GetDeployment returns the Deployment field. 
-func (d *DeploymentStatusEvent) GetDeployment() *Deployment { - if d == nil { - return nil - } - return d.Deployment -} - -// GetDeploymentStatus returns the DeploymentStatus field. -func (d *DeploymentStatusEvent) GetDeploymentStatus() *DeploymentStatus { - if d == nil { - return nil - } - return d.DeploymentStatus -} - -// GetInstallation returns the Installation field. -func (d *DeploymentStatusEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetRepo returns the Repo field. -func (d *DeploymentStatusEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DeploymentStatusEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetAutoInactive returns the AutoInactive field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetAutoInactive() bool { - if d == nil || d.AutoInactive == nil { - return false - } - return *d.AutoInactive -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetEnvironment() string { - if d == nil || d.Environment == nil { - return "" - } - return *d.Environment -} - -// GetEnvironmentURL returns the EnvironmentURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetEnvironmentURL() string { - if d == nil || d.EnvironmentURL == nil { - return "" - } - return *d.EnvironmentURL -} - -// GetLogURL returns the LogURL field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetLogURL() string { - if d == nil || d.LogURL == nil { - return "" - } - return *d.LogURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *DeploymentStatusRequest) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetActiveLockReason returns the ActiveLockReason field if it's non-nil, zero value otherwise. -func (d *Discussion) GetActiveLockReason() string { - if d == nil || d.ActiveLockReason == nil { - return "" - } - return *d.ActiveLockReason -} - -// GetAnswerChosenAt returns the AnswerChosenAt field if it's non-nil, zero value otherwise. -func (d *Discussion) GetAnswerChosenAt() Timestamp { - if d == nil || d.AnswerChosenAt == nil { - return Timestamp{} - } - return *d.AnswerChosenAt -} - -// GetAnswerChosenBy returns the AnswerChosenBy field if it's non-nil, zero value otherwise. -func (d *Discussion) GetAnswerChosenBy() string { - if d == nil || d.AnswerChosenBy == nil { - return "" - } - return *d.AnswerChosenBy -} - -// GetAnswerHTMLURL returns the AnswerHTMLURL field if it's non-nil, zero value otherwise. -func (d *Discussion) GetAnswerHTMLURL() string { - if d == nil || d.AnswerHTMLURL == nil { - return "" - } - return *d.AnswerHTMLURL -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (d *Discussion) GetAuthorAssociation() string { - if d == nil || d.AuthorAssociation == nil { - return "" - } - return *d.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. 
-func (d *Discussion) GetBody() string { - if d == nil || d.Body == nil { - return "" - } - return *d.Body -} - -// GetComments returns the Comments field if it's non-nil, zero value otherwise. -func (d *Discussion) GetComments() int { - if d == nil || d.Comments == nil { - return 0 - } - return *d.Comments -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *Discussion) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetDiscussionCategory returns the DiscussionCategory field. -func (d *Discussion) GetDiscussionCategory() *DiscussionCategory { - if d == nil { - return nil - } - return d.DiscussionCategory -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (d *Discussion) GetHTMLURL() string { - if d == nil || d.HTMLURL == nil { - return "" - } - return *d.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (d *Discussion) GetID() int64 { - if d == nil || d.ID == nil { - return 0 - } - return *d.ID -} - -// GetLocked returns the Locked field if it's non-nil, zero value otherwise. -func (d *Discussion) GetLocked() bool { - if d == nil || d.Locked == nil { - return false - } - return *d.Locked -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *Discussion) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (d *Discussion) GetNumber() int { - if d == nil || d.Number == nil { - return 0 - } - return *d.Number -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (d *Discussion) GetRepositoryURL() string { - if d == nil || d.RepositoryURL == nil { - return "" - } - return *d.RepositoryURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *Discussion) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (d *Discussion) GetTitle() string { - if d == nil || d.Title == nil { - return "" - } - return *d.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *Discussion) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetUser returns the User field. -func (d *Discussion) GetUser() *User { - if d == nil { - return nil - } - return d.User -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetDescription() string { - if d == nil || d.Description == nil { - return "" - } - return *d.Description -} - -// GetEmoji returns the Emoji field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetEmoji() string { - if d == nil || d.Emoji == nil { - return "" - } - return *d.Emoji -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. 
-func (d *DiscussionCategory) GetID() int64 { - if d == nil || d.ID == nil { - return 0 - } - return *d.ID -} - -// GetIsAnswerable returns the IsAnswerable field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetIsAnswerable() bool { - if d == nil || d.IsAnswerable == nil { - return false - } - return *d.IsAnswerable -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetName() string { - if d == nil || d.Name == nil { - return "" - } - return *d.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetRepositoryID() int64 { - if d == nil || d.RepositoryID == nil { - return 0 - } - return *d.RepositoryID -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetSlug() string { - if d == nil || d.Slug == nil { - return "" - } - return *d.Slug -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionCategory) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetAuthor returns the Author field. -func (d *DiscussionComment) GetAuthor() *User { - if d == nil { - return nil - } - return d.Author -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetBody() string { - if d == nil || d.Body == nil { - return "" - } - return *d.Body -} - -// GetBodyHTML returns the BodyHTML field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetBodyHTML() string { - if d == nil || d.BodyHTML == nil { - return "" - } - return *d.BodyHTML -} - -// GetBodyVersion returns the BodyVersion field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetBodyVersion() string { - if d == nil || d.BodyVersion == nil { - return "" - } - return *d.BodyVersion -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetCreatedAt() Timestamp { - if d == nil || d.CreatedAt == nil { - return Timestamp{} - } - return *d.CreatedAt -} - -// GetDiscussionURL returns the DiscussionURL field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetDiscussionURL() string { - if d == nil || d.DiscussionURL == nil { - return "" - } - return *d.DiscussionURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetHTMLURL() string { - if d == nil || d.HTMLURL == nil { - return "" - } - return *d.HTMLURL -} - -// GetLastEditedAt returns the LastEditedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetLastEditedAt() Timestamp { - if d == nil || d.LastEditedAt == nil { - return Timestamp{} - } - return *d.LastEditedAt -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetNodeID() string { - if d == nil || d.NodeID == nil { - return "" - } - return *d.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetNumber() int { - if d == nil || d.Number == nil { - return 0 - } - return *d.Number -} - -// GetReactions returns the Reactions field. 
-func (d *DiscussionComment) GetReactions() *Reactions { - if d == nil { - return nil - } - return d.Reactions -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetUpdatedAt() Timestamp { - if d == nil || d.UpdatedAt == nil { - return Timestamp{} - } - return *d.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (d *DiscussionComment) GetURL() string { - if d == nil || d.URL == nil { - return "" - } - return *d.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (d *DiscussionCommentEvent) GetAction() string { - if d == nil || d.Action == nil { - return "" - } - return *d.Action -} - -// GetComment returns the Comment field. -func (d *DiscussionCommentEvent) GetComment() *CommentDiscussion { - if d == nil { - return nil - } - return d.Comment -} - -// GetDiscussion returns the Discussion field. -func (d *DiscussionCommentEvent) GetDiscussion() *Discussion { - if d == nil { - return nil - } - return d.Discussion -} - -// GetInstallation returns the Installation field. -func (d *DiscussionCommentEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetOrg returns the Org field. -func (d *DiscussionCommentEvent) GetOrg() *Organization { - if d == nil { - return nil - } - return d.Org -} - -// GetRepo returns the Repo field. -func (d *DiscussionCommentEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DiscussionCommentEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (d *DiscussionEvent) GetAction() string { - if d == nil || d.Action == nil { - return "" - } - return *d.Action -} - -// GetDiscussion returns the Discussion field. -func (d *DiscussionEvent) GetDiscussion() *Discussion { - if d == nil { - return nil - } - return d.Discussion -} - -// GetInstallation returns the Installation field. -func (d *DiscussionEvent) GetInstallation() *Installation { - if d == nil { - return nil - } - return d.Installation -} - -// GetOrg returns the Org field. -func (d *DiscussionEvent) GetOrg() *Organization { - if d == nil { - return nil - } - return d.Org -} - -// GetRepo returns the Repo field. -func (d *DiscussionEvent) GetRepo() *Repository { - if d == nil { - return nil - } - return d.Repo -} - -// GetSender returns the Sender field. -func (d *DiscussionEvent) GetSender() *User { - if d == nil { - return nil - } - return d.Sender -} - -// GetApps returns the Apps field if it's non-nil, zero value otherwise. -func (d *DismissalRestrictionsRequest) GetApps() []string { - if d == nil || d.Apps == nil { - return nil - } - return *d.Apps -} - -// GetTeams returns the Teams field if it's non-nil, zero value otherwise. -func (d *DismissalRestrictionsRequest) GetTeams() []string { - if d == nil || d.Teams == nil { - return nil - } - return *d.Teams -} - -// GetUsers returns the Users field if it's non-nil, zero value otherwise. -func (d *DismissalRestrictionsRequest) GetUsers() []string { - if d == nil || d.Users == nil { - return nil - } - return *d.Users -} - -// GetDismissalCommitID returns the DismissalCommitID field if it's non-nil, zero value otherwise. 
-func (d *DismissedReview) GetDismissalCommitID() string { - if d == nil || d.DismissalCommitID == nil { - return "" - } - return *d.DismissalCommitID -} - -// GetDismissalMessage returns the DismissalMessage field if it's non-nil, zero value otherwise. -func (d *DismissedReview) GetDismissalMessage() string { - if d == nil || d.DismissalMessage == nil { - return "" - } - return *d.DismissalMessage -} - -// GetReviewID returns the ReviewID field if it's non-nil, zero value otherwise. -func (d *DismissedReview) GetReviewID() int64 { - if d == nil || d.ReviewID == nil { - return 0 - } - return *d.ReviewID -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (d *DismissedReview) GetState() string { - if d == nil || d.State == nil { - return "" - } - return *d.State -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (d *DismissStaleReviewsOnPushChanges) GetFrom() bool { - if d == nil || d.From == nil { - return false - } - return *d.From -} - -// GetClientPayload returns the ClientPayload field if it's non-nil, zero value otherwise. -func (d *DispatchRequestOptions) GetClientPayload() json.RawMessage { - if d == nil || d.ClientPayload == nil { - return json.RawMessage{} - } - return *d.ClientPayload -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetBody() string { - if d == nil || d.Body == nil { - return "" - } - return *d.Body -} - -// GetLine returns the Line field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetLine() int { - if d == nil || d.Line == nil { - return 0 - } - return *d.Line -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetPath() string { - if d == nil || d.Path == nil { - return "" - } - return *d.Path -} - -// GetPosition returns the Position field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetPosition() int { - if d == nil || d.Position == nil { - return 0 - } - return *d.Position -} - -// GetSide returns the Side field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetSide() string { - if d == nil || d.Side == nil { - return "" - } - return *d.Side -} - -// GetStartLine returns the StartLine field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetStartLine() int { - if d == nil || d.StartLine == nil { - return 0 - } - return *d.StartLine -} - -// GetStartSide returns the StartSide field if it's non-nil, zero value otherwise. -func (d *DraftReviewComment) GetStartSide() string { - if d == nil || d.StartSide == nil { - return "" - } - return *d.StartSide -} - -// GetRef returns the Ref field. -func (e *EditBase) GetRef() *EditRef { - if e == nil { - return nil - } - return e.Ref -} - -// GetSHA returns the SHA field. -func (e *EditBase) GetSHA() *EditSHA { - if e == nil { - return nil - } - return e.SHA -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (e *EditBody) GetFrom() string { - if e == nil || e.From == nil { - return "" - } - return *e.From -} - -// GetBase returns the Base field. -func (e *EditChange) GetBase() *EditBase { - if e == nil { - return nil - } - return e.Base -} - -// GetBody returns the Body field. -func (e *EditChange) GetBody() *EditBody { - if e == nil { - return nil - } - return e.Body -} - -// GetOwner returns the Owner field. 
-func (e *EditChange) GetOwner() *EditOwner { - if e == nil { - return nil - } - return e.Owner -} - -// GetRepo returns the Repo field. -func (e *EditChange) GetRepo() *EditRepo { - if e == nil { - return nil - } - return e.Repo -} - -// GetTitle returns the Title field. -func (e *EditChange) GetTitle() *EditTitle { - if e == nil { - return nil - } - return e.Title -} - -// GetOwnerInfo returns the OwnerInfo field. -func (e *EditOwner) GetOwnerInfo() *OwnerInfo { - if e == nil { - return nil - } - return e.OwnerInfo -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (e *EditRef) GetFrom() string { - if e == nil || e.From == nil { - return "" - } - return *e.From -} - -// GetName returns the Name field. -func (e *EditRepo) GetName() *RepoName { - if e == nil { - return nil - } - return e.Name -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (e *EditSHA) GetFrom() string { - if e == nil || e.From == nil { - return "" - } - return *e.From -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (e *EditTitle) GetFrom() string { - if e == nil || e.From == nil { - return "" - } - return *e.From -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetAvatarURL() string { - if e == nil || e.AvatarURL == nil { - return "" - } - return *e.AvatarURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetCreatedAt() Timestamp { - if e == nil || e.CreatedAt == nil { - return Timestamp{} - } - return *e.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetDescription() string { - if e == nil || e.Description == nil { - return "" - } - return *e.Description -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetHTMLURL() string { - if e == nil || e.HTMLURL == nil { - return "" - } - return *e.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetID() int { - if e == nil || e.ID == nil { - return 0 - } - return *e.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetName() string { - if e == nil || e.Name == nil { - return "" - } - return *e.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetNodeID() string { - if e == nil || e.NodeID == nil { - return "" - } - return *e.NodeID -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetSlug() string { - if e == nil || e.Slug == nil { - return "" - } - return *e.Slug -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetUpdatedAt() Timestamp { - if e == nil || e.UpdatedAt == nil { - return Timestamp{} - } - return *e.UpdatedAt -} - -// GetWebsiteURL returns the WebsiteURL field if it's non-nil, zero value otherwise. -func (e *Enterprise) GetWebsiteURL() string { - if e == nil || e.WebsiteURL == nil { - return "" - } - return *e.WebsiteURL -} - -// GetAdvancedSecurityEnabledForNewRepositories returns the AdvancedSecurityEnabledForNewRepositories field if it's non-nil, zero value otherwise. 
-func (e *EnterpriseSecurityAnalysisSettings) GetAdvancedSecurityEnabledForNewRepositories() bool { - if e == nil || e.AdvancedSecurityEnabledForNewRepositories == nil { - return false - } - return *e.AdvancedSecurityEnabledForNewRepositories -} - -// GetSecretScanningEnabledForNewRepositories returns the SecretScanningEnabledForNewRepositories field if it's non-nil, zero value otherwise. -func (e *EnterpriseSecurityAnalysisSettings) GetSecretScanningEnabledForNewRepositories() bool { - if e == nil || e.SecretScanningEnabledForNewRepositories == nil { - return false - } - return *e.SecretScanningEnabledForNewRepositories -} - -// GetSecretScanningPushProtectionCustomLink returns the SecretScanningPushProtectionCustomLink field if it's non-nil, zero value otherwise. -func (e *EnterpriseSecurityAnalysisSettings) GetSecretScanningPushProtectionCustomLink() string { - if e == nil || e.SecretScanningPushProtectionCustomLink == nil { - return "" - } - return *e.SecretScanningPushProtectionCustomLink -} - -// GetSecretScanningPushProtectionEnabledForNewRepositories returns the SecretScanningPushProtectionEnabledForNewRepositories field if it's non-nil, zero value otherwise. -func (e *EnterpriseSecurityAnalysisSettings) GetSecretScanningPushProtectionEnabledForNewRepositories() bool { - if e == nil || e.SecretScanningPushProtectionEnabledForNewRepositories == nil { - return false - } - return *e.SecretScanningPushProtectionEnabledForNewRepositories -} - -// GetCanAdminsBypass returns the CanAdminsBypass field if it's non-nil, zero value otherwise. -func (e *Environment) GetCanAdminsBypass() bool { - if e == nil || e.CanAdminsBypass == nil { - return false - } - return *e.CanAdminsBypass -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (e *Environment) GetCreatedAt() Timestamp { - if e == nil || e.CreatedAt == nil { - return Timestamp{} - } - return *e.CreatedAt -} - -// GetDeploymentBranchPolicy returns the DeploymentBranchPolicy field. -func (e *Environment) GetDeploymentBranchPolicy() *BranchPolicy { - if e == nil { - return nil - } - return e.DeploymentBranchPolicy -} - -// GetEnvironmentName returns the EnvironmentName field if it's non-nil, zero value otherwise. -func (e *Environment) GetEnvironmentName() string { - if e == nil || e.EnvironmentName == nil { - return "" - } - return *e.EnvironmentName -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (e *Environment) GetHTMLURL() string { - if e == nil || e.HTMLURL == nil { - return "" - } - return *e.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (e *Environment) GetID() int64 { - if e == nil || e.ID == nil { - return 0 - } - return *e.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (e *Environment) GetName() string { - if e == nil || e.Name == nil { - return "" - } - return *e.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (e *Environment) GetNodeID() string { - if e == nil || e.NodeID == nil { - return "" - } - return *e.NodeID -} - -// GetOwner returns the Owner field if it's non-nil, zero value otherwise. -func (e *Environment) GetOwner() string { - if e == nil || e.Owner == nil { - return "" - } - return *e.Owner -} - -// GetRepo returns the Repo field if it's non-nil, zero value otherwise. 
-func (e *Environment) GetRepo() string { - if e == nil || e.Repo == nil { - return "" - } - return *e.Repo -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (e *Environment) GetUpdatedAt() Timestamp { - if e == nil || e.UpdatedAt == nil { - return Timestamp{} - } - return *e.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (e *Environment) GetURL() string { - if e == nil || e.URL == nil { - return "" - } - return *e.URL -} - -// GetWaitTimer returns the WaitTimer field if it's non-nil, zero value otherwise. -func (e *Environment) GetWaitTimer() int { - if e == nil || e.WaitTimer == nil { - return 0 - } - return *e.WaitTimer -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (e *EnvResponse) GetTotalCount() int { - if e == nil || e.TotalCount == nil { - return 0 - } - return *e.TotalCount -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (e *EnvReviewers) GetID() int64 { - if e == nil || e.ID == nil { - return 0 - } - return *e.ID -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (e *EnvReviewers) GetType() string { - if e == nil || e.Type == nil { - return "" - } - return *e.Type -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (e *ErrorBlock) GetCreatedAt() Timestamp { - if e == nil || e.CreatedAt == nil { - return Timestamp{} - } - return *e.CreatedAt -} - -// GetBlock returns the Block field. -func (e *ErrorResponse) GetBlock() *ErrorBlock { - if e == nil { - return nil - } - return e.Block -} - -// GetActor returns the Actor field. -func (e *Event) GetActor() *User { - if e == nil { - return nil - } - return e.Actor -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (e *Event) GetCreatedAt() Timestamp { - if e == nil || e.CreatedAt == nil { - return Timestamp{} - } - return *e.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (e *Event) GetID() string { - if e == nil || e.ID == nil { - return "" - } - return *e.ID -} - -// GetOrg returns the Org field. -func (e *Event) GetOrg() *Organization { - if e == nil { - return nil - } - return e.Org -} - -// GetPublic returns the Public field if it's non-nil, zero value otherwise. -func (e *Event) GetPublic() bool { - if e == nil || e.Public == nil { - return false - } - return *e.Public -} - -// GetRawPayload returns the RawPayload field if it's non-nil, zero value otherwise. -func (e *Event) GetRawPayload() json.RawMessage { - if e == nil || e.RawPayload == nil { - return json.RawMessage{} - } - return *e.RawPayload -} - -// GetRepo returns the Repo field. -func (e *Event) GetRepo() *Repository { - if e == nil { - return nil - } - return e.Repo -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (e *Event) GetType() string { - if e == nil || e.Type == nil { - return "" - } - return *e.Type -} - -// GetGroupID returns the GroupID field if it's non-nil, zero value otherwise. -func (e *ExternalGroup) GetGroupID() int64 { - if e == nil || e.GroupID == nil { - return 0 - } - return *e.GroupID -} - -// GetGroupName returns the GroupName field if it's non-nil, zero value otherwise. 
-func (e *ExternalGroup) GetGroupName() string { - if e == nil || e.GroupName == nil { - return "" - } - return *e.GroupName -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (e *ExternalGroup) GetUpdatedAt() Timestamp { - if e == nil || e.UpdatedAt == nil { - return Timestamp{} - } - return *e.UpdatedAt -} - -// GetMemberEmail returns the MemberEmail field if it's non-nil, zero value otherwise. -func (e *ExternalGroupMember) GetMemberEmail() string { - if e == nil || e.MemberEmail == nil { - return "" - } - return *e.MemberEmail -} - -// GetMemberID returns the MemberID field if it's non-nil, zero value otherwise. -func (e *ExternalGroupMember) GetMemberID() int64 { - if e == nil || e.MemberID == nil { - return 0 - } - return *e.MemberID -} - -// GetMemberLogin returns the MemberLogin field if it's non-nil, zero value otherwise. -func (e *ExternalGroupMember) GetMemberLogin() string { - if e == nil || e.MemberLogin == nil { - return "" - } - return *e.MemberLogin -} - -// GetMemberName returns the MemberName field if it's non-nil, zero value otherwise. -func (e *ExternalGroupMember) GetMemberName() string { - if e == nil || e.MemberName == nil { - return "" - } - return *e.MemberName -} - -// GetTeamID returns the TeamID field if it's non-nil, zero value otherwise. -func (e *ExternalGroupTeam) GetTeamID() int64 { - if e == nil || e.TeamID == nil { - return 0 - } - return *e.TeamID -} - -// GetTeamName returns the TeamName field if it's non-nil, zero value otherwise. -func (e *ExternalGroupTeam) GetTeamName() string { - if e == nil || e.TeamName == nil { - return "" - } - return *e.TeamName -} - -// GetHRef returns the HRef field if it's non-nil, zero value otherwise. -func (f *FeedLink) GetHRef() string { - if f == nil || f.HRef == nil { - return "" - } - return *f.HRef -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (f *FeedLink) GetType() string { - if f == nil || f.Type == nil { - return "" - } - return *f.Type -} - -// GetCurrentUser returns the CurrentUser field. -func (f *FeedLinks) GetCurrentUser() *FeedLink { - if f == nil { - return nil - } - return f.CurrentUser -} - -// GetCurrentUserActor returns the CurrentUserActor field. -func (f *FeedLinks) GetCurrentUserActor() *FeedLink { - if f == nil { - return nil - } - return f.CurrentUserActor -} - -// GetCurrentUserOrganization returns the CurrentUserOrganization field. -func (f *FeedLinks) GetCurrentUserOrganization() *FeedLink { - if f == nil { - return nil - } - return f.CurrentUserOrganization -} - -// GetCurrentUserPublic returns the CurrentUserPublic field. -func (f *FeedLinks) GetCurrentUserPublic() *FeedLink { - if f == nil { - return nil - } - return f.CurrentUserPublic -} - -// GetTimeline returns the Timeline field. -func (f *FeedLinks) GetTimeline() *FeedLink { - if f == nil { - return nil - } - return f.Timeline -} - -// GetUser returns the User field. -func (f *FeedLinks) GetUser() *FeedLink { - if f == nil { - return nil - } - return f.User -} - -// GetCurrentUserActorURL returns the CurrentUserActorURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetCurrentUserActorURL() string { - if f == nil || f.CurrentUserActorURL == nil { - return "" - } - return *f.CurrentUserActorURL -} - -// GetCurrentUserOrganizationURL returns the CurrentUserOrganizationURL field if it's non-nil, zero value otherwise. 
-func (f *Feeds) GetCurrentUserOrganizationURL() string { - if f == nil || f.CurrentUserOrganizationURL == nil { - return "" - } - return *f.CurrentUserOrganizationURL -} - -// GetCurrentUserPublicURL returns the CurrentUserPublicURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetCurrentUserPublicURL() string { - if f == nil || f.CurrentUserPublicURL == nil { - return "" - } - return *f.CurrentUserPublicURL -} - -// GetCurrentUserURL returns the CurrentUserURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetCurrentUserURL() string { - if f == nil || f.CurrentUserURL == nil { - return "" - } - return *f.CurrentUserURL -} - -// GetLinks returns the Links field. -func (f *Feeds) GetLinks() *FeedLinks { - if f == nil { - return nil - } - return f.Links -} - -// GetTimelineURL returns the TimelineURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetTimelineURL() string { - if f == nil || f.TimelineURL == nil { - return "" - } - return *f.TimelineURL -} - -// GetUserURL returns the UserURL field if it's non-nil, zero value otherwise. -func (f *Feeds) GetUserURL() string { - if f == nil || f.UserURL == nil { - return "" - } - return *f.UserURL -} - -// GetIdentifier returns the Identifier field if it's non-nil, zero value otherwise. -func (f *FirstPatchedVersion) GetIdentifier() string { - if f == nil || f.Identifier == nil { - return "" - } - return *f.Identifier -} - -// GetForkee returns the Forkee field. -func (f *ForkEvent) GetForkee() *Repository { - if f == nil { - return nil - } - return f.Forkee -} - -// GetInstallation returns the Installation field. -func (f *ForkEvent) GetInstallation() *Installation { - if f == nil { - return nil - } - return f.Installation -} - -// GetRepo returns the Repo field. -func (f *ForkEvent) GetRepo() *Repository { - if f == nil { - return nil - } - return f.Repo -} - -// GetSender returns the Sender field. -func (f *ForkEvent) GetSender() *User { - if f == nil { - return nil - } - return f.Sender -} - -// GetWorkFolder returns the WorkFolder field if it's non-nil, zero value otherwise. -func (g *GenerateJITConfigRequest) GetWorkFolder() string { - if g == nil || g.WorkFolder == nil { - return "" - } - return *g.WorkFolder -} - -// GetPreviousTagName returns the PreviousTagName field if it's non-nil, zero value otherwise. -func (g *GenerateNotesOptions) GetPreviousTagName() string { - if g == nil || g.PreviousTagName == nil { - return "" - } - return *g.PreviousTagName -} - -// GetTargetCommitish returns the TargetCommitish field if it's non-nil, zero value otherwise. -func (g *GenerateNotesOptions) GetTargetCommitish() string { - if g == nil || g.TargetCommitish == nil { - return "" - } - return *g.TargetCommitish -} - -// GetInclude returns the Include field if it's non-nil, zero value otherwise. -func (g *GetAuditLogOptions) GetInclude() string { - if g == nil || g.Include == nil { - return "" - } - return *g.Include -} - -// GetOrder returns the Order field if it's non-nil, zero value otherwise. -func (g *GetAuditLogOptions) GetOrder() string { - if g == nil || g.Order == nil { - return "" - } - return *g.Order -} - -// GetPhrase returns the Phrase field if it's non-nil, zero value otherwise. -func (g *GetAuditLogOptions) GetPhrase() string { - if g == nil || g.Phrase == nil { - return "" - } - return *g.Phrase -} - -// GetComments returns the Comments field if it's non-nil, zero value otherwise. 
-func (g *Gist) GetComments() int { - if g == nil || g.Comments == nil { - return 0 - } - return *g.Comments -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *Gist) GetCreatedAt() Timestamp { - if g == nil || g.CreatedAt == nil { - return Timestamp{} - } - return *g.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (g *Gist) GetDescription() string { - if g == nil || g.Description == nil { - return "" - } - return *g.Description -} - -// GetFiles returns the Files map if it's non-nil, an empty map otherwise. -func (g *Gist) GetFiles() map[GistFilename]GistFile { - if g == nil || g.Files == nil { - return map[GistFilename]GistFile{} - } - return g.Files -} - -// GetGitPullURL returns the GitPullURL field if it's non-nil, zero value otherwise. -func (g *Gist) GetGitPullURL() string { - if g == nil || g.GitPullURL == nil { - return "" - } - return *g.GitPullURL -} - -// GetGitPushURL returns the GitPushURL field if it's non-nil, zero value otherwise. -func (g *Gist) GetGitPushURL() string { - if g == nil || g.GitPushURL == nil { - return "" - } - return *g.GitPushURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (g *Gist) GetHTMLURL() string { - if g == nil || g.HTMLURL == nil { - return "" - } - return *g.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *Gist) GetID() string { - if g == nil || g.ID == nil { - return "" - } - return *g.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (g *Gist) GetNodeID() string { - if g == nil || g.NodeID == nil { - return "" - } - return *g.NodeID -} - -// GetOwner returns the Owner field. -func (g *Gist) GetOwner() *User { - if g == nil { - return nil - } - return g.Owner -} - -// GetPublic returns the Public field if it's non-nil, zero value otherwise. -func (g *Gist) GetPublic() bool { - if g == nil || g.Public == nil { - return false - } - return *g.Public -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (g *Gist) GetUpdatedAt() Timestamp { - if g == nil || g.UpdatedAt == nil { - return Timestamp{} - } - return *g.UpdatedAt -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (g *GistComment) GetBody() string { - if g == nil || g.Body == nil { - return "" - } - return *g.Body -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GistComment) GetCreatedAt() Timestamp { - if g == nil || g.CreatedAt == nil { - return Timestamp{} - } - return *g.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *GistComment) GetID() int64 { - if g == nil || g.ID == nil { - return 0 - } - return *g.ID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GistComment) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetUser returns the User field. -func (g *GistComment) GetUser() *User { - if g == nil { - return nil - } - return g.User -} - -// GetChangeStatus returns the ChangeStatus field. -func (g *GistCommit) GetChangeStatus() *CommitStats { - if g == nil { - return nil - } - return g.ChangeStatus -} - -// GetCommittedAt returns the CommittedAt field if it's non-nil, zero value otherwise. 
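Map-typed getters differ from the pointer getters: GetFiles above returns an empty map rather than nil, so iteration needs no guard. A small sketch under that assumption (g would come from, e.g., Gists.Get; the helper name is hypothetical):

	func listGistFiles(g *github.Gist) {
		for name, f := range g.GetFiles() { // safe even when g == nil
			fmt.Printf("%v: %d bytes (%s)\n", name, f.GetSize(), f.GetLanguage())
		}
	}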
-func (g *GistCommit) GetCommittedAt() Timestamp { - if g == nil || g.CommittedAt == nil { - return Timestamp{} - } - return *g.CommittedAt -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (g *GistCommit) GetNodeID() string { - if g == nil || g.NodeID == nil { - return "" - } - return *g.NodeID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GistCommit) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetUser returns the User field. -func (g *GistCommit) GetUser() *User { - if g == nil { - return nil - } - return g.User -} - -// GetVersion returns the Version field if it's non-nil, zero value otherwise. -func (g *GistCommit) GetVersion() string { - if g == nil || g.Version == nil { - return "" - } - return *g.Version -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (g *GistFile) GetContent() string { - if g == nil || g.Content == nil { - return "" - } - return *g.Content -} - -// GetFilename returns the Filename field if it's non-nil, zero value otherwise. -func (g *GistFile) GetFilename() string { - if g == nil || g.Filename == nil { - return "" - } - return *g.Filename -} - -// GetLanguage returns the Language field if it's non-nil, zero value otherwise. -func (g *GistFile) GetLanguage() string { - if g == nil || g.Language == nil { - return "" - } - return *g.Language -} - -// GetRawURL returns the RawURL field if it's non-nil, zero value otherwise. -func (g *GistFile) GetRawURL() string { - if g == nil || g.RawURL == nil { - return "" - } - return *g.RawURL -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (g *GistFile) GetSize() int { - if g == nil || g.Size == nil { - return 0 - } - return *g.Size -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (g *GistFile) GetType() string { - if g == nil || g.Type == nil { - return "" - } - return *g.Type -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GistFork) GetCreatedAt() Timestamp { - if g == nil || g.CreatedAt == nil { - return Timestamp{} - } - return *g.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *GistFork) GetID() string { - if g == nil || g.ID == nil { - return "" - } - return *g.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (g *GistFork) GetNodeID() string { - if g == nil || g.NodeID == nil { - return "" - } - return *g.NodeID -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (g *GistFork) GetUpdatedAt() Timestamp { - if g == nil || g.UpdatedAt == nil { - return Timestamp{} - } - return *g.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GistFork) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetUser returns the User field. -func (g *GistFork) GetUser() *User { - if g == nil { - return nil - } - return g.User -} - -// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise. -func (g *GistStats) GetPrivateGists() int { - if g == nil || g.PrivateGists == nil { - return 0 - } - return *g.PrivateGists -} - -// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise. 
-func (g *GistStats) GetPublicGists() int { - if g == nil || g.PublicGists == nil { - return 0 - } - return *g.PublicGists -} - -// GetTotalGists returns the TotalGists field if it's non-nil, zero value otherwise. -func (g *GistStats) GetTotalGists() int { - if g == nil || g.TotalGists == nil { - return 0 - } - return *g.TotalGists -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (g *GitHubAppAuthorizationEvent) GetAction() string { - if g == nil || g.Action == nil { - return "" - } - return *g.Action -} - -// GetInstallation returns the Installation field. -func (g *GitHubAppAuthorizationEvent) GetInstallation() *Installation { - if g == nil { - return nil - } - return g.Installation -} - -// GetSender returns the Sender field. -func (g *GitHubAppAuthorizationEvent) GetSender() *User { - if g == nil { - return nil - } - return g.Sender -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (g *Gitignore) GetName() string { - if g == nil || g.Name == nil { - return "" - } - return *g.Name -} - -// GetSource returns the Source field if it's non-nil, zero value otherwise. -func (g *Gitignore) GetSource() string { - if g == nil || g.Source == nil { - return "" - } - return *g.Source -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (g *GitObject) GetSHA() string { - if g == nil || g.SHA == nil { - return "" - } - return *g.SHA -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (g *GitObject) GetType() string { - if g == nil || g.Type == nil { - return "" - } - return *g.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *GitObject) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetInstallation returns the Installation field. -func (g *GollumEvent) GetInstallation() *Installation { - if g == nil { - return nil - } - return g.Installation -} - -// GetRepo returns the Repo field. -func (g *GollumEvent) GetRepo() *Repository { - if g == nil { - return nil - } - return g.Repo -} - -// GetSender returns the Sender field. -func (g *GollumEvent) GetSender() *User { - if g == nil { - return nil - } - return g.Sender -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (g *GPGEmail) GetEmail() string { - if g == nil || g.Email == nil { - return "" - } - return *g.Email -} - -// GetVerified returns the Verified field if it's non-nil, zero value otherwise. -func (g *GPGEmail) GetVerified() bool { - if g == nil || g.Verified == nil { - return false - } - return *g.Verified -} - -// GetCanCertify returns the CanCertify field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCanCertify() bool { - if g == nil || g.CanCertify == nil { - return false - } - return *g.CanCertify -} - -// GetCanEncryptComms returns the CanEncryptComms field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCanEncryptComms() bool { - if g == nil || g.CanEncryptComms == nil { - return false - } - return *g.CanEncryptComms -} - -// GetCanEncryptStorage returns the CanEncryptStorage field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCanEncryptStorage() bool { - if g == nil || g.CanEncryptStorage == nil { - return false - } - return *g.CanEncryptStorage -} - -// GetCanSign returns the CanSign field if it's non-nil, zero value otherwise. 
-func (g *GPGKey) GetCanSign() bool { - if g == nil || g.CanSign == nil { - return false - } - return *g.CanSign -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCreatedAt() Timestamp { - if g == nil || g.CreatedAt == nil { - return Timestamp{} - } - return *g.CreatedAt -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetExpiresAt() Timestamp { - if g == nil || g.ExpiresAt == nil { - return Timestamp{} - } - return *g.ExpiresAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetID() int64 { - if g == nil || g.ID == nil { - return 0 - } - return *g.ID -} - -// GetKeyID returns the KeyID field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetKeyID() string { - if g == nil || g.KeyID == nil { - return "" - } - return *g.KeyID -} - -// GetPrimaryKeyID returns the PrimaryKeyID field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetPrimaryKeyID() int64 { - if g == nil || g.PrimaryKeyID == nil { - return 0 - } - return *g.PrimaryKeyID -} - -// GetPublicKey returns the PublicKey field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetPublicKey() string { - if g == nil || g.PublicKey == nil { - return "" - } - return *g.PublicKey -} - -// GetRawKey returns the RawKey field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetRawKey() string { - if g == nil || g.RawKey == nil { - return "" - } - return *g.RawKey -} - -// GetApp returns the App field. -func (g *Grant) GetApp() *AuthorizationApp { - if g == nil { - return nil - } - return g.App -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *Grant) GetCreatedAt() Timestamp { - if g == nil || g.CreatedAt == nil { - return Timestamp{} - } - return *g.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (g *Grant) GetID() int64 { - if g == nil || g.ID == nil { - return 0 - } - return *g.ID -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (g *Grant) GetUpdatedAt() Timestamp { - if g == nil || g.UpdatedAt == nil { - return Timestamp{} - } - return *g.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (g *Grant) GetURL() string { - if g == nil || g.URL == nil { - return "" - } - return *g.URL -} - -// GetAuthor returns the Author field. -func (h *HeadCommit) GetAuthor() *CommitAuthor { - if h == nil { - return nil - } - return h.Author -} - -// GetCommitter returns the Committer field. -func (h *HeadCommit) GetCommitter() *CommitAuthor { - if h == nil { - return nil - } - return h.Committer -} - -// GetDistinct returns the Distinct field if it's non-nil, zero value otherwise. -func (h *HeadCommit) GetDistinct() bool { - if h == nil || h.Distinct == nil { - return false - } - return *h.Distinct -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (h *HeadCommit) GetID() string { - if h == nil || h.ID == nil { - return "" - } - return *h.ID -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (h *HeadCommit) GetMessage() string { - if h == nil || h.Message == nil { - return "" - } - return *h.Message -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. 
-func (h *HeadCommit) GetSHA() string { - if h == nil || h.SHA == nil { - return "" - } - return *h.SHA -} - -// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise. -func (h *HeadCommit) GetTimestamp() Timestamp { - if h == nil || h.Timestamp == nil { - return Timestamp{} - } - return *h.Timestamp -} - -// GetTreeID returns the TreeID field if it's non-nil, zero value otherwise. -func (h *HeadCommit) GetTreeID() string { - if h == nil || h.TreeID == nil { - return "" - } - return *h.TreeID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (h *HeadCommit) GetURL() string { - if h == nil || h.URL == nil { - return "" - } - return *h.URL -} - -// GetActive returns the Active field if it's non-nil, zero value otherwise. -func (h *Hook) GetActive() bool { - if h == nil || h.Active == nil { - return false - } - return *h.Active -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (h *Hook) GetCreatedAt() Timestamp { - if h == nil || h.CreatedAt == nil { - return Timestamp{} - } - return *h.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (h *Hook) GetID() int64 { - if h == nil || h.ID == nil { - return 0 - } - return *h.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (h *Hook) GetName() string { - if h == nil || h.Name == nil { - return "" - } - return *h.Name -} - -// GetPingURL returns the PingURL field if it's non-nil, zero value otherwise. -func (h *Hook) GetPingURL() string { - if h == nil || h.PingURL == nil { - return "" - } - return *h.PingURL -} - -// GetTestURL returns the TestURL field if it's non-nil, zero value otherwise. -func (h *Hook) GetTestURL() string { - if h == nil || h.TestURL == nil { - return "" - } - return *h.TestURL -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (h *Hook) GetType() string { - if h == nil || h.Type == nil { - return "" - } - return *h.Type -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (h *Hook) GetUpdatedAt() Timestamp { - if h == nil || h.UpdatedAt == nil { - return Timestamp{} - } - return *h.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (h *Hook) GetURL() string { - if h == nil || h.URL == nil { - return "" - } - return *h.URL -} - -// GetContentType returns the ContentType field if it's non-nil, zero value otherwise. -func (h *HookConfig) GetContentType() string { - if h == nil || h.ContentType == nil { - return "" - } - return *h.ContentType -} - -// GetInsecureSSL returns the InsecureSSL field if it's non-nil, zero value otherwise. -func (h *HookConfig) GetInsecureSSL() string { - if h == nil || h.InsecureSSL == nil { - return "" - } - return *h.InsecureSSL -} - -// GetSecret returns the Secret field if it's non-nil, zero value otherwise. -func (h *HookConfig) GetSecret() string { - if h == nil || h.Secret == nil { - return "" - } - return *h.Secret -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (h *HookConfig) GetURL() string { - if h == nil || h.URL == nil { - return "" - } - return *h.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetAction() string { - if h == nil || h.Action == nil { - return "" - } - return *h.Action -} - -// GetDeliveredAt returns the DeliveredAt field if it's non-nil, zero value otherwise. 
-func (h *HookDelivery) GetDeliveredAt() Timestamp { - if h == nil || h.DeliveredAt == nil { - return Timestamp{} - } - return *h.DeliveredAt -} - -// GetDuration returns the Duration field. -func (h *HookDelivery) GetDuration() *float64 { - if h == nil { - return nil - } - return h.Duration -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetEvent() string { - if h == nil || h.Event == nil { - return "" - } - return *h.Event -} - -// GetGUID returns the GUID field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetGUID() string { - if h == nil || h.GUID == nil { - return "" - } - return *h.GUID -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetID() int64 { - if h == nil || h.ID == nil { - return 0 - } - return *h.ID -} - -// GetInstallationID returns the InstallationID field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetInstallationID() int64 { - if h == nil || h.InstallationID == nil { - return 0 - } - return *h.InstallationID -} - -// GetRedelivery returns the Redelivery field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetRedelivery() bool { - if h == nil || h.Redelivery == nil { - return false - } - return *h.Redelivery -} - -// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetRepositoryID() int64 { - if h == nil || h.RepositoryID == nil { - return 0 - } - return *h.RepositoryID -} - -// GetRequest returns the Request field. -func (h *HookDelivery) GetRequest() *HookRequest { - if h == nil { - return nil - } - return h.Request -} - -// GetResponse returns the Response field. -func (h *HookDelivery) GetResponse() *HookResponse { - if h == nil { - return nil - } - return h.Response -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetStatus() string { - if h == nil || h.Status == nil { - return "" - } - return *h.Status -} - -// GetStatusCode returns the StatusCode field if it's non-nil, zero value otherwise. -func (h *HookDelivery) GetStatusCode() int { - if h == nil || h.StatusCode == nil { - return 0 - } - return *h.StatusCode -} - -// GetHeaders returns the Headers map if it's non-nil, an empty map otherwise. -func (h *HookRequest) GetHeaders() map[string]string { - if h == nil || h.Headers == nil { - return map[string]string{} - } - return h.Headers -} - -// GetRawPayload returns the RawPayload field if it's non-nil, zero value otherwise. -func (h *HookRequest) GetRawPayload() json.RawMessage { - if h == nil || h.RawPayload == nil { - return json.RawMessage{} - } - return *h.RawPayload -} - -// GetHeaders returns the Headers map if it's non-nil, an empty map otherwise. -func (h *HookResponse) GetHeaders() map[string]string { - if h == nil || h.Headers == nil { - return map[string]string{} - } - return h.Headers -} - -// GetRawPayload returns the RawPayload field if it's non-nil, zero value otherwise. -func (h *HookResponse) GetRawPayload() json.RawMessage { - if h == nil || h.RawPayload == nil { - return json.RawMessage{} - } - return *h.RawPayload -} - -// GetActiveHooks returns the ActiveHooks field if it's non-nil, zero value otherwise. -func (h *HookStats) GetActiveHooks() int { - if h == nil || h.ActiveHooks == nil { - return 0 - } - return *h.ActiveHooks -} - -// GetInactiveHooks returns the InactiveHooks field if it's non-nil, zero value otherwise. 
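Because GetRequest and GetRawPayload above are both nil-safe, a delivery can be unpacked in one chained expression, leaving invalid JSON as the only failure mode. A sketch under those assumptions (d would come from a hook-deliveries listing call; fmt and encoding/json imports assumed):

	func inspectDelivery(d *github.HookDelivery) {
		var body map[string]any
		if err := json.Unmarshal(d.GetRequest().GetRawPayload(), &body); err != nil {
			return // an unset payload arrives as an empty RawMessage, which is not valid JSON
		}
		fmt.Println(d.GetEvent(), d.GetStatus(), body["action"])
	}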
-func (h *HookStats) GetInactiveHooks() int { - if h == nil || h.InactiveHooks == nil { - return 0 - } - return *h.InactiveHooks -} - -// GetTotalHooks returns the TotalHooks field if it's non-nil, zero value otherwise. -func (h *HookStats) GetTotalHooks() int { - if h == nil || h.TotalHooks == nil { - return 0 - } - return *h.TotalHooks -} - -// GetGroupDescription returns the GroupDescription field if it's non-nil, zero value otherwise. -func (i *IDPGroup) GetGroupDescription() string { - if i == nil || i.GroupDescription == nil { - return "" - } - return *i.GroupDescription -} - -// GetGroupID returns the GroupID field if it's non-nil, zero value otherwise. -func (i *IDPGroup) GetGroupID() string { - if i == nil || i.GroupID == nil { - return "" - } - return *i.GroupID -} - -// GetGroupName returns the GroupName field if it's non-nil, zero value otherwise. -func (i *IDPGroup) GetGroupName() string { - if i == nil || i.GroupName == nil { - return "" - } - return *i.GroupName -} - -// GetAuthorsCount returns the AuthorsCount field if it's non-nil, zero value otherwise. -func (i *Import) GetAuthorsCount() int { - if i == nil || i.AuthorsCount == nil { - return 0 - } - return *i.AuthorsCount -} - -// GetAuthorsURL returns the AuthorsURL field if it's non-nil, zero value otherwise. -func (i *Import) GetAuthorsURL() string { - if i == nil || i.AuthorsURL == nil { - return "" - } - return *i.AuthorsURL -} - -// GetCommitCount returns the CommitCount field if it's non-nil, zero value otherwise. -func (i *Import) GetCommitCount() int { - if i == nil || i.CommitCount == nil { - return 0 - } - return *i.CommitCount -} - -// GetFailedStep returns the FailedStep field if it's non-nil, zero value otherwise. -func (i *Import) GetFailedStep() string { - if i == nil || i.FailedStep == nil { - return "" - } - return *i.FailedStep -} - -// GetHasLargeFiles returns the HasLargeFiles field if it's non-nil, zero value otherwise. -func (i *Import) GetHasLargeFiles() bool { - if i == nil || i.HasLargeFiles == nil { - return false - } - return *i.HasLargeFiles -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (i *Import) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetHumanName returns the HumanName field if it's non-nil, zero value otherwise. -func (i *Import) GetHumanName() string { - if i == nil || i.HumanName == nil { - return "" - } - return *i.HumanName -} - -// GetLargeFilesCount returns the LargeFilesCount field if it's non-nil, zero value otherwise. -func (i *Import) GetLargeFilesCount() int { - if i == nil || i.LargeFilesCount == nil { - return 0 - } - return *i.LargeFilesCount -} - -// GetLargeFilesSize returns the LargeFilesSize field if it's non-nil, zero value otherwise. -func (i *Import) GetLargeFilesSize() int { - if i == nil || i.LargeFilesSize == nil { - return 0 - } - return *i.LargeFilesSize -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (i *Import) GetMessage() string { - if i == nil || i.Message == nil { - return "" - } - return *i.Message -} - -// GetPercent returns the Percent field if it's non-nil, zero value otherwise. -func (i *Import) GetPercent() int { - if i == nil || i.Percent == nil { - return 0 - } - return *i.Percent -} - -// GetPushPercent returns the PushPercent field if it's non-nil, zero value otherwise. 
-func (i *Import) GetPushPercent() int { - if i == nil || i.PushPercent == nil { - return 0 - } - return *i.PushPercent -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (i *Import) GetRepositoryURL() string { - if i == nil || i.RepositoryURL == nil { - return "" - } - return *i.RepositoryURL -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (i *Import) GetStatus() string { - if i == nil || i.Status == nil { - return "" - } - return *i.Status -} - -// GetStatusText returns the StatusText field if it's non-nil, zero value otherwise. -func (i *Import) GetStatusText() string { - if i == nil || i.StatusText == nil { - return "" - } - return *i.StatusText -} - -// GetTFVCProject returns the TFVCProject field if it's non-nil, zero value otherwise. -func (i *Import) GetTFVCProject() string { - if i == nil || i.TFVCProject == nil { - return "" - } - return *i.TFVCProject -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *Import) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetUseLFS returns the UseLFS field if it's non-nil, zero value otherwise. -func (i *Import) GetUseLFS() string { - if i == nil || i.UseLFS == nil { - return "" - } - return *i.UseLFS -} - -// GetVCS returns the VCS field if it's non-nil, zero value otherwise. -func (i *Import) GetVCS() string { - if i == nil || i.VCS == nil { - return "" - } - return *i.VCS -} - -// GetVCSPassword returns the VCSPassword field if it's non-nil, zero value otherwise. -func (i *Import) GetVCSPassword() string { - if i == nil || i.VCSPassword == nil { - return "" - } - return *i.VCSPassword -} - -// GetVCSURL returns the VCSURL field if it's non-nil, zero value otherwise. -func (i *Import) GetVCSURL() string { - if i == nil || i.VCSURL == nil { - return "" - } - return *i.VCSURL -} - -// GetVCSUsername returns the VCSUsername field if it's non-nil, zero value otherwise. -func (i *Import) GetVCSUsername() string { - if i == nil || i.VCSUsername == nil { - return "" - } - return *i.VCSUsername -} - -// GetAccessTokensURL returns the AccessTokensURL field if it's non-nil, zero value otherwise. -func (i *Installation) GetAccessTokensURL() string { - if i == nil || i.AccessTokensURL == nil { - return "" - } - return *i.AccessTokensURL -} - -// GetAccount returns the Account field. -func (i *Installation) GetAccount() *User { - if i == nil { - return nil - } - return i.Account -} - -// GetAppID returns the AppID field if it's non-nil, zero value otherwise. -func (i *Installation) GetAppID() int64 { - if i == nil || i.AppID == nil { - return 0 - } - return *i.AppID -} - -// GetAppSlug returns the AppSlug field if it's non-nil, zero value otherwise. -func (i *Installation) GetAppSlug() string { - if i == nil || i.AppSlug == nil { - return "" - } - return *i.AppSlug -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Installation) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetHasMultipleSingleFiles returns the HasMultipleSingleFiles field if it's non-nil, zero value otherwise. -func (i *Installation) GetHasMultipleSingleFiles() bool { - if i == nil || i.HasMultipleSingleFiles == nil { - return false - } - return *i.HasMultipleSingleFiles -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. 
-func (i *Installation) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *Installation) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (i *Installation) GetNodeID() string { - if i == nil || i.NodeID == nil { - return "" - } - return *i.NodeID -} - -// GetPermissions returns the Permissions field. -func (i *Installation) GetPermissions() *InstallationPermissions { - if i == nil { - return nil - } - return i.Permissions -} - -// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise. -func (i *Installation) GetRepositoriesURL() string { - if i == nil || i.RepositoriesURL == nil { - return "" - } - return *i.RepositoriesURL -} - -// GetRepositorySelection returns the RepositorySelection field if it's non-nil, zero value otherwise. -func (i *Installation) GetRepositorySelection() string { - if i == nil || i.RepositorySelection == nil { - return "" - } - return *i.RepositorySelection -} - -// GetSingleFileName returns the SingleFileName field if it's non-nil, zero value otherwise. -func (i *Installation) GetSingleFileName() string { - if i == nil || i.SingleFileName == nil { - return "" - } - return *i.SingleFileName -} - -// GetSuspendedAt returns the SuspendedAt field if it's non-nil, zero value otherwise. -func (i *Installation) GetSuspendedAt() Timestamp { - if i == nil || i.SuspendedAt == nil { - return Timestamp{} - } - return *i.SuspendedAt -} - -// GetSuspendedBy returns the SuspendedBy field. -func (i *Installation) GetSuspendedBy() *User { - if i == nil { - return nil - } - return i.SuspendedBy -} - -// GetTargetID returns the TargetID field if it's non-nil, zero value otherwise. -func (i *Installation) GetTargetID() int64 { - if i == nil || i.TargetID == nil { - return 0 - } - return *i.TargetID -} - -// GetTargetType returns the TargetType field if it's non-nil, zero value otherwise. -func (i *Installation) GetTargetType() string { - if i == nil || i.TargetType == nil { - return "" - } - return *i.TargetType -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *Installation) GetUpdatedAt() Timestamp { - if i == nil || i.UpdatedAt == nil { - return Timestamp{} - } - return *i.UpdatedAt -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *InstallationEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetInstallation returns the Installation field. -func (i *InstallationEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetRequester returns the Requester field. -func (i *InstallationEvent) GetRequester() *User { - if i == nil { - return nil - } - return i.Requester -} - -// GetSender returns the Sender field. -func (i *InstallationEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetActions returns the Actions field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetActions() string { - if i == nil || i.Actions == nil { - return "" - } - return *i.Actions -} - -// GetAdministration returns the Administration field if it's non-nil, zero value otherwise. 
-func (i *InstallationPermissions) GetAdministration() string { - if i == nil || i.Administration == nil { - return "" - } - return *i.Administration -} - -// GetBlocking returns the Blocking field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetBlocking() string { - if i == nil || i.Blocking == nil { - return "" - } - return *i.Blocking -} - -// GetChecks returns the Checks field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetChecks() string { - if i == nil || i.Checks == nil { - return "" - } - return *i.Checks -} - -// GetContentReferences returns the ContentReferences field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetContentReferences() string { - if i == nil || i.ContentReferences == nil { - return "" - } - return *i.ContentReferences -} - -// GetContents returns the Contents field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetContents() string { - if i == nil || i.Contents == nil { - return "" - } - return *i.Contents -} - -// GetDeployments returns the Deployments field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetDeployments() string { - if i == nil || i.Deployments == nil { - return "" - } - return *i.Deployments -} - -// GetEmails returns the Emails field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetEmails() string { - if i == nil || i.Emails == nil { - return "" - } - return *i.Emails -} - -// GetEnvironments returns the Environments field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetEnvironments() string { - if i == nil || i.Environments == nil { - return "" - } - return *i.Environments -} - -// GetFollowers returns the Followers field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetFollowers() string { - if i == nil || i.Followers == nil { - return "" - } - return *i.Followers -} - -// GetIssues returns the Issues field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetIssues() string { - if i == nil || i.Issues == nil { - return "" - } - return *i.Issues -} - -// GetMembers returns the Members field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetMembers() string { - if i == nil || i.Members == nil { - return "" - } - return *i.Members -} - -// GetMetadata returns the Metadata field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetMetadata() string { - if i == nil || i.Metadata == nil { - return "" - } - return *i.Metadata -} - -// GetOrganizationAdministration returns the OrganizationAdministration field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationAdministration() string { - if i == nil || i.OrganizationAdministration == nil { - return "" - } - return *i.OrganizationAdministration -} - -// GetOrganizationCustomRoles returns the OrganizationCustomRoles field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationCustomRoles() string { - if i == nil || i.OrganizationCustomRoles == nil { - return "" - } - return *i.OrganizationCustomRoles -} - -// GetOrganizationHooks returns the OrganizationHooks field if it's non-nil, zero value otherwise. 
-func (i *InstallationPermissions) GetOrganizationHooks() string { - if i == nil || i.OrganizationHooks == nil { - return "" - } - return *i.OrganizationHooks -} - -// GetOrganizationPackages returns the OrganizationPackages field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationPackages() string { - if i == nil || i.OrganizationPackages == nil { - return "" - } - return *i.OrganizationPackages -} - -// GetOrganizationPlan returns the OrganizationPlan field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationPlan() string { - if i == nil || i.OrganizationPlan == nil { - return "" - } - return *i.OrganizationPlan -} - -// GetOrganizationPreReceiveHooks returns the OrganizationPreReceiveHooks field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationPreReceiveHooks() string { - if i == nil || i.OrganizationPreReceiveHooks == nil { - return "" - } - return *i.OrganizationPreReceiveHooks -} - -// GetOrganizationProjects returns the OrganizationProjects field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationProjects() string { - if i == nil || i.OrganizationProjects == nil { - return "" - } - return *i.OrganizationProjects -} - -// GetOrganizationSecrets returns the OrganizationSecrets field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationSecrets() string { - if i == nil || i.OrganizationSecrets == nil { - return "" - } - return *i.OrganizationSecrets -} - -// GetOrganizationSelfHostedRunners returns the OrganizationSelfHostedRunners field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationSelfHostedRunners() string { - if i == nil || i.OrganizationSelfHostedRunners == nil { - return "" - } - return *i.OrganizationSelfHostedRunners -} - -// GetOrganizationUserBlocking returns the OrganizationUserBlocking field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetOrganizationUserBlocking() string { - if i == nil || i.OrganizationUserBlocking == nil { - return "" - } - return *i.OrganizationUserBlocking -} - -// GetPackages returns the Packages field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetPackages() string { - if i == nil || i.Packages == nil { - return "" - } - return *i.Packages -} - -// GetPages returns the Pages field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetPages() string { - if i == nil || i.Pages == nil { - return "" - } - return *i.Pages -} - -// GetPullRequests returns the PullRequests field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetPullRequests() string { - if i == nil || i.PullRequests == nil { - return "" - } - return *i.PullRequests -} - -// GetRepositoryHooks returns the RepositoryHooks field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetRepositoryHooks() string { - if i == nil || i.RepositoryHooks == nil { - return "" - } - return *i.RepositoryHooks -} - -// GetRepositoryPreReceiveHooks returns the RepositoryPreReceiveHooks field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetRepositoryPreReceiveHooks() string { - if i == nil || i.RepositoryPreReceiveHooks == nil { - return "" - } - return *i.RepositoryPreReceiveHooks -} - -// GetRepositoryProjects returns the RepositoryProjects field if it's non-nil, zero value otherwise. 
-func (i *InstallationPermissions) GetRepositoryProjects() string { - if i == nil || i.RepositoryProjects == nil { - return "" - } - return *i.RepositoryProjects -} - -// GetSecrets returns the Secrets field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetSecrets() string { - if i == nil || i.Secrets == nil { - return "" - } - return *i.Secrets -} - -// GetSecretScanningAlerts returns the SecretScanningAlerts field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetSecretScanningAlerts() string { - if i == nil || i.SecretScanningAlerts == nil { - return "" - } - return *i.SecretScanningAlerts -} - -// GetSecurityEvents returns the SecurityEvents field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetSecurityEvents() string { - if i == nil || i.SecurityEvents == nil { - return "" - } - return *i.SecurityEvents -} - -// GetSingleFile returns the SingleFile field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetSingleFile() string { - if i == nil || i.SingleFile == nil { - return "" - } - return *i.SingleFile -} - -// GetStatuses returns the Statuses field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetStatuses() string { - if i == nil || i.Statuses == nil { - return "" - } - return *i.Statuses -} - -// GetTeamDiscussions returns the TeamDiscussions field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetTeamDiscussions() string { - if i == nil || i.TeamDiscussions == nil { - return "" - } - return *i.TeamDiscussions -} - -// GetVulnerabilityAlerts returns the VulnerabilityAlerts field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetVulnerabilityAlerts() string { - if i == nil || i.VulnerabilityAlerts == nil { - return "" - } - return *i.VulnerabilityAlerts -} - -// GetWorkflows returns the Workflows field if it's non-nil, zero value otherwise. -func (i *InstallationPermissions) GetWorkflows() string { - if i == nil || i.Workflows == nil { - return "" - } - return *i.Workflows -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *InstallationRepositoriesEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetInstallation returns the Installation field. -func (i *InstallationRepositoriesEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetRepositorySelection returns the RepositorySelection field if it's non-nil, zero value otherwise. -func (i *InstallationRepositoriesEvent) GetRepositorySelection() string { - if i == nil || i.RepositorySelection == nil { - return "" - } - return *i.RepositorySelection -} - -// GetSender returns the Sender field. -func (i *InstallationRepositoriesEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (i *InstallationToken) GetExpiresAt() Timestamp { - if i == nil || i.ExpiresAt == nil { - return Timestamp{} - } - return *i.ExpiresAt -} - -// GetPermissions returns the Permissions field. -func (i *InstallationToken) GetPermissions() *InstallationPermissions { - if i == nil { - return nil - } - return i.Permissions -} - -// GetToken returns the Token field if it's non-nil, zero value otherwise. 
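The InstallationToken getters here combine naturally: Timestamp embeds time.Time, so its methods are promoted through GetExpiresAt. A sketch of a freshness check, assuming a time import and an illustrative "write" permission value (t would come from, e.g., Apps.CreateInstallationToken):

	func tokenUsable(t *github.InstallationToken) bool {
		return t.GetToken() != "" &&
			t.GetExpiresAt().After(time.Now()) && // Timestamp promotes time.Time methods
			t.GetPermissions().GetContents() == "write"
	}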
-func (i *InstallationToken) GetToken() string { - if i == nil || i.Token == nil { - return "" - } - return *i.Token -} - -// GetPermissions returns the Permissions field. -func (i *InstallationTokenOptions) GetPermissions() *InstallationPermissions { - if i == nil { - return nil - } - return i.Permissions -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (i *InteractionRestriction) GetExpiresAt() Timestamp { - if i == nil || i.ExpiresAt == nil { - return Timestamp{} - } - return *i.ExpiresAt -} - -// GetLimit returns the Limit field if it's non-nil, zero value otherwise. -func (i *InteractionRestriction) GetLimit() string { - if i == nil || i.Limit == nil { - return "" - } - return *i.Limit -} - -// GetOrigin returns the Origin field if it's non-nil, zero value otherwise. -func (i *InteractionRestriction) GetOrigin() string { - if i == nil || i.Origin == nil { - return "" - } - return *i.Origin -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Invitation) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (i *Invitation) GetEmail() string { - if i == nil || i.Email == nil { - return "" - } - return *i.Email -} - -// GetFailedAt returns the FailedAt field if it's non-nil, zero value otherwise. -func (i *Invitation) GetFailedAt() Timestamp { - if i == nil || i.FailedAt == nil { - return Timestamp{} - } - return *i.FailedAt -} - -// GetFailedReason returns the FailedReason field if it's non-nil, zero value otherwise. -func (i *Invitation) GetFailedReason() string { - if i == nil || i.FailedReason == nil { - return "" - } - return *i.FailedReason -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *Invitation) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetInvitationTeamURL returns the InvitationTeamURL field if it's non-nil, zero value otherwise. -func (i *Invitation) GetInvitationTeamURL() string { - if i == nil || i.InvitationTeamURL == nil { - return "" - } - return *i.InvitationTeamURL -} - -// GetInviter returns the Inviter field. -func (i *Invitation) GetInviter() *User { - if i == nil { - return nil - } - return i.Inviter -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (i *Invitation) GetLogin() string { - if i == nil || i.Login == nil { - return "" - } - return *i.Login -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (i *Invitation) GetNodeID() string { - if i == nil || i.NodeID == nil { - return "" - } - return *i.NodeID -} - -// GetRole returns the Role field if it's non-nil, zero value otherwise. -func (i *Invitation) GetRole() string { - if i == nil || i.Role == nil { - return "" - } - return *i.Role -} - -// GetTeamCount returns the TeamCount field if it's non-nil, zero value otherwise. -func (i *Invitation) GetTeamCount() int { - if i == nil || i.TeamCount == nil { - return 0 - } - return *i.TeamCount -} - -// GetActiveLockReason returns the ActiveLockReason field if it's non-nil, zero value otherwise. -func (i *Issue) GetActiveLockReason() string { - if i == nil || i.ActiveLockReason == nil { - return "" - } - return *i.ActiveLockReason -} - -// GetAssignee returns the Assignee field. 
-func (i *Issue) GetAssignee() *User { - if i == nil { - return nil - } - return i.Assignee -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (i *Issue) GetAuthorAssociation() string { - if i == nil || i.AuthorAssociation == nil { - return "" - } - return *i.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (i *Issue) GetBody() string { - if i == nil || i.Body == nil { - return "" - } - return *i.Body -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetClosedAt() Timestamp { - if i == nil || i.ClosedAt == nil { - return Timestamp{} - } - return *i.ClosedAt -} - -// GetClosedBy returns the ClosedBy field. -func (i *Issue) GetClosedBy() *User { - if i == nil { - return nil - } - return i.ClosedBy -} - -// GetComments returns the Comments field if it's non-nil, zero value otherwise. -func (i *Issue) GetComments() int { - if i == nil || i.Comments == nil { - return 0 - } - return *i.Comments -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetCommentsURL() string { - if i == nil || i.CommentsURL == nil { - return "" - } - return *i.CommentsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetEventsURL() string { - if i == nil || i.EventsURL == nil { - return "" - } - return *i.EventsURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *Issue) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetLabelsURL() string { - if i == nil || i.LabelsURL == nil { - return "" - } - return *i.LabelsURL -} - -// GetLocked returns the Locked field if it's non-nil, zero value otherwise. -func (i *Issue) GetLocked() bool { - if i == nil || i.Locked == nil { - return false - } - return *i.Locked -} - -// GetMilestone returns the Milestone field. -func (i *Issue) GetMilestone() *Milestone { - if i == nil { - return nil - } - return i.Milestone -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (i *Issue) GetNodeID() string { - if i == nil || i.NodeID == nil { - return "" - } - return *i.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (i *Issue) GetNumber() int { - if i == nil || i.Number == nil { - return 0 - } - return *i.Number -} - -// GetPullRequestLinks returns the PullRequestLinks field. -func (i *Issue) GetPullRequestLinks() *PullRequestLinks { - if i == nil { - return nil - } - return i.PullRequestLinks -} - -// GetReactions returns the Reactions field. -func (i *Issue) GetReactions() *Reactions { - if i == nil { - return nil - } - return i.Reactions -} - -// GetRepository returns the Repository field. 
-func (i *Issue) GetRepository() *Repository { - if i == nil { - return nil - } - return i.Repository -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (i *Issue) GetRepositoryURL() string { - if i == nil || i.RepositoryURL == nil { - return "" - } - return *i.RepositoryURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (i *Issue) GetState() string { - if i == nil || i.State == nil { - return "" - } - return *i.State -} - -// GetStateReason returns the StateReason field if it's non-nil, zero value otherwise. -func (i *Issue) GetStateReason() string { - if i == nil || i.StateReason == nil { - return "" - } - return *i.StateReason -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (i *Issue) GetTitle() string { - if i == nil || i.Title == nil { - return "" - } - return *i.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetUpdatedAt() Timestamp { - if i == nil || i.UpdatedAt == nil { - return Timestamp{} - } - return *i.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *Issue) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetUser returns the User field. -func (i *Issue) GetUser() *User { - if i == nil { - return nil - } - return i.User -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetAuthorAssociation() string { - if i == nil || i.AuthorAssociation == nil { - return "" - } - return *i.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetBody() string { - if i == nil || i.Body == nil { - return "" - } - return *i.Body -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetHTMLURL() string { - if i == nil || i.HTMLURL == nil { - return "" - } - return *i.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetIssueURL returns the IssueURL field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetIssueURL() string { - if i == nil || i.IssueURL == nil { - return "" - } - return *i.IssueURL -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetNodeID() string { - if i == nil || i.NodeID == nil { - return "" - } - return *i.NodeID -} - -// GetReactions returns the Reactions field. -func (i *IssueComment) GetReactions() *Reactions { - if i == nil { - return nil - } - return i.Reactions -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetUpdatedAt() Timestamp { - if i == nil || i.UpdatedAt == nil { - return Timestamp{} - } - return *i.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetUser returns the User field. 
-func (i *IssueComment) GetUser() *User { - if i == nil { - return nil - } - return i.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *IssueCommentEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetChanges returns the Changes field. -func (i *IssueCommentEvent) GetChanges() *EditChange { - if i == nil { - return nil - } - return i.Changes -} - -// GetComment returns the Comment field. -func (i *IssueCommentEvent) GetComment() *IssueComment { - if i == nil { - return nil - } - return i.Comment -} - -// GetInstallation returns the Installation field. -func (i *IssueCommentEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetIssue returns the Issue field. -func (i *IssueCommentEvent) GetIssue() *Issue { - if i == nil { - return nil - } - return i.Issue -} - -// GetOrganization returns the Organization field. -func (i *IssueCommentEvent) GetOrganization() *Organization { - if i == nil { - return nil - } - return i.Organization -} - -// GetRepo returns the Repo field. -func (i *IssueCommentEvent) GetRepo() *Repository { - if i == nil { - return nil - } - return i.Repo -} - -// GetSender returns the Sender field. -func (i *IssueCommentEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetActor returns the Actor field. -func (i *IssueEvent) GetActor() *User { - if i == nil { - return nil - } - return i.Actor -} - -// GetAssignee returns the Assignee field. -func (i *IssueEvent) GetAssignee() *User { - if i == nil { - return nil - } - return i.Assignee -} - -// GetAssigner returns the Assigner field. -func (i *IssueEvent) GetAssigner() *User { - if i == nil { - return nil - } - return i.Assigner -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetCommitID() string { - if i == nil || i.CommitID == nil { - return "" - } - return *i.CommitID -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetDismissedReview returns the DismissedReview field. -func (i *IssueEvent) GetDismissedReview() *DismissedReview { - if i == nil { - return nil - } - return i.DismissedReview -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetEvent() string { - if i == nil || i.Event == nil { - return "" - } - return *i.Event -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetID() int64 { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetIssue returns the Issue field. -func (i *IssueEvent) GetIssue() *Issue { - if i == nil { - return nil - } - return i.Issue -} - -// GetLabel returns the Label field. -func (i *IssueEvent) GetLabel() *Label { - if i == nil { - return nil - } - return i.Label -} - -// GetLockReason returns the LockReason field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetLockReason() string { - if i == nil || i.LockReason == nil { - return "" - } - return *i.LockReason -} - -// GetMilestone returns the Milestone field. -func (i *IssueEvent) GetMilestone() *Milestone { - if i == nil { - return nil - } - return i.Milestone -} - -// GetProjectCard returns the ProjectCard field. 
-func (i *IssueEvent) GetProjectCard() *ProjectCard { - if i == nil { - return nil - } - return i.ProjectCard -} - -// GetRename returns the Rename field. -func (i *IssueEvent) GetRename() *Rename { - if i == nil { - return nil - } - return i.Rename -} - -// GetRequestedReviewer returns the RequestedReviewer field. -func (i *IssueEvent) GetRequestedReviewer() *User { - if i == nil { - return nil - } - return i.RequestedReviewer -} - -// GetReviewRequester returns the ReviewRequester field. -func (i *IssueEvent) GetReviewRequester() *User { - if i == nil { - return nil - } - return i.ReviewRequester -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *IssueEvent) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetAssignee returns the Assignee field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetAssignee() string { - if i == nil || i.Assignee == nil { - return "" - } - return *i.Assignee -} - -// GetClosed returns the Closed field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetClosed() bool { - if i == nil || i.Closed == nil { - return false - } - return *i.Closed -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetClosedAt() Timestamp { - if i == nil || i.ClosedAt == nil { - return Timestamp{} - } - return *i.ClosedAt -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetMilestone returns the Milestone field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetMilestone() int { - if i == nil || i.Milestone == nil { - return 0 - } - return *i.Milestone -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetUpdatedAt() Timestamp { - if i == nil || i.UpdatedAt == nil { - return Timestamp{} - } - return *i.UpdatedAt -} - -// GetCode returns the Code field if it's non-nil, zero value otherwise. -func (i *IssueImportError) GetCode() string { - if i == nil || i.Code == nil { - return "" - } - return *i.Code -} - -// GetField returns the Field field if it's non-nil, zero value otherwise. -func (i *IssueImportError) GetField() string { - if i == nil || i.Field == nil { - return "" - } - return *i.Field -} - -// GetLocation returns the Location field if it's non-nil, zero value otherwise. -func (i *IssueImportError) GetLocation() string { - if i == nil || i.Location == nil { - return "" - } - return *i.Location -} - -// GetResource returns the Resource field if it's non-nil, zero value otherwise. -func (i *IssueImportError) GetResource() string { - if i == nil || i.Resource == nil { - return "" - } - return *i.Resource -} - -// GetValue returns the Value field if it's non-nil, zero value otherwise. -func (i *IssueImportError) GetValue() string { - if i == nil || i.Value == nil { - return "" - } - return *i.Value -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetCreatedAt() Timestamp { - if i == nil || i.CreatedAt == nil { - return Timestamp{} - } - return *i.CreatedAt -} - -// GetDocumentationURL returns the DocumentationURL field if it's non-nil, zero value otherwise. 
-func (i *IssueImportResponse) GetDocumentationURL() string { - if i == nil || i.DocumentationURL == nil { - return "" - } - return *i.DocumentationURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetID() int { - if i == nil || i.ID == nil { - return 0 - } - return *i.ID -} - -// GetImportIssuesURL returns the ImportIssuesURL field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetImportIssuesURL() string { - if i == nil || i.ImportIssuesURL == nil { - return "" - } - return *i.ImportIssuesURL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetMessage() string { - if i == nil || i.Message == nil { - return "" - } - return *i.Message -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetRepositoryURL() string { - if i == nil || i.RepositoryURL == nil { - return "" - } - return *i.RepositoryURL -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetStatus() string { - if i == nil || i.Status == nil { - return "" - } - return *i.Status -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetUpdatedAt() Timestamp { - if i == nil || i.UpdatedAt == nil { - return Timestamp{} - } - return *i.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetURL() string { - if i == nil || i.URL == nil { - return "" - } - return *i.URL -} - -// GetDirection returns the Direction field if it's non-nil, zero value otherwise. -func (i *IssueListCommentsOptions) GetDirection() string { - if i == nil || i.Direction == nil { - return "" - } - return *i.Direction -} - -// GetSince returns the Since field if it's non-nil, zero value otherwise. -func (i *IssueListCommentsOptions) GetSince() time.Time { - if i == nil || i.Since == nil { - return time.Time{} - } - return *i.Since -} - -// GetSort returns the Sort field if it's non-nil, zero value otherwise. -func (i *IssueListCommentsOptions) GetSort() string { - if i == nil || i.Sort == nil { - return "" - } - return *i.Sort -} - -// GetAssignee returns the Assignee field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetAssignee() string { - if i == nil || i.Assignee == nil { - return "" - } - return *i.Assignee -} - -// GetAssignees returns the Assignees field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetAssignees() []string { - if i == nil || i.Assignees == nil { - return nil - } - return *i.Assignees -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetBody() string { - if i == nil || i.Body == nil { - return "" - } - return *i.Body -} - -// GetLabels returns the Labels field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetLabels() []string { - if i == nil || i.Labels == nil { - return nil - } - return *i.Labels -} - -// GetMilestone returns the Milestone field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetMilestone() int { - if i == nil || i.Milestone == nil { - return 0 - } - return *i.Milestone -} - -// GetState returns the State field if it's non-nil, zero value otherwise. 
-func (i *IssueRequest) GetState() string { - if i == nil || i.State == nil { - return "" - } - return *i.State -} - -// GetStateReason returns the StateReason field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetStateReason() string { - if i == nil || i.StateReason == nil { - return "" - } - return *i.StateReason -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (i *IssueRequest) GetTitle() string { - if i == nil || i.Title == nil { - return "" - } - return *i.Title -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (i *IssuesEvent) GetAction() string { - if i == nil || i.Action == nil { - return "" - } - return *i.Action -} - -// GetAssignee returns the Assignee field. -func (i *IssuesEvent) GetAssignee() *User { - if i == nil { - return nil - } - return i.Assignee -} - -// GetChanges returns the Changes field. -func (i *IssuesEvent) GetChanges() *EditChange { - if i == nil { - return nil - } - return i.Changes -} - -// GetInstallation returns the Installation field. -func (i *IssuesEvent) GetInstallation() *Installation { - if i == nil { - return nil - } - return i.Installation -} - -// GetIssue returns the Issue field. -func (i *IssuesEvent) GetIssue() *Issue { - if i == nil { - return nil - } - return i.Issue -} - -// GetLabel returns the Label field. -func (i *IssuesEvent) GetLabel() *Label { - if i == nil { - return nil - } - return i.Label -} - -// GetMilestone returns the Milestone field. -func (i *IssuesEvent) GetMilestone() *Milestone { - if i == nil { - return nil - } - return i.Milestone -} - -// GetRepo returns the Repo field. -func (i *IssuesEvent) GetRepo() *Repository { - if i == nil { - return nil - } - return i.Repo -} - -// GetSender returns the Sender field. -func (i *IssuesEvent) GetSender() *User { - if i == nil { - return nil - } - return i.Sender -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (i *IssuesSearchResult) GetIncompleteResults() bool { - if i == nil || i.IncompleteResults == nil { - return false - } - return *i.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (i *IssuesSearchResult) GetTotal() int { - if i == nil || i.Total == nil { - return 0 - } - return *i.Total -} - -// GetClosedIssues returns the ClosedIssues field if it's non-nil, zero value otherwise. -func (i *IssueStats) GetClosedIssues() int { - if i == nil || i.ClosedIssues == nil { - return 0 - } - return *i.ClosedIssues -} - -// GetOpenIssues returns the OpenIssues field if it's non-nil, zero value otherwise. -func (i *IssueStats) GetOpenIssues() int { - if i == nil || i.OpenIssues == nil { - return 0 - } - return *i.OpenIssues -} - -// GetTotalIssues returns the TotalIssues field if it's non-nil, zero value otherwise. -func (i *IssueStats) GetTotalIssues() int { - if i == nil || i.TotalIssues == nil { - return 0 - } - return *i.TotalIssues -} - -// GetEncodedJITConfig returns the EncodedJITConfig field if it's non-nil, zero value otherwise. -func (j *JITRunnerConfig) GetEncodedJITConfig() string { - if j == nil || j.EncodedJITConfig == nil { - return "" - } - return *j.EncodedJITConfig -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (j *Jobs) GetTotalCount() int { - if j == nil || j.TotalCount == nil { - return 0 - } - return *j.TotalCount -} - -// GetAddedBy returns the AddedBy field if it's non-nil, zero value otherwise. 
-func (k *Key) GetAddedBy() string { - if k == nil || k.AddedBy == nil { - return "" - } - return *k.AddedBy -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (k *Key) GetCreatedAt() Timestamp { - if k == nil || k.CreatedAt == nil { - return Timestamp{} - } - return *k.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (k *Key) GetID() int64 { - if k == nil || k.ID == nil { - return 0 - } - return *k.ID -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (k *Key) GetKey() string { - if k == nil || k.Key == nil { - return "" - } - return *k.Key -} - -// GetLastUsed returns the LastUsed field if it's non-nil, zero value otherwise. -func (k *Key) GetLastUsed() Timestamp { - if k == nil || k.LastUsed == nil { - return Timestamp{} - } - return *k.LastUsed -} - -// GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise. -func (k *Key) GetReadOnly() bool { - if k == nil || k.ReadOnly == nil { - return false - } - return *k.ReadOnly -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (k *Key) GetTitle() string { - if k == nil || k.Title == nil { - return "" - } - return *k.Title -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (k *Key) GetURL() string { - if k == nil || k.URL == nil { - return "" - } - return *k.URL -} - -// GetVerified returns the Verified field if it's non-nil, zero value otherwise. -func (k *Key) GetVerified() bool { - if k == nil || k.Verified == nil { - return false - } - return *k.Verified -} - -// GetColor returns the Color field if it's non-nil, zero value otherwise. -func (l *Label) GetColor() string { - if l == nil || l.Color == nil { - return "" - } - return *l.Color -} - -// GetDefault returns the Default field if it's non-nil, zero value otherwise. -func (l *Label) GetDefault() bool { - if l == nil || l.Default == nil { - return false - } - return *l.Default -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (l *Label) GetDescription() string { - if l == nil || l.Description == nil { - return "" - } - return *l.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (l *Label) GetID() int64 { - if l == nil || l.ID == nil { - return 0 - } - return *l.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (l *Label) GetName() string { - if l == nil || l.Name == nil { - return "" - } - return *l.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (l *Label) GetNodeID() string { - if l == nil || l.NodeID == nil { - return "" - } - return *l.NodeID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (l *Label) GetURL() string { - if l == nil || l.URL == nil { - return "" - } - return *l.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (l *LabelEvent) GetAction() string { - if l == nil || l.Action == nil { - return "" - } - return *l.Action -} - -// GetChanges returns the Changes field. -func (l *LabelEvent) GetChanges() *EditChange { - if l == nil { - return nil - } - return l.Changes -} - -// GetInstallation returns the Installation field. -func (l *LabelEvent) GetInstallation() *Installation { - if l == nil { - return nil - } - return l.Installation -} - -// GetLabel returns the Label field. 
-func (l *LabelEvent) GetLabel() *Label { - if l == nil { - return nil - } - return l.Label -} - -// GetOrg returns the Org field. -func (l *LabelEvent) GetOrg() *Organization { - if l == nil { - return nil - } - return l.Org -} - -// GetRepo returns the Repo field. -func (l *LabelEvent) GetRepo() *Repository { - if l == nil { - return nil - } - return l.Repo -} - -// GetSender returns the Sender field. -func (l *LabelEvent) GetSender() *User { - if l == nil { - return nil - } - return l.Sender -} - -// GetColor returns the Color field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetColor() string { - if l == nil || l.Color == nil { - return "" - } - return *l.Color -} - -// GetDefault returns the Default field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetDefault() bool { - if l == nil || l.Default == nil { - return false - } - return *l.Default -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetDescription() string { - if l == nil || l.Description == nil { - return "" - } - return *l.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetID() int64 { - if l == nil || l.ID == nil { - return 0 - } - return *l.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetName() string { - if l == nil || l.Name == nil { - return "" - } - return *l.Name -} - -// GetScore returns the Score field. -func (l *LabelResult) GetScore() *float64 { - if l == nil { - return nil - } - return l.Score -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (l *LabelResult) GetURL() string { - if l == nil || l.URL == nil { - return "" - } - return *l.URL -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (l *LabelsSearchResult) GetIncompleteResults() bool { - if l == nil || l.IncompleteResults == nil { - return false - } - return *l.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (l *LabelsSearchResult) GetTotal() int { - if l == nil || l.Total == nil { - return 0 - } - return *l.Total -} - -// GetOID returns the OID field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetOID() string { - if l == nil || l.OID == nil { - return "" - } - return *l.OID -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetPath() string { - if l == nil || l.Path == nil { - return "" - } - return *l.Path -} - -// GetRefName returns the RefName field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetRefName() string { - if l == nil || l.RefName == nil { - return "" - } - return *l.RefName -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (l *LargeFile) GetSize() int { - if l == nil || l.Size == nil { - return 0 - } - return *l.Size -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (l *License) GetBody() string { - if l == nil || l.Body == nil { - return "" - } - return *l.Body -} - -// GetConditions returns the Conditions field if it's non-nil, zero value otherwise. -func (l *License) GetConditions() []string { - if l == nil || l.Conditions == nil { - return nil - } - return *l.Conditions -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. 
-func (l *License) GetDescription() string { - if l == nil || l.Description == nil { - return "" - } - return *l.Description -} - -// GetFeatured returns the Featured field if it's non-nil, zero value otherwise. -func (l *License) GetFeatured() bool { - if l == nil || l.Featured == nil { - return false - } - return *l.Featured -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (l *License) GetHTMLURL() string { - if l == nil || l.HTMLURL == nil { - return "" - } - return *l.HTMLURL -} - -// GetImplementation returns the Implementation field if it's non-nil, zero value otherwise. -func (l *License) GetImplementation() string { - if l == nil || l.Implementation == nil { - return "" - } - return *l.Implementation -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (l *License) GetKey() string { - if l == nil || l.Key == nil { - return "" - } - return *l.Key -} - -// GetLimitations returns the Limitations field if it's non-nil, zero value otherwise. -func (l *License) GetLimitations() []string { - if l == nil || l.Limitations == nil { - return nil - } - return *l.Limitations -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (l *License) GetName() string { - if l == nil || l.Name == nil { - return "" - } - return *l.Name -} - -// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise. -func (l *License) GetPermissions() []string { - if l == nil || l.Permissions == nil { - return nil - } - return *l.Permissions -} - -// GetSPDXID returns the SPDXID field if it's non-nil, zero value otherwise. -func (l *License) GetSPDXID() string { - if l == nil || l.SPDXID == nil { - return "" - } - return *l.SPDXID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (l *License) GetURL() string { - if l == nil || l.URL == nil { - return "" - } - return *l.URL -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (l *LinearHistoryRequirementEnforcementLevelChanges) GetFrom() string { - if l == nil || l.From == nil { - return "" - } - return *l.From -} - -// GetDirection returns the Direction field if it's non-nil, zero value otherwise. -func (l *ListAlertsOptions) GetDirection() string { - if l == nil || l.Direction == nil { - return "" - } - return *l.Direction -} - -// GetEcosystem returns the Ecosystem field if it's non-nil, zero value otherwise. -func (l *ListAlertsOptions) GetEcosystem() string { - if l == nil || l.Ecosystem == nil { - return "" - } - return *l.Ecosystem -} - -// GetPackage returns the Package field if it's non-nil, zero value otherwise. -func (l *ListAlertsOptions) GetPackage() string { - if l == nil || l.Package == nil { - return "" - } - return *l.Package -} - -// GetScope returns the Scope field if it's non-nil, zero value otherwise. -func (l *ListAlertsOptions) GetScope() string { - if l == nil || l.Scope == nil { - return "" - } - return *l.Scope -} - -// GetSeverity returns the Severity field if it's non-nil, zero value otherwise. -func (l *ListAlertsOptions) GetSeverity() string { - if l == nil || l.Severity == nil { - return "" - } - return *l.Severity -} - -// GetSort returns the Sort field if it's non-nil, zero value otherwise. -func (l *ListAlertsOptions) GetSort() string { - if l == nil || l.Sort == nil { - return "" - } - return *l.Sort -} - -// GetState returns the State field if it's non-nil, zero value otherwise. 
-func (l *ListAlertsOptions) GetState() string { - if l == nil || l.State == nil { - return "" - } - return *l.State -} - -// GetAppID returns the AppID field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsOptions) GetAppID() int64 { - if l == nil || l.AppID == nil { - return 0 - } - return *l.AppID -} - -// GetCheckName returns the CheckName field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsOptions) GetCheckName() string { - if l == nil || l.CheckName == nil { - return "" - } - return *l.CheckName -} - -// GetFilter returns the Filter field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsOptions) GetFilter() string { - if l == nil || l.Filter == nil { - return "" - } - return *l.Filter -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsOptions) GetStatus() string { - if l == nil || l.Status == nil { - return "" - } - return *l.Status -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (l *ListCheckRunsResults) GetTotal() int { - if l == nil || l.Total == nil { - return 0 - } - return *l.Total -} - -// GetAppID returns the AppID field if it's non-nil, zero value otherwise. -func (l *ListCheckSuiteOptions) GetAppID() int { - if l == nil || l.AppID == nil { - return 0 - } - return *l.AppID -} - -// GetCheckName returns the CheckName field if it's non-nil, zero value otherwise. -func (l *ListCheckSuiteOptions) GetCheckName() string { - if l == nil || l.CheckName == nil { - return "" - } - return *l.CheckName -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (l *ListCheckSuiteResults) GetTotal() int { - if l == nil || l.Total == nil { - return 0 - } - return *l.Total -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (l *ListCodespaces) GetTotalCount() int { - if l == nil || l.TotalCount == nil { - return 0 - } - return *l.TotalCount -} - -// GetAffiliation returns the Affiliation field if it's non-nil, zero value otherwise. -func (l *ListCollaboratorOptions) GetAffiliation() string { - if l == nil || l.Affiliation == nil { - return "" - } - return *l.Affiliation -} - -// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. -func (l *ListExternalGroupsOptions) GetDisplayName() string { - if l == nil || l.DisplayName == nil { - return "" - } - return *l.DisplayName -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (l *ListRepositories) GetTotalCount() int { - if l == nil || l.TotalCount == nil { - return 0 - } - return *l.TotalCount -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (l *ListSCIMProvisionedIdentitiesOptions) GetCount() int { - if l == nil || l.Count == nil { - return 0 - } - return *l.Count -} - -// GetFilter returns the Filter field if it's non-nil, zero value otherwise. -func (l *ListSCIMProvisionedIdentitiesOptions) GetFilter() string { - if l == nil || l.Filter == nil { - return "" - } - return *l.Filter -} - -// GetStartIndex returns the StartIndex field if it's non-nil, zero value otherwise. -func (l *ListSCIMProvisionedIdentitiesOptions) GetStartIndex() int { - if l == nil || l.StartIndex == nil { - return 0 - } - return *l.StartIndex -} - -// GetEndColumn returns the EndColumn field if it's non-nil, zero value otherwise. 
-func (l *Location) GetEndColumn() int { - if l == nil || l.EndColumn == nil { - return 0 - } - return *l.EndColumn -} - -// GetEndLine returns the EndLine field if it's non-nil, zero value otherwise. -func (l *Location) GetEndLine() int { - if l == nil || l.EndLine == nil { - return 0 - } - return *l.EndLine -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (l *Location) GetPath() string { - if l == nil || l.Path == nil { - return "" - } - return *l.Path -} - -// GetStartColumn returns the StartColumn field if it's non-nil, zero value otherwise. -func (l *Location) GetStartColumn() int { - if l == nil || l.StartColumn == nil { - return 0 - } - return *l.StartColumn -} - -// GetStartLine returns the StartLine field if it's non-nil, zero value otherwise. -func (l *Location) GetStartLine() int { - if l == nil || l.StartLine == nil { - return 0 - } - return *l.StartLine -} - -// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. -func (l *LockBranch) GetEnabled() bool { - if l == nil || l.Enabled == nil { - return false - } - return *l.Enabled -} - -// GetEffectiveDate returns the EffectiveDate field if it's non-nil, zero value otherwise. -func (m *MarketplacePendingChange) GetEffectiveDate() Timestamp { - if m == nil || m.EffectiveDate == nil { - return Timestamp{} - } - return *m.EffectiveDate -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *MarketplacePendingChange) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetPlan returns the Plan field. -func (m *MarketplacePendingChange) GetPlan() *MarketplacePlan { - if m == nil { - return nil - } - return m.Plan -} - -// GetUnitCount returns the UnitCount field if it's non-nil, zero value otherwise. -func (m *MarketplacePendingChange) GetUnitCount() int { - if m == nil || m.UnitCount == nil { - return 0 - } - return *m.UnitCount -} - -// GetAccountsURL returns the AccountsURL field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetAccountsURL() string { - if m == nil || m.AccountsURL == nil { - return "" - } - return *m.AccountsURL -} - -// GetBullets returns the Bullets field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetBullets() []string { - if m == nil || m.Bullets == nil { - return nil - } - return *m.Bullets -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetDescription() string { - if m == nil || m.Description == nil { - return "" - } - return *m.Description -} - -// GetHasFreeTrial returns the HasFreeTrial field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetHasFreeTrial() bool { - if m == nil || m.HasFreeTrial == nil { - return false - } - return *m.HasFreeTrial -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetMonthlyPriceInCents returns the MonthlyPriceInCents field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetMonthlyPriceInCents() int { - if m == nil || m.MonthlyPriceInCents == nil { - return 0 - } - return *m.MonthlyPriceInCents -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetName() string { - if m == nil || m.Name == nil { - return "" - } - return *m.Name -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. 
-func (m *MarketplacePlan) GetNumber() int { - if m == nil || m.Number == nil { - return 0 - } - return *m.Number -} - -// GetPriceModel returns the PriceModel field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetPriceModel() string { - if m == nil || m.PriceModel == nil { - return "" - } - return *m.PriceModel -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetUnitName returns the UnitName field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetUnitName() string { - if m == nil || m.UnitName == nil { - return "" - } - return *m.UnitName -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetYearlyPriceInCents returns the YearlyPriceInCents field if it's non-nil, zero value otherwise. -func (m *MarketplacePlan) GetYearlyPriceInCents() int { - if m == nil || m.YearlyPriceInCents == nil { - return 0 - } - return *m.YearlyPriceInCents -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetLogin() string { - if m == nil || m.Login == nil { - return "" - } - return *m.Login -} - -// GetMarketplacePendingChange returns the MarketplacePendingChange field. -func (m *MarketplacePlanAccount) GetMarketplacePendingChange() *MarketplacePendingChange { - if m == nil { - return nil - } - return m.MarketplacePendingChange -} - -// GetMarketplacePurchase returns the MarketplacePurchase field. -func (m *MarketplacePlanAccount) GetMarketplacePurchase() *MarketplacePurchase { - if m == nil { - return nil - } - return m.MarketplacePurchase -} - -// GetOrganizationBillingEmail returns the OrganizationBillingEmail field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetOrganizationBillingEmail() string { - if m == nil || m.OrganizationBillingEmail == nil { - return "" - } - return *m.OrganizationBillingEmail -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetType() string { - if m == nil || m.Type == nil { - return "" - } - return *m.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *MarketplacePlanAccount) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetAccount returns the Account field. -func (m *MarketplacePurchase) GetAccount() *MarketplacePurchaseAccount { - if m == nil { - return nil - } - return m.Account -} - -// GetBillingCycle returns the BillingCycle field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetBillingCycle() string { - if m == nil || m.BillingCycle == nil { - return "" - } - return *m.BillingCycle -} - -// GetFreeTrialEndsOn returns the FreeTrialEndsOn field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetFreeTrialEndsOn() Timestamp { - if m == nil || m.FreeTrialEndsOn == nil { - return Timestamp{} - } - return *m.FreeTrialEndsOn -} - -// GetNextBillingDate returns the NextBillingDate field if it's non-nil, zero value otherwise. 
-func (m *MarketplacePurchase) GetNextBillingDate() Timestamp { - if m == nil || m.NextBillingDate == nil { - return Timestamp{} - } - return *m.NextBillingDate -} - -// GetOnFreeTrial returns the OnFreeTrial field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetOnFreeTrial() bool { - if m == nil || m.OnFreeTrial == nil { - return false - } - return *m.OnFreeTrial -} - -// GetPlan returns the Plan field. -func (m *MarketplacePurchase) GetPlan() *MarketplacePlan { - if m == nil { - return nil - } - return m.Plan -} - -// GetUnitCount returns the UnitCount field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetUnitCount() int { - if m == nil || m.UnitCount == nil { - return 0 - } - return *m.UnitCount -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchase) GetUpdatedAt() Timestamp { - if m == nil || m.UpdatedAt == nil { - return Timestamp{} - } - return *m.UpdatedAt -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseAccount) GetEmail() string { - if m == nil || m.Email == nil { - return "" - } - return *m.Email -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseAccount) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseAccount) GetLogin() string { - if m == nil || m.Login == nil { - return "" - } - return *m.Login -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseAccount) GetNodeID() string { - if m == nil || m.NodeID == nil { - return "" - } - return *m.NodeID -} - -// GetOrganizationBillingEmail returns the OrganizationBillingEmail field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseAccount) GetOrganizationBillingEmail() string { - if m == nil || m.OrganizationBillingEmail == nil { - return "" - } - return *m.OrganizationBillingEmail -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseAccount) GetType() string { - if m == nil || m.Type == nil { - return "" - } - return *m.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseAccount) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetEffectiveDate returns the EffectiveDate field if it's non-nil, zero value otherwise. -func (m *MarketplacePurchaseEvent) GetEffectiveDate() Timestamp { - if m == nil || m.EffectiveDate == nil { - return Timestamp{} - } - return *m.EffectiveDate -} - -// GetInstallation returns the Installation field. -func (m *MarketplacePurchaseEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMarketplacePurchase returns the MarketplacePurchase field. -func (m *MarketplacePurchaseEvent) GetMarketplacePurchase() *MarketplacePurchase { - if m == nil { - return nil - } - return m.MarketplacePurchase -} - -// GetPreviousMarketplacePurchase returns the PreviousMarketplacePurchase field. 
-func (m *MarketplacePurchaseEvent) GetPreviousMarketplacePurchase() *MarketplacePurchase { - if m == nil { - return nil - } - return m.PreviousMarketplacePurchase -} - -// GetSender returns the Sender field. -func (m *MarketplacePurchaseEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetText returns the Text field if it's non-nil, zero value otherwise. -func (m *Match) GetText() string { - if m == nil || m.Text == nil { - return "" - } - return *m.Text -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MemberEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetInstallation returns the Installation field. -func (m *MemberEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMember returns the Member field. -func (m *MemberEvent) GetMember() *User { - if m == nil { - return nil - } - return m.Member -} - -// GetRepo returns the Repo field. -func (m *MemberEvent) GetRepo() *Repository { - if m == nil { - return nil - } - return m.Repo -} - -// GetSender returns the Sender field. -func (m *MemberEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetOrganization returns the Organization field. -func (m *Membership) GetOrganization() *Organization { - if m == nil { - return nil - } - return m.Organization -} - -// GetOrganizationURL returns the OrganizationURL field if it's non-nil, zero value otherwise. -func (m *Membership) GetOrganizationURL() string { - if m == nil || m.OrganizationURL == nil { - return "" - } - return *m.OrganizationURL -} - -// GetRole returns the Role field if it's non-nil, zero value otherwise. -func (m *Membership) GetRole() string { - if m == nil || m.Role == nil { - return "" - } - return *m.Role -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *Membership) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Membership) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetUser returns the User field. -func (m *Membership) GetUser() *User { - if m == nil { - return nil - } - return m.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MembershipEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetInstallation returns the Installation field. -func (m *MembershipEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMember returns the Member field. -func (m *MembershipEvent) GetMember() *User { - if m == nil { - return nil - } - return m.Member -} - -// GetOrg returns the Org field. -func (m *MembershipEvent) GetOrg() *Organization { - if m == nil { - return nil - } - return m.Org -} - -// GetScope returns the Scope field if it's non-nil, zero value otherwise. -func (m *MembershipEvent) GetScope() string { - if m == nil || m.Scope == nil { - return "" - } - return *m.Scope -} - -// GetSender returns the Sender field. -func (m *MembershipEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetTeam returns the Team field. 
-func (m *MembershipEvent) GetTeam() *Team { - if m == nil { - return nil - } - return m.Team -} - -// GetBaseRef returns the BaseRef field if it's non-nil, zero value otherwise. -func (m *MergeGroup) GetBaseRef() string { - if m == nil || m.BaseRef == nil { - return "" - } - return *m.BaseRef -} - -// GetBaseSHA returns the BaseSHA field if it's non-nil, zero value otherwise. -func (m *MergeGroup) GetBaseSHA() string { - if m == nil || m.BaseSHA == nil { - return "" - } - return *m.BaseSHA -} - -// GetHeadCommit returns the HeadCommit field. -func (m *MergeGroup) GetHeadCommit() *Commit { - if m == nil { - return nil - } - return m.HeadCommit -} - -// GetHeadRef returns the HeadRef field if it's non-nil, zero value otherwise. -func (m *MergeGroup) GetHeadRef() string { - if m == nil || m.HeadRef == nil { - return "" - } - return *m.HeadRef -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (m *MergeGroup) GetHeadSHA() string { - if m == nil || m.HeadSHA == nil { - return "" - } - return *m.HeadSHA -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MergeGroupEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetInstallation returns the Installation field. -func (m *MergeGroupEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMergeGroup returns the MergeGroup field. -func (m *MergeGroupEvent) GetMergeGroup() *MergeGroup { - if m == nil { - return nil - } - return m.MergeGroup -} - -// GetOrg returns the Org field. -func (m *MergeGroupEvent) GetOrg() *Organization { - if m == nil { - return nil - } - return m.Org -} - -// GetRepo returns the Repo field. -func (m *MergeGroupEvent) GetRepo() *Repository { - if m == nil { - return nil - } - return m.Repo -} - -// GetSender returns the Sender field. -func (m *MergeGroupEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetText returns the Text field if it's non-nil, zero value otherwise. -func (m *Message) GetText() string { - if m == nil || m.Text == nil { - return "" - } - return *m.Text -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MetaEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetHook returns the Hook field. -func (m *MetaEvent) GetHook() *Hook { - if m == nil { - return nil - } - return m.Hook -} - -// GetHookID returns the HookID field if it's non-nil, zero value otherwise. -func (m *MetaEvent) GetHookID() int64 { - if m == nil || m.HookID == nil { - return 0 - } - return *m.HookID -} - -// GetInstallation returns the Installation field. -func (m *MetaEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetOrg returns the Org field. -func (m *MetaEvent) GetOrg() *Organization { - if m == nil { - return nil - } - return m.Org -} - -// GetRepo returns the Repo field. -func (m *MetaEvent) GetRepo() *Repository { - if m == nil { - return nil - } - return m.Repo -} - -// GetSender returns the Sender field. -func (m *MetaEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. 
-func (m *Metric) GetHTMLURL() string { - if m == nil || m.HTMLURL == nil { - return "" - } - return *m.HTMLURL -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (m *Metric) GetKey() string { - if m == nil || m.Key == nil { - return "" - } - return *m.Key -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (m *Metric) GetName() string { - if m == nil || m.Name == nil { - return "" - } - return *m.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (m *Metric) GetNodeID() string { - if m == nil || m.NodeID == nil { - return "" - } - return *m.NodeID -} - -// GetSPDXID returns the SPDXID field if it's non-nil, zero value otherwise. -func (m *Metric) GetSPDXID() string { - if m == nil || m.SPDXID == nil { - return "" - } - return *m.SPDXID -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Metric) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (m *Migration) GetCreatedAt() string { - if m == nil || m.CreatedAt == nil { - return "" - } - return *m.CreatedAt -} - -// GetExcludeAttachments returns the ExcludeAttachments field if it's non-nil, zero value otherwise. -func (m *Migration) GetExcludeAttachments() bool { - if m == nil || m.ExcludeAttachments == nil { - return false - } - return *m.ExcludeAttachments -} - -// GetGUID returns the GUID field if it's non-nil, zero value otherwise. -func (m *Migration) GetGUID() string { - if m == nil || m.GUID == nil { - return "" - } - return *m.GUID -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *Migration) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetLockRepositories returns the LockRepositories field if it's non-nil, zero value otherwise. -func (m *Migration) GetLockRepositories() bool { - if m == nil || m.LockRepositories == nil { - return false - } - return *m.LockRepositories -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *Migration) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (m *Migration) GetUpdatedAt() string { - if m == nil || m.UpdatedAt == nil { - return "" - } - return *m.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Migration) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetClosedAt() Timestamp { - if m == nil || m.ClosedAt == nil { - return Timestamp{} - } - return *m.ClosedAt -} - -// GetClosedIssues returns the ClosedIssues field if it's non-nil, zero value otherwise. -func (m *Milestone) GetClosedIssues() int { - if m == nil || m.ClosedIssues == nil { - return 0 - } - return *m.ClosedIssues -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetCreatedAt() Timestamp { - if m == nil || m.CreatedAt == nil { - return Timestamp{} - } - return *m.CreatedAt -} - -// GetCreator returns the Creator field. 
-func (m *Milestone) GetCreator() *User { - if m == nil { - return nil - } - return m.Creator -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (m *Milestone) GetDescription() string { - if m == nil || m.Description == nil { - return "" - } - return *m.Description -} - -// GetDueOn returns the DueOn field if it's non-nil, zero value otherwise. -func (m *Milestone) GetDueOn() Timestamp { - if m == nil || m.DueOn == nil { - return Timestamp{} - } - return *m.DueOn -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (m *Milestone) GetHTMLURL() string { - if m == nil || m.HTMLURL == nil { - return "" - } - return *m.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (m *Milestone) GetID() int64 { - if m == nil || m.ID == nil { - return 0 - } - return *m.ID -} - -// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise. -func (m *Milestone) GetLabelsURL() string { - if m == nil || m.LabelsURL == nil { - return "" - } - return *m.LabelsURL -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (m *Milestone) GetNodeID() string { - if m == nil || m.NodeID == nil { - return "" - } - return *m.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (m *Milestone) GetNumber() int { - if m == nil || m.Number == nil { - return 0 - } - return *m.Number -} - -// GetOpenIssues returns the OpenIssues field if it's non-nil, zero value otherwise. -func (m *Milestone) GetOpenIssues() int { - if m == nil || m.OpenIssues == nil { - return 0 - } - return *m.OpenIssues -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *Milestone) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (m *Milestone) GetTitle() string { - if m == nil || m.Title == nil { - return "" - } - return *m.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetUpdatedAt() Timestamp { - if m == nil || m.UpdatedAt == nil { - return Timestamp{} - } - return *m.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (m *Milestone) GetURL() string { - if m == nil || m.URL == nil { - return "" - } - return *m.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (m *MilestoneEvent) GetAction() string { - if m == nil || m.Action == nil { - return "" - } - return *m.Action -} - -// GetChanges returns the Changes field. -func (m *MilestoneEvent) GetChanges() *EditChange { - if m == nil { - return nil - } - return m.Changes -} - -// GetInstallation returns the Installation field. -func (m *MilestoneEvent) GetInstallation() *Installation { - if m == nil { - return nil - } - return m.Installation -} - -// GetMilestone returns the Milestone field. -func (m *MilestoneEvent) GetMilestone() *Milestone { - if m == nil { - return nil - } - return m.Milestone -} - -// GetOrg returns the Org field. -func (m *MilestoneEvent) GetOrg() *Organization { - if m == nil { - return nil - } - return m.Org -} - -// GetRepo returns the Repo field. -func (m *MilestoneEvent) GetRepo() *Repository { - if m == nil { - return nil - } - return m.Repo -} - -// GetSender returns the Sender field. 
-func (m *MilestoneEvent) GetSender() *User { - if m == nil { - return nil - } - return m.Sender -} - -// GetClosedMilestones returns the ClosedMilestones field if it's non-nil, zero value otherwise. -func (m *MilestoneStats) GetClosedMilestones() int { - if m == nil || m.ClosedMilestones == nil { - return 0 - } - return *m.ClosedMilestones -} - -// GetOpenMilestones returns the OpenMilestones field if it's non-nil, zero value otherwise. -func (m *MilestoneStats) GetOpenMilestones() int { - if m == nil || m.OpenMilestones == nil { - return 0 - } - return *m.OpenMilestones -} - -// GetTotalMilestones returns the TotalMilestones field if it's non-nil, zero value otherwise. -func (m *MilestoneStats) GetTotalMilestones() int { - if m == nil || m.TotalMilestones == nil { - return 0 - } - return *m.TotalMilestones -} - -// GetAnalysisKey returns the AnalysisKey field if it's non-nil, zero value otherwise. -func (m *MostRecentInstance) GetAnalysisKey() string { - if m == nil || m.AnalysisKey == nil { - return "" - } - return *m.AnalysisKey -} - -// GetCommitSHA returns the CommitSHA field if it's non-nil, zero value otherwise. -func (m *MostRecentInstance) GetCommitSHA() string { - if m == nil || m.CommitSHA == nil { - return "" - } - return *m.CommitSHA -} - -// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. -func (m *MostRecentInstance) GetEnvironment() string { - if m == nil || m.Environment == nil { - return "" - } - return *m.Environment -} - -// GetLocation returns the Location field. -func (m *MostRecentInstance) GetLocation() *Location { - if m == nil { - return nil - } - return m.Location -} - -// GetMessage returns the Message field. -func (m *MostRecentInstance) GetMessage() *Message { - if m == nil { - return nil - } - return m.Message -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (m *MostRecentInstance) GetRef() string { - if m == nil || m.Ref == nil { - return "" - } - return *m.Ref -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (m *MostRecentInstance) GetState() string { - if m == nil || m.State == nil { - return "" - } - return *m.State -} - -// GetBase returns the Base field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetBase() string { - if n == nil || n.Base == nil { - return "" - } - return *n.Base -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetBody() string { - if n == nil || n.Body == nil { - return "" - } - return *n.Body -} - -// GetDraft returns the Draft field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetDraft() bool { - if n == nil || n.Draft == nil { - return false - } - return *n.Draft -} - -// GetHead returns the Head field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetHead() string { - if n == nil || n.Head == nil { - return "" - } - return *n.Head -} - -// GetIssue returns the Issue field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetIssue() int { - if n == nil || n.Issue == nil { - return 0 - } - return *n.Issue -} - -// GetMaintainerCanModify returns the MaintainerCanModify field if it's non-nil, zero value otherwise. -func (n *NewPullRequest) GetMaintainerCanModify() bool { - if n == nil || n.MaintainerCanModify == nil { - return false - } - return *n.MaintainerCanModify -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. 
-func (n *NewPullRequest) GetTitle() string { - if n == nil || n.Title == nil { - return "" - } - return *n.Title -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetDescription() string { - if n == nil || n.Description == nil { - return "" - } - return *n.Description -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetLDAPDN() string { - if n == nil || n.LDAPDN == nil { - return "" - } - return *n.LDAPDN -} - -// GetParentTeamID returns the ParentTeamID field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetParentTeamID() int64 { - if n == nil || n.ParentTeamID == nil { - return 0 - } - return *n.ParentTeamID -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetPermission() string { - if n == nil || n.Permission == nil { - return "" - } - return *n.Permission -} - -// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise. -func (n *NewTeam) GetPrivacy() string { - if n == nil || n.Privacy == nil { - return "" - } - return *n.Privacy -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (n *Notification) GetID() string { - if n == nil || n.ID == nil { - return "" - } - return *n.ID -} - -// GetLastReadAt returns the LastReadAt field if it's non-nil, zero value otherwise. -func (n *Notification) GetLastReadAt() Timestamp { - if n == nil || n.LastReadAt == nil { - return Timestamp{} - } - return *n.LastReadAt -} - -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (n *Notification) GetReason() string { - if n == nil || n.Reason == nil { - return "" - } - return *n.Reason -} - -// GetRepository returns the Repository field. -func (n *Notification) GetRepository() *Repository { - if n == nil { - return nil - } - return n.Repository -} - -// GetSubject returns the Subject field. -func (n *Notification) GetSubject() *NotificationSubject { - if n == nil { - return nil - } - return n.Subject -} - -// GetUnread returns the Unread field if it's non-nil, zero value otherwise. -func (n *Notification) GetUnread() bool { - if n == nil || n.Unread == nil { - return false - } - return *n.Unread -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (n *Notification) GetUpdatedAt() Timestamp { - if n == nil || n.UpdatedAt == nil { - return Timestamp{} - } - return *n.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (n *Notification) GetURL() string { - if n == nil || n.URL == nil { - return "" - } - return *n.URL -} - -// GetLatestCommentURL returns the LatestCommentURL field if it's non-nil, zero value otherwise. -func (n *NotificationSubject) GetLatestCommentURL() string { - if n == nil || n.LatestCommentURL == nil { - return "" - } - return *n.LatestCommentURL -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (n *NotificationSubject) GetTitle() string { - if n == nil || n.Title == nil { - return "" - } - return *n.Title -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (n *NotificationSubject) GetType() string { - if n == nil || n.Type == nil { - return "" - } - return *n.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (n *NotificationSubject) GetURL() string { - if n == nil || n.URL == nil { - return "" - } - return *n.URL -} - -// GetClientID returns the ClientID field if it's non-nil, zero value otherwise. -func (o *OAuthAPP) GetClientID() string { - if o == nil || o.ClientID == nil { - return "" - } - return *o.ClientID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (o *OAuthAPP) GetName() string { - if o == nil || o.Name == nil { - return "" - } - return *o.Name -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (o *OAuthAPP) GetURL() string { - if o == nil || o.URL == nil { - return "" - } - return *o.URL -} - -// GetUseDefault returns the UseDefault field if it's non-nil, zero value otherwise. -func (o *OIDCSubjectClaimCustomTemplate) GetUseDefault() bool { - if o == nil || o.UseDefault == nil { - return false - } - return *o.UseDefault -} - -// GetAdvancedSecurityEnabledForNewRepos returns the AdvancedSecurityEnabledForNewRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetAdvancedSecurityEnabledForNewRepos() bool { - if o == nil || o.AdvancedSecurityEnabledForNewRepos == nil { - return false - } - return *o.AdvancedSecurityEnabledForNewRepos -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetAvatarURL() string { - if o == nil || o.AvatarURL == nil { - return "" - } - return *o.AvatarURL -} - -// GetBillingEmail returns the BillingEmail field if it's non-nil, zero value otherwise. -func (o *Organization) GetBillingEmail() string { - if o == nil || o.BillingEmail == nil { - return "" - } - return *o.BillingEmail -} - -// GetBlog returns the Blog field if it's non-nil, zero value otherwise. -func (o *Organization) GetBlog() string { - if o == nil || o.Blog == nil { - return "" - } - return *o.Blog -} - -// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise. -func (o *Organization) GetCollaborators() int { - if o == nil || o.Collaborators == nil { - return 0 - } - return *o.Collaborators -} - -// GetCompany returns the Company field if it's non-nil, zero value otherwise. -func (o *Organization) GetCompany() string { - if o == nil || o.Company == nil { - return "" - } - return *o.Company -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (o *Organization) GetCreatedAt() Timestamp { - if o == nil || o.CreatedAt == nil { - return Timestamp{} - } - return *o.CreatedAt -} - -// GetDefaultRepoPermission returns the DefaultRepoPermission field if it's non-nil, zero value otherwise. -func (o *Organization) GetDefaultRepoPermission() string { - if o == nil || o.DefaultRepoPermission == nil { - return "" - } - return *o.DefaultRepoPermission -} - -// GetDefaultRepoSettings returns the DefaultRepoSettings field if it's non-nil, zero value otherwise. -func (o *Organization) GetDefaultRepoSettings() string { - if o == nil || o.DefaultRepoSettings == nil { - return "" - } - return *o.DefaultRepoSettings -} - -// GetDependabotAlertsEnabledForNewRepos returns the DependabotAlertsEnabledForNewRepos field if it's non-nil, zero value otherwise. 
-func (o *Organization) GetDependabotAlertsEnabledForNewRepos() bool { - if o == nil || o.DependabotAlertsEnabledForNewRepos == nil { - return false - } - return *o.DependabotAlertsEnabledForNewRepos -} - -// GetDependabotSecurityUpdatesEnabledForNewRepos returns the DependabotSecurityUpdatesEnabledForNewRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetDependabotSecurityUpdatesEnabledForNewRepos() bool { - if o == nil || o.DependabotSecurityUpdatesEnabledForNewRepos == nil { - return false - } - return *o.DependabotSecurityUpdatesEnabledForNewRepos -} - -// GetDependencyGraphEnabledForNewRepos returns the DependencyGraphEnabledForNewRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetDependencyGraphEnabledForNewRepos() bool { - if o == nil || o.DependencyGraphEnabledForNewRepos == nil { - return false - } - return *o.DependencyGraphEnabledForNewRepos -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (o *Organization) GetDescription() string { - if o == nil || o.Description == nil { - return "" - } - return *o.Description -} - -// GetDiskUsage returns the DiskUsage field if it's non-nil, zero value otherwise. -func (o *Organization) GetDiskUsage() int { - if o == nil || o.DiskUsage == nil { - return 0 - } - return *o.DiskUsage -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (o *Organization) GetEmail() string { - if o == nil || o.Email == nil { - return "" - } - return *o.Email -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetEventsURL() string { - if o == nil || o.EventsURL == nil { - return "" - } - return *o.EventsURL -} - -// GetFollowers returns the Followers field if it's non-nil, zero value otherwise. -func (o *Organization) GetFollowers() int { - if o == nil || o.Followers == nil { - return 0 - } - return *o.Followers -} - -// GetFollowing returns the Following field if it's non-nil, zero value otherwise. -func (o *Organization) GetFollowing() int { - if o == nil || o.Following == nil { - return 0 - } - return *o.Following -} - -// GetHasOrganizationProjects returns the HasOrganizationProjects field if it's non-nil, zero value otherwise. -func (o *Organization) GetHasOrganizationProjects() bool { - if o == nil || o.HasOrganizationProjects == nil { - return false - } - return *o.HasOrganizationProjects -} - -// GetHasRepositoryProjects returns the HasRepositoryProjects field if it's non-nil, zero value otherwise. -func (o *Organization) GetHasRepositoryProjects() bool { - if o == nil || o.HasRepositoryProjects == nil { - return false - } - return *o.HasRepositoryProjects -} - -// GetHooksURL returns the HooksURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetHooksURL() string { - if o == nil || o.HooksURL == nil { - return "" - } - return *o.HooksURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetHTMLURL() string { - if o == nil || o.HTMLURL == nil { - return "" - } - return *o.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (o *Organization) GetID() int64 { - if o == nil || o.ID == nil { - return 0 - } - return *o.ID -} - -// GetIssuesURL returns the IssuesURL field if it's non-nil, zero value otherwise. 
-func (o *Organization) GetIssuesURL() string { - if o == nil || o.IssuesURL == nil { - return "" - } - return *o.IssuesURL -} - -// GetIsVerified returns the IsVerified field if it's non-nil, zero value otherwise. -func (o *Organization) GetIsVerified() bool { - if o == nil || o.IsVerified == nil { - return false - } - return *o.IsVerified -} - -// GetLocation returns the Location field if it's non-nil, zero value otherwise. -func (o *Organization) GetLocation() string { - if o == nil || o.Location == nil { - return "" - } - return *o.Location -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (o *Organization) GetLogin() string { - if o == nil || o.Login == nil { - return "" - } - return *o.Login -} - -// GetMembersAllowedRepositoryCreationType returns the MembersAllowedRepositoryCreationType field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersAllowedRepositoryCreationType() string { - if o == nil || o.MembersAllowedRepositoryCreationType == nil { - return "" - } - return *o.MembersAllowedRepositoryCreationType -} - -// GetMembersCanCreateInternalRepos returns the MembersCanCreateInternalRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreateInternalRepos() bool { - if o == nil || o.MembersCanCreateInternalRepos == nil { - return false - } - return *o.MembersCanCreateInternalRepos -} - -// GetMembersCanCreatePages returns the MembersCanCreatePages field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreatePages() bool { - if o == nil || o.MembersCanCreatePages == nil { - return false - } - return *o.MembersCanCreatePages -} - -// GetMembersCanCreatePrivatePages returns the MembersCanCreatePrivatePages field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreatePrivatePages() bool { - if o == nil || o.MembersCanCreatePrivatePages == nil { - return false - } - return *o.MembersCanCreatePrivatePages -} - -// GetMembersCanCreatePrivateRepos returns the MembersCanCreatePrivateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreatePrivateRepos() bool { - if o == nil || o.MembersCanCreatePrivateRepos == nil { - return false - } - return *o.MembersCanCreatePrivateRepos -} - -// GetMembersCanCreatePublicPages returns the MembersCanCreatePublicPages field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreatePublicPages() bool { - if o == nil || o.MembersCanCreatePublicPages == nil { - return false - } - return *o.MembersCanCreatePublicPages -} - -// GetMembersCanCreatePublicRepos returns the MembersCanCreatePublicRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreatePublicRepos() bool { - if o == nil || o.MembersCanCreatePublicRepos == nil { - return false - } - return *o.MembersCanCreatePublicRepos -} - -// GetMembersCanCreateRepos returns the MembersCanCreateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersCanCreateRepos() bool { - if o == nil || o.MembersCanCreateRepos == nil { - return false - } - return *o.MembersCanCreateRepos -} - -// GetMembersCanForkPrivateRepos returns the MembersCanForkPrivateRepos field if it's non-nil, zero value otherwise. 
-func (o *Organization) GetMembersCanForkPrivateRepos() bool { - if o == nil || o.MembersCanForkPrivateRepos == nil { - return false - } - return *o.MembersCanForkPrivateRepos -} - -// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetMembersURL() string { - if o == nil || o.MembersURL == nil { - return "" - } - return *o.MembersURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (o *Organization) GetName() string { - if o == nil || o.Name == nil { - return "" - } - return *o.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (o *Organization) GetNodeID() string { - if o == nil || o.NodeID == nil { - return "" - } - return *o.NodeID -} - -// GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetOwnedPrivateRepos() int64 { - if o == nil || o.OwnedPrivateRepos == nil { - return 0 - } - return *o.OwnedPrivateRepos -} - -// GetPlan returns the Plan field. -func (o *Organization) GetPlan() *Plan { - if o == nil { - return nil - } - return o.Plan -} - -// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise. -func (o *Organization) GetPrivateGists() int { - if o == nil || o.PrivateGists == nil { - return 0 - } - return *o.PrivateGists -} - -// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise. -func (o *Organization) GetPublicGists() int { - if o == nil || o.PublicGists == nil { - return 0 - } - return *o.PublicGists -} - -// GetPublicMembersURL returns the PublicMembersURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetPublicMembersURL() string { - if o == nil || o.PublicMembersURL == nil { - return "" - } - return *o.PublicMembersURL -} - -// GetPublicRepos returns the PublicRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetPublicRepos() int { - if o == nil || o.PublicRepos == nil { - return 0 - } - return *o.PublicRepos -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. -func (o *Organization) GetReposURL() string { - if o == nil || o.ReposURL == nil { - return "" - } - return *o.ReposURL -} - -// GetSecretScanningEnabledForNewRepos returns the SecretScanningEnabledForNewRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetSecretScanningEnabledForNewRepos() bool { - if o == nil || o.SecretScanningEnabledForNewRepos == nil { - return false - } - return *o.SecretScanningEnabledForNewRepos -} - -// GetSecretScanningPushProtectionEnabledForNewRepos returns the SecretScanningPushProtectionEnabledForNewRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetSecretScanningPushProtectionEnabledForNewRepos() bool { - if o == nil || o.SecretScanningPushProtectionEnabledForNewRepos == nil { - return false - } - return *o.SecretScanningPushProtectionEnabledForNewRepos -} - -// GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetTotalPrivateRepos() int64 { - if o == nil || o.TotalPrivateRepos == nil { - return 0 - } - return *o.TotalPrivateRepos -} - -// GetTwitterUsername returns the TwitterUsername field if it's non-nil, zero value otherwise. 
-func (o *Organization) GetTwitterUsername() string { - if o == nil || o.TwitterUsername == nil { - return "" - } - return *o.TwitterUsername -} - -// GetTwoFactorRequirementEnabled returns the TwoFactorRequirementEnabled field if it's non-nil, zero value otherwise. -func (o *Organization) GetTwoFactorRequirementEnabled() bool { - if o == nil || o.TwoFactorRequirementEnabled == nil { - return false - } - return *o.TwoFactorRequirementEnabled -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (o *Organization) GetType() string { - if o == nil || o.Type == nil { - return "" - } - return *o.Type -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (o *Organization) GetUpdatedAt() Timestamp { - if o == nil || o.UpdatedAt == nil { - return Timestamp{} - } - return *o.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (o *Organization) GetURL() string { - if o == nil || o.URL == nil { - return "" - } - return *o.URL -} - -// GetWebCommitSignoffRequired returns the WebCommitSignoffRequired field if it's non-nil, zero value otherwise. -func (o *Organization) GetWebCommitSignoffRequired() bool { - if o == nil || o.WebCommitSignoffRequired == nil { - return false - } - return *o.WebCommitSignoffRequired -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (o *OrganizationCustomRepoRoles) GetTotalCount() int { - if o == nil || o.TotalCount == nil { - return 0 - } - return *o.TotalCount -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (o *OrganizationEvent) GetAction() string { - if o == nil || o.Action == nil { - return "" - } - return *o.Action -} - -// GetInstallation returns the Installation field. -func (o *OrganizationEvent) GetInstallation() *Installation { - if o == nil { - return nil - } - return o.Installation -} - -// GetInvitation returns the Invitation field. -func (o *OrganizationEvent) GetInvitation() *Invitation { - if o == nil { - return nil - } - return o.Invitation -} - -// GetMembership returns the Membership field. -func (o *OrganizationEvent) GetMembership() *Membership { - if o == nil { - return nil - } - return o.Membership -} - -// GetOrganization returns the Organization field. -func (o *OrganizationEvent) GetOrganization() *Organization { - if o == nil { - return nil - } - return o.Organization -} - -// GetSender returns the Sender field. -func (o *OrganizationEvent) GetSender() *User { - if o == nil { - return nil - } - return o.Sender -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (o *OrganizationInstallations) GetTotalCount() int { - if o == nil || o.TotalCount == nil { - return 0 - } - return *o.TotalCount -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (o *OrgBlockEvent) GetAction() string { - if o == nil || o.Action == nil { - return "" - } - return *o.Action -} - -// GetBlockedUser returns the BlockedUser field. -func (o *OrgBlockEvent) GetBlockedUser() *User { - if o == nil { - return nil - } - return o.BlockedUser -} - -// GetInstallation returns the Installation field. -func (o *OrgBlockEvent) GetInstallation() *Installation { - if o == nil { - return nil - } - return o.Installation -} - -// GetOrganization returns the Organization field. 
-func (o *OrgBlockEvent) GetOrganization() *Organization { - if o == nil { - return nil - } - return o.Organization -} - -// GetSender returns the Sender field. -func (o *OrgBlockEvent) GetSender() *User { - if o == nil { - return nil - } - return o.Sender -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetCreatedAt() Timestamp { - if o == nil || o.CreatedAt == nil { - return Timestamp{} - } - return *o.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetID() int64 { - if o == nil || o.ID == nil { - return 0 - } - return *o.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetName() string { - if o == nil || o.Name == nil { - return "" - } - return *o.Name -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetPath() string { - if o == nil || o.Path == nil { - return "" - } - return *o.Path -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetRef() string { - if o == nil || o.Ref == nil { - return "" - } - return *o.Ref -} - -// GetRepository returns the Repository field. -func (o *OrgRequiredWorkflow) GetRepository() *Repository { - if o == nil { - return nil - } - return o.Repository -} - -// GetScope returns the Scope field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetScope() string { - if o == nil || o.Scope == nil { - return "" - } - return *o.Scope -} - -// GetSelectedRepositoriesURL returns the SelectedRepositoriesURL field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetSelectedRepositoriesURL() string { - if o == nil || o.SelectedRepositoriesURL == nil { - return "" - } - return *o.SelectedRepositoriesURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetState() string { - if o == nil || o.State == nil { - return "" - } - return *o.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflow) GetUpdatedAt() Timestamp { - if o == nil || o.UpdatedAt == nil { - return Timestamp{} - } - return *o.UpdatedAt -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (o *OrgRequiredWorkflows) GetTotalCount() int { - if o == nil || o.TotalCount == nil { - return 0 - } - return *o.TotalCount -} - -// GetDisabledOrgs returns the DisabledOrgs field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetDisabledOrgs() int { - if o == nil || o.DisabledOrgs == nil { - return 0 - } - return *o.DisabledOrgs -} - -// GetTotalOrgs returns the TotalOrgs field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetTotalOrgs() int { - if o == nil || o.TotalOrgs == nil { - return 0 - } - return *o.TotalOrgs -} - -// GetTotalTeamMembers returns the TotalTeamMembers field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetTotalTeamMembers() int { - if o == nil || o.TotalTeamMembers == nil { - return 0 - } - return *o.TotalTeamMembers -} - -// GetTotalTeams returns the TotalTeams field if it's non-nil, zero value otherwise. -func (o *OrgStats) GetTotalTeams() int { - if o == nil || o.TotalTeams == nil { - return 0 - } - return *o.TotalTeams -} - -// GetOrg returns the Org field. 
-func (o *OwnerInfo) GetOrg() *User { - if o == nil { - return nil - } - return o.Org -} - -// GetUser returns the User field. -func (o *OwnerInfo) GetUser() *User { - if o == nil { - return nil - } - return o.User -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *Package) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *Package) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *Package) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *Package) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetOwner returns the Owner field. -func (p *Package) GetOwner() *User { - if p == nil { - return nil - } - return p.Owner -} - -// GetPackageType returns the PackageType field if it's non-nil, zero value otherwise. -func (p *Package) GetPackageType() string { - if p == nil || p.PackageType == nil { - return "" - } - return *p.PackageType -} - -// GetPackageVersion returns the PackageVersion field. -func (p *Package) GetPackageVersion() *PackageVersion { - if p == nil { - return nil - } - return p.PackageVersion -} - -// GetRegistry returns the Registry field. -func (p *Package) GetRegistry() *PackageRegistry { - if p == nil { - return nil - } - return p.Registry -} - -// GetRepository returns the Repository field. -func (p *Package) GetRepository() *Repository { - if p == nil { - return nil - } - return p.Repository -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *Package) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *Package) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetVersionCount returns the VersionCount field if it's non-nil, zero value otherwise. -func (p *Package) GetVersionCount() int64 { - if p == nil || p.VersionCount == nil { - return 0 - } - return *p.VersionCount -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (p *Package) GetVisibility() string { - if p == nil || p.Visibility == nil { - return "" - } - return *p.Visibility -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PackageEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetInstallation returns the Installation field. -func (p *PackageEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. -func (p *PackageEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetPackage returns the Package field. -func (p *PackageEvent) GetPackage() *Package { - if p == nil { - return nil - } - return p.Package -} - -// GetRepo returns the Repo field. -func (p *PackageEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. 
-func (p *PackageEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetAuthor returns the Author field. -func (p *PackageFile) GetAuthor() *User { - if p == nil { - return nil - } - return p.Author -} - -// GetContentType returns the ContentType field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetContentType() string { - if p == nil || p.ContentType == nil { - return "" - } - return *p.ContentType -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetDownloadURL() string { - if p == nil || p.DownloadURL == nil { - return "" - } - return *p.DownloadURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetMD5 returns the MD5 field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetMD5() string { - if p == nil || p.MD5 == nil { - return "" - } - return *p.MD5 -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetSHA1 returns the SHA1 field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetSHA1() string { - if p == nil || p.SHA1 == nil { - return "" - } - return *p.SHA1 -} - -// GetSHA256 returns the SHA256 field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetSHA256() string { - if p == nil || p.SHA256 == nil { - return "" - } - return *p.SHA256 -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetSize() int64 { - if p == nil || p.Size == nil { - return 0 - } - return *p.Size -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PackageFile) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetPackageType returns the PackageType field if it's non-nil, zero value otherwise. -func (p *PackageListOptions) GetPackageType() string { - if p == nil || p.PackageType == nil { - return "" - } - return *p.PackageType -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *PackageListOptions) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (p *PackageListOptions) GetVisibility() string { - if p == nil || p.Visibility == nil { - return "" - } - return *p.Visibility -} - -// GetContainer returns the Container field. -func (p *PackageMetadata) GetContainer() *PackageContainerMetadata { - if p == nil { - return nil - } - return p.Container -} - -// GetPackageType returns the PackageType field if it's non-nil, zero value otherwise. 
-func (p *PackageMetadata) GetPackageType() string { - if p == nil || p.PackageType == nil { - return "" - } - return *p.PackageType -} - -// GetAboutURL returns the AboutURL field if it's non-nil, zero value otherwise. -func (p *PackageRegistry) GetAboutURL() string { - if p == nil || p.AboutURL == nil { - return "" - } - return *p.AboutURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PackageRegistry) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (p *PackageRegistry) GetType() string { - if p == nil || p.Type == nil { - return "" - } - return *p.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PackageRegistry) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetVendor returns the Vendor field if it's non-nil, zero value otherwise. -func (p *PackageRegistry) GetVendor() string { - if p == nil || p.Vendor == nil { - return "" - } - return *p.Vendor -} - -// GetAuthor returns the Author field. -func (p *PackageRelease) GetAuthor() *User { - if p == nil { - return nil - } - return p.Author -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDraft returns the Draft field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetDraft() bool { - if p == nil || p.Draft == nil { - return false - } - return *p.Draft -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetPrerelease returns the Prerelease field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetPrerelease() bool { - if p == nil || p.Prerelease == nil { - return false - } - return *p.Prerelease -} - -// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetPublishedAt() Timestamp { - if p == nil || p.PublishedAt == nil { - return Timestamp{} - } - return *p.PublishedAt -} - -// GetTagName returns the TagName field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetTagName() string { - if p == nil || p.TagName == nil { - return "" - } - return *p.TagName -} - -// GetTargetCommitish returns the TargetCommitish field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetTargetCommitish() string { - if p == nil || p.TargetCommitish == nil { - return "" - } - return *p.TargetCommitish -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PackageRelease) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetAuthor returns the Author field. 
-func (p *PackageVersion) GetAuthor() *User { - if p == nil { - return nil - } - return p.Author -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetBodyHTML returns the BodyHTML field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetBodyHTML() string { - if p == nil || p.BodyHTML == nil { - return "" - } - return *p.BodyHTML -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDraft returns the Draft field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetDraft() bool { - if p == nil || p.Draft == nil { - return false - } - return *p.Draft -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetInstallationCommand returns the InstallationCommand field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetInstallationCommand() string { - if p == nil || p.InstallationCommand == nil { - return "" - } - return *p.InstallationCommand -} - -// GetManifest returns the Manifest field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetManifest() string { - if p == nil || p.Manifest == nil { - return "" - } - return *p.Manifest -} - -// GetMetadata returns the Metadata field. -func (p *PackageVersion) GetMetadata() *PackageMetadata { - if p == nil { - return nil - } - return p.Metadata -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetPackageHTMLURL returns the PackageHTMLURL field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetPackageHTMLURL() string { - if p == nil || p.PackageHTMLURL == nil { - return "" - } - return *p.PackageHTMLURL -} - -// GetPrerelease returns the Prerelease field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetPrerelease() bool { - if p == nil || p.Prerelease == nil { - return false - } - return *p.Prerelease -} - -// GetRelease returns the Release field. -func (p *PackageVersion) GetRelease() *PackageRelease { - if p == nil { - return nil - } - return p.Release -} - -// GetSummary returns the Summary field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetSummary() string { - if p == nil || p.Summary == nil { - return "" - } - return *p.Summary -} - -// GetTagName returns the TagName field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetTagName() string { - if p == nil || p.TagName == nil { - return "" - } - return *p.TagName -} - -// GetTargetCommitish returns the TargetCommitish field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetTargetCommitish() string { - if p == nil || p.TargetCommitish == nil { - return "" - } - return *p.TargetCommitish -} - -// GetTargetOID returns the TargetOID field if it's non-nil, zero value otherwise. 
-func (p *PackageVersion) GetTargetOID() string { - if p == nil || p.TargetOID == nil { - return "" - } - return *p.TargetOID -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetVersion returns the Version field if it's non-nil, zero value otherwise. -func (p *PackageVersion) GetVersion() string { - if p == nil || p.Version == nil { - return "" - } - return *p.Version -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *Page) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *Page) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetPageName returns the PageName field if it's non-nil, zero value otherwise. -func (p *Page) GetPageName() string { - if p == nil || p.PageName == nil { - return "" - } - return *p.PageName -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (p *Page) GetSHA() string { - if p == nil || p.SHA == nil { - return "" - } - return *p.SHA -} - -// GetSummary returns the Summary field if it's non-nil, zero value otherwise. -func (p *Page) GetSummary() string { - if p == nil || p.Summary == nil { - return "" - } - return *p.Summary -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (p *Page) GetTitle() string { - if p == nil || p.Title == nil { - return "" - } - return *p.Title -} - -// GetBuild returns the Build field. -func (p *PageBuildEvent) GetBuild() *PagesBuild { - if p == nil { - return nil - } - return p.Build -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PageBuildEvent) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetInstallation returns the Installation field. -func (p *PageBuildEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetRepo returns the Repo field. -func (p *PageBuildEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *PageBuildEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetBuildType returns the BuildType field if it's non-nil, zero value otherwise. -func (p *Pages) GetBuildType() string { - if p == nil || p.BuildType == nil { - return "" - } - return *p.BuildType -} - -// GetCNAME returns the CNAME field if it's non-nil, zero value otherwise. -func (p *Pages) GetCNAME() string { - if p == nil || p.CNAME == nil { - return "" - } - return *p.CNAME -} - -// GetCustom404 returns the Custom404 field if it's non-nil, zero value otherwise. -func (p *Pages) GetCustom404() bool { - if p == nil || p.Custom404 == nil { - return false - } - return *p.Custom404 -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *Pages) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetHTTPSCertificate returns the HTTPSCertificate field. 
-func (p *Pages) GetHTTPSCertificate() *PagesHTTPSCertificate { - if p == nil { - return nil - } - return p.HTTPSCertificate -} - -// GetHTTPSEnforced returns the HTTPSEnforced field if it's non-nil, zero value otherwise. -func (p *Pages) GetHTTPSEnforced() bool { - if p == nil || p.HTTPSEnforced == nil { - return false - } - return *p.HTTPSEnforced -} - -// GetPublic returns the Public field if it's non-nil, zero value otherwise. -func (p *Pages) GetPublic() bool { - if p == nil || p.Public == nil { - return false - } - return *p.Public -} - -// GetSource returns the Source field. -func (p *Pages) GetSource() *PagesSource { - if p == nil { - return nil - } - return p.Source -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (p *Pages) GetStatus() string { - if p == nil || p.Status == nil { - return "" - } - return *p.Status -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *Pages) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetCommit returns the Commit field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetCommit() string { - if p == nil || p.Commit == nil { - return "" - } - return *p.Commit -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDuration returns the Duration field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetDuration() int { - if p == nil || p.Duration == nil { - return 0 - } - return *p.Duration -} - -// GetError returns the Error field. -func (p *PagesBuild) GetError() *PagesError { - if p == nil { - return nil - } - return p.Error -} - -// GetPusher returns the Pusher field. -func (p *PagesBuild) GetPusher() *User { - if p == nil { - return nil - } - return p.Pusher -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetStatus() string { - if p == nil || p.Status == nil { - return "" - } - return *p.Status -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PagesBuild) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetCAAError returns the CAAError field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetCAAError() string { - if p == nil || p.CAAError == nil { - return "" - } - return *p.CAAError -} - -// GetDNSResolves returns the DNSResolves field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetDNSResolves() bool { - if p == nil || p.DNSResolves == nil { - return false - } - return *p.DNSResolves -} - -// GetEnforcesHTTPS returns the EnforcesHTTPS field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetEnforcesHTTPS() bool { - if p == nil || p.EnforcesHTTPS == nil { - return false - } - return *p.EnforcesHTTPS -} - -// GetHasCNAMERecord returns the HasCNAMERecord field if it's non-nil, zero value otherwise. 
-func (p *PagesDomain) GetHasCNAMERecord() bool { - if p == nil || p.HasCNAMERecord == nil { - return false - } - return *p.HasCNAMERecord -} - -// GetHasMXRecordsPresent returns the HasMXRecordsPresent field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetHasMXRecordsPresent() bool { - if p == nil || p.HasMXRecordsPresent == nil { - return false - } - return *p.HasMXRecordsPresent -} - -// GetHost returns the Host field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetHost() string { - if p == nil || p.Host == nil { - return "" - } - return *p.Host -} - -// GetHTTPSError returns the HTTPSError field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetHTTPSError() string { - if p == nil || p.HTTPSError == nil { - return "" - } - return *p.HTTPSError -} - -// GetIsApexDomain returns the IsApexDomain field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsApexDomain() bool { - if p == nil || p.IsApexDomain == nil { - return false - } - return *p.IsApexDomain -} - -// GetIsARecord returns the IsARecord field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsARecord() bool { - if p == nil || p.IsARecord == nil { - return false - } - return *p.IsARecord -} - -// GetIsCloudflareIP returns the IsCloudflareIP field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsCloudflareIP() bool { - if p == nil || p.IsCloudflareIP == nil { - return false - } - return *p.IsCloudflareIP -} - -// GetIsCNAMEToFastly returns the IsCNAMEToFastly field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsCNAMEToFastly() bool { - if p == nil || p.IsCNAMEToFastly == nil { - return false - } - return *p.IsCNAMEToFastly -} - -// GetIsCNAMEToGithubUserDomain returns the IsCNAMEToGithubUserDomain field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsCNAMEToGithubUserDomain() bool { - if p == nil || p.IsCNAMEToGithubUserDomain == nil { - return false - } - return *p.IsCNAMEToGithubUserDomain -} - -// GetIsCNAMEToPagesDotGithubDotCom returns the IsCNAMEToPagesDotGithubDotCom field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsCNAMEToPagesDotGithubDotCom() bool { - if p == nil || p.IsCNAMEToPagesDotGithubDotCom == nil { - return false - } - return *p.IsCNAMEToPagesDotGithubDotCom -} - -// GetIsFastlyIP returns the IsFastlyIP field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsFastlyIP() bool { - if p == nil || p.IsFastlyIP == nil { - return false - } - return *p.IsFastlyIP -} - -// GetIsHTTPSEligible returns the IsHTTPSEligible field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsHTTPSEligible() bool { - if p == nil || p.IsHTTPSEligible == nil { - return false - } - return *p.IsHTTPSEligible -} - -// GetIsNonGithubPagesIPPresent returns the IsNonGithubPagesIPPresent field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsNonGithubPagesIPPresent() bool { - if p == nil || p.IsNonGithubPagesIPPresent == nil { - return false - } - return *p.IsNonGithubPagesIPPresent -} - -// GetIsOldIPAddress returns the IsOldIPAddress field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsOldIPAddress() bool { - if p == nil || p.IsOldIPAddress == nil { - return false - } - return *p.IsOldIPAddress -} - -// GetIsPagesDomain returns the IsPagesDomain field if it's non-nil, zero value otherwise. 
-func (p *PagesDomain) GetIsPagesDomain() bool { - if p == nil || p.IsPagesDomain == nil { - return false - } - return *p.IsPagesDomain -} - -// GetIsPointedToGithubPagesIP returns the IsPointedToGithubPagesIP field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsPointedToGithubPagesIP() bool { - if p == nil || p.IsPointedToGithubPagesIP == nil { - return false - } - return *p.IsPointedToGithubPagesIP -} - -// GetIsProxied returns the IsProxied field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsProxied() bool { - if p == nil || p.IsProxied == nil { - return false - } - return *p.IsProxied -} - -// GetIsServedByPages returns the IsServedByPages field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsServedByPages() bool { - if p == nil || p.IsServedByPages == nil { - return false - } - return *p.IsServedByPages -} - -// GetIsValid returns the IsValid field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsValid() bool { - if p == nil || p.IsValid == nil { - return false - } - return *p.IsValid -} - -// GetIsValidDomain returns the IsValidDomain field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetIsValidDomain() bool { - if p == nil || p.IsValidDomain == nil { - return false - } - return *p.IsValidDomain -} - -// GetNameservers returns the Nameservers field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetNameservers() string { - if p == nil || p.Nameservers == nil { - return "" - } - return *p.Nameservers -} - -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetReason() string { - if p == nil || p.Reason == nil { - return "" - } - return *p.Reason -} - -// GetRespondsToHTTPS returns the RespondsToHTTPS field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetRespondsToHTTPS() bool { - if p == nil || p.RespondsToHTTPS == nil { - return false - } - return *p.RespondsToHTTPS -} - -// GetShouldBeARecord returns the ShouldBeARecord field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetShouldBeARecord() bool { - if p == nil || p.ShouldBeARecord == nil { - return false - } - return *p.ShouldBeARecord -} - -// GetURI returns the URI field if it's non-nil, zero value otherwise. -func (p *PagesDomain) GetURI() string { - if p == nil || p.URI == nil { - return "" - } - return *p.URI -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PagesError) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetAltDomain returns the AltDomain field. -func (p *PagesHealthCheckResponse) GetAltDomain() *PagesDomain { - if p == nil { - return nil - } - return p.AltDomain -} - -// GetDomain returns the Domain field. -func (p *PagesHealthCheckResponse) GetDomain() *PagesDomain { - if p == nil { - return nil - } - return p.Domain -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (p *PagesHTTPSCertificate) GetDescription() string { - if p == nil || p.Description == nil { - return "" - } - return *p.Description -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (p *PagesHTTPSCertificate) GetExpiresAt() string { - if p == nil || p.ExpiresAt == nil { - return "" - } - return *p.ExpiresAt -} - -// GetState returns the State field if it's non-nil, zero value otherwise. 
-func (p *PagesHTTPSCertificate) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetBranch returns the Branch field if it's non-nil, zero value otherwise. -func (p *PagesSource) GetBranch() string { - if p == nil || p.Branch == nil { - return "" - } - return *p.Branch -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (p *PagesSource) GetPath() string { - if p == nil || p.Path == nil { - return "" - } - return *p.Path -} - -// GetTotalPages returns the TotalPages field if it's non-nil, zero value otherwise. -func (p *PageStats) GetTotalPages() int { - if p == nil || p.TotalPages == nil { - return 0 - } - return *p.TotalPages -} - -// GetBuildType returns the BuildType field if it's non-nil, zero value otherwise. -func (p *PagesUpdate) GetBuildType() string { - if p == nil || p.BuildType == nil { - return "" - } - return *p.BuildType -} - -// GetCNAME returns the CNAME field if it's non-nil, zero value otherwise. -func (p *PagesUpdate) GetCNAME() string { - if p == nil || p.CNAME == nil { - return "" - } - return *p.CNAME -} - -// GetHTTPSEnforced returns the HTTPSEnforced field if it's non-nil, zero value otherwise. -func (p *PagesUpdate) GetHTTPSEnforced() bool { - if p == nil || p.HTTPSEnforced == nil { - return false - } - return *p.HTTPSEnforced -} - -// GetPublic returns the Public field if it's non-nil, zero value otherwise. -func (p *PagesUpdate) GetPublic() bool { - if p == nil || p.Public == nil { - return false - } - return *p.Public -} - -// GetSource returns the Source field. -func (p *PagesUpdate) GetSource() *PagesSource { - if p == nil { - return nil - } - return p.Source -} - -// GetHook returns the Hook field. -func (p *PingEvent) GetHook() *Hook { - if p == nil { - return nil - } - return p.Hook -} - -// GetHookID returns the HookID field if it's non-nil, zero value otherwise. -func (p *PingEvent) GetHookID() int64 { - if p == nil || p.HookID == nil { - return 0 - } - return *p.HookID -} - -// GetInstallation returns the Installation field. -func (p *PingEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. -func (p *PingEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetRepo returns the Repo field. -func (p *PingEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *PingEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetZen returns the Zen field if it's non-nil, zero value otherwise. -func (p *PingEvent) GetZen() string { - if p == nil || p.Zen == nil { - return "" - } - return *p.Zen -} - -// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise. -func (p *Plan) GetCollaborators() int { - if p == nil || p.Collaborators == nil { - return 0 - } - return *p.Collaborators -} - -// GetFilledSeats returns the FilledSeats field if it's non-nil, zero value otherwise. -func (p *Plan) GetFilledSeats() int { - if p == nil || p.FilledSeats == nil { - return 0 - } - return *p.FilledSeats -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *Plan) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetPrivateRepos returns the PrivateRepos field if it's non-nil, zero value otherwise. 
-func (p *Plan) GetPrivateRepos() int64 { - if p == nil || p.PrivateRepos == nil { - return 0 - } - return *p.PrivateRepos -} - -// GetSeats returns the Seats field if it's non-nil, zero value otherwise. -func (p *Plan) GetSeats() int { - if p == nil || p.Seats == nil { - return 0 - } - return *p.Seats -} - -// GetSpace returns the Space field if it's non-nil, zero value otherwise. -func (p *Plan) GetSpace() int { - if p == nil || p.Space == nil { - return 0 - } - return *p.Space -} - -// GetCode returns the Code field if it's non-nil, zero value otherwise. -func (p *PolicyOverrideReason) GetCode() string { - if p == nil || p.Code == nil { - return "" - } - return *p.Code -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PolicyOverrideReason) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetConfigURL returns the ConfigURL field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetConfigURL() string { - if p == nil || p.ConfigURL == nil { - return "" - } - return *p.ConfigURL -} - -// GetEnforcement returns the Enforcement field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetEnforcement() string { - if p == nil || p.Enforcement == nil { - return "" - } - return *p.Enforcement -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PreReceiveHook) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetHRef returns the HRef field if it's non-nil, zero value otherwise. -func (p *PRLink) GetHRef() string { - if p == nil || p.HRef == nil { - return "" - } - return *p.HRef -} - -// GetComments returns the Comments field. -func (p *PRLinks) GetComments() *PRLink { - if p == nil { - return nil - } - return p.Comments -} - -// GetCommits returns the Commits field. -func (p *PRLinks) GetCommits() *PRLink { - if p == nil { - return nil - } - return p.Commits -} - -// GetHTML returns the HTML field. -func (p *PRLinks) GetHTML() *PRLink { - if p == nil { - return nil - } - return p.HTML -} - -// GetIssue returns the Issue field. -func (p *PRLinks) GetIssue() *PRLink { - if p == nil { - return nil - } - return p.Issue -} - -// GetReviewComment returns the ReviewComment field. -func (p *PRLinks) GetReviewComment() *PRLink { - if p == nil { - return nil - } - return p.ReviewComment -} - -// GetReviewComments returns the ReviewComments field. -func (p *PRLinks) GetReviewComments() *PRLink { - if p == nil { - return nil - } - return p.ReviewComments -} - -// GetSelf returns the Self field. -func (p *PRLinks) GetSelf() *PRLink { - if p == nil { - return nil - } - return p.Self -} - -// GetStatuses returns the Statuses field. -func (p *PRLinks) GetStatuses() *PRLink { - if p == nil { - return nil - } - return p.Statuses -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *Project) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetColumnsURL returns the ColumnsURL field if it's non-nil, zero value otherwise. -func (p *Project) GetColumnsURL() string { - if p == nil || p.ColumnsURL == nil { - return "" - } - return *p.ColumnsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (p *Project) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetCreator returns the Creator field. -func (p *Project) GetCreator() *User { - if p == nil { - return nil - } - return p.Creator -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *Project) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *Project) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *Project) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *Project) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (p *Project) GetNumber() int { - if p == nil || p.Number == nil { - return 0 - } - return *p.Number -} - -// GetOrganizationPermission returns the OrganizationPermission field if it's non-nil, zero value otherwise. -func (p *Project) GetOrganizationPermission() string { - if p == nil || p.OrganizationPermission == nil { - return "" - } - return *p.OrganizationPermission -} - -// GetOwnerURL returns the OwnerURL field if it's non-nil, zero value otherwise. -func (p *Project) GetOwnerURL() string { - if p == nil || p.OwnerURL == nil { - return "" - } - return *p.OwnerURL -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (p *Project) GetPrivate() bool { - if p == nil || p.Private == nil { - return false - } - return *p.Private -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *Project) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *Project) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *Project) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (p *ProjectBody) GetFrom() string { - if p == nil || p.From == nil { - return "" - } - return *p.From -} - -// GetArchived returns the Archived field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetArchived() bool { - if p == nil || p.Archived == nil { - return false - } - return *p.Archived -} - -// GetColumnID returns the ColumnID field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetColumnID() int64 { - if p == nil || p.ColumnID == nil { - return 0 - } - return *p.ColumnID -} - -// GetColumnName returns the ColumnName field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetColumnName() string { - if p == nil || p.ColumnName == nil { - return "" - } - return *p.ColumnName -} - -// GetColumnURL returns the ColumnURL field if it's non-nil, zero value otherwise. 
-func (p *ProjectCard) GetColumnURL() string { - if p == nil || p.ColumnURL == nil { - return "" - } - return *p.ColumnURL -} - -// GetContentURL returns the ContentURL field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetContentURL() string { - if p == nil || p.ContentURL == nil { - return "" - } - return *p.ContentURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetCreator returns the Creator field. -func (p *ProjectCard) GetCreator() *User { - if p == nil { - return nil - } - return p.Creator -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetNote() string { - if p == nil || p.Note == nil { - return "" - } - return *p.Note -} - -// GetPreviousColumnName returns the PreviousColumnName field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetPreviousColumnName() string { - if p == nil || p.PreviousColumnName == nil { - return "" - } - return *p.PreviousColumnName -} - -// GetProjectID returns the ProjectID field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetProjectID() int64 { - if p == nil || p.ProjectID == nil { - return 0 - } - return *p.ProjectID -} - -// GetProjectURL returns the ProjectURL field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetProjectURL() string { - if p == nil || p.ProjectURL == nil { - return "" - } - return *p.ProjectURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *ProjectCard) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetNote returns the Note field. -func (p *ProjectCardChange) GetNote() *ProjectCardNote { - if p == nil { - return nil - } - return p.Note -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *ProjectCardEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAfterID returns the AfterID field if it's non-nil, zero value otherwise. -func (p *ProjectCardEvent) GetAfterID() int64 { - if p == nil || p.AfterID == nil { - return 0 - } - return *p.AfterID -} - -// GetChanges returns the Changes field. -func (p *ProjectCardEvent) GetChanges() *ProjectCardChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *ProjectCardEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. -func (p *ProjectCardEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetProjectCard returns the ProjectCard field. 
-func (p *ProjectCardEvent) GetProjectCard() *ProjectCard { - if p == nil { - return nil - } - return p.ProjectCard -} - -// GetRepo returns the Repo field. -func (p *ProjectCardEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *ProjectCardEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetArchivedState returns the ArchivedState field if it's non-nil, zero value otherwise. -func (p *ProjectCardListOptions) GetArchivedState() string { - if p == nil || p.ArchivedState == nil { - return "" - } - return *p.ArchivedState -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (p *ProjectCardNote) GetFrom() string { - if p == nil || p.From == nil { - return "" - } - return *p.From -} - -// GetArchived returns the Archived field if it's non-nil, zero value otherwise. -func (p *ProjectCardOptions) GetArchived() bool { - if p == nil || p.Archived == nil { - return false - } - return *p.Archived -} - -// GetBody returns the Body field. -func (p *ProjectChange) GetBody() *ProjectBody { - if p == nil { - return nil - } - return p.Body -} - -// GetName returns the Name field. -func (p *ProjectChange) GetName() *ProjectName { - if p == nil { - return nil - } - return p.Name -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (p *ProjectCollaboratorOptions) GetPermission() string { - if p == nil || p.Permission == nil { - return "" - } - return *p.Permission -} - -// GetCardsURL returns the CardsURL field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetCardsURL() string { - if p == nil || p.CardsURL == nil { - return "" - } - return *p.CardsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetProjectURL returns the ProjectURL field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetProjectURL() string { - if p == nil || p.ProjectURL == nil { - return "" - } - return *p.ProjectURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *ProjectColumn) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetName returns the Name field. -func (p *ProjectColumnChange) GetName() *ProjectColumnName { - if p == nil { - return nil - } - return p.Name -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. 
-func (p *ProjectColumnEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAfterID returns the AfterID field if it's non-nil, zero value otherwise. -func (p *ProjectColumnEvent) GetAfterID() int64 { - if p == nil || p.AfterID == nil { - return 0 - } - return *p.AfterID -} - -// GetChanges returns the Changes field. -func (p *ProjectColumnEvent) GetChanges() *ProjectColumnChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *ProjectColumnEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. -func (p *ProjectColumnEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetProjectColumn returns the ProjectColumn field. -func (p *ProjectColumnEvent) GetProjectColumn() *ProjectColumn { - if p == nil { - return nil - } - return p.ProjectColumn -} - -// GetRepo returns the Repo field. -func (p *ProjectColumnEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *ProjectColumnEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (p *ProjectColumnName) GetFrom() string { - if p == nil || p.From == nil { - return "" - } - return *p.From -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *ProjectEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetChanges returns the Changes field. -func (p *ProjectEvent) GetChanges() *ProjectChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *ProjectEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrg returns the Org field. -func (p *ProjectEvent) GetOrg() *Organization { - if p == nil { - return nil - } - return p.Org -} - -// GetProject returns the Project field. -func (p *ProjectEvent) GetProject() *Project { - if p == nil { - return nil - } - return p.Project -} - -// GetRepo returns the Repo field. -func (p *ProjectEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *ProjectEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (p *ProjectName) GetFrom() string { - if p == nil || p.From == nil { - return "" - } - return *p.From -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetOrganizationPermission returns the OrganizationPermission field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetOrganizationPermission() string { - if p == nil || p.OrganizationPermission == nil { - return "" - } - return *p.OrganizationPermission -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. 
-func (p *ProjectOptions) GetPrivate() bool { - if p == nil || p.Private == nil { - return false - } - return *p.Private -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *ProjectOptions) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (p *ProjectPermissionLevel) GetPermission() string { - if p == nil || p.Permission == nil { - return "" - } - return *p.Permission -} - -// GetUser returns the User field. -func (p *ProjectPermissionLevel) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetAllowDeletions returns the AllowDeletions field. -func (p *Protection) GetAllowDeletions() *AllowDeletions { - if p == nil { - return nil - } - return p.AllowDeletions -} - -// GetAllowForcePushes returns the AllowForcePushes field. -func (p *Protection) GetAllowForcePushes() *AllowForcePushes { - if p == nil { - return nil - } - return p.AllowForcePushes -} - -// GetAllowForkSyncing returns the AllowForkSyncing field. -func (p *Protection) GetAllowForkSyncing() *AllowForkSyncing { - if p == nil { - return nil - } - return p.AllowForkSyncing -} - -// GetBlockCreations returns the BlockCreations field. -func (p *Protection) GetBlockCreations() *BlockCreations { - if p == nil { - return nil - } - return p.BlockCreations -} - -// GetEnforceAdmins returns the EnforceAdmins field. -func (p *Protection) GetEnforceAdmins() *AdminEnforcement { - if p == nil { - return nil - } - return p.EnforceAdmins -} - -// GetLockBranch returns the LockBranch field. -func (p *Protection) GetLockBranch() *LockBranch { - if p == nil { - return nil - } - return p.LockBranch -} - -// GetRequiredConversationResolution returns the RequiredConversationResolution field. -func (p *Protection) GetRequiredConversationResolution() *RequiredConversationResolution { - if p == nil { - return nil - } - return p.RequiredConversationResolution -} - -// GetRequiredPullRequestReviews returns the RequiredPullRequestReviews field. -func (p *Protection) GetRequiredPullRequestReviews() *PullRequestReviewsEnforcement { - if p == nil { - return nil - } - return p.RequiredPullRequestReviews -} - -// GetRequiredSignatures returns the RequiredSignatures field. -func (p *Protection) GetRequiredSignatures() *SignaturesProtectedBranch { - if p == nil { - return nil - } - return p.RequiredSignatures -} - -// GetRequiredStatusChecks returns the RequiredStatusChecks field. -func (p *Protection) GetRequiredStatusChecks() *RequiredStatusChecks { - if p == nil { - return nil - } - return p.RequiredStatusChecks -} - -// GetRequireLinearHistory returns the RequireLinearHistory field. -func (p *Protection) GetRequireLinearHistory() *RequireLinearHistory { - if p == nil { - return nil - } - return p.RequireLinearHistory -} - -// GetRestrictions returns the Restrictions field. -func (p *Protection) GetRestrictions() *BranchRestrictions { - if p == nil { - return nil - } - return p.Restrictions -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *Protection) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetAdminEnforced returns the AdminEnforced field. -func (p *ProtectionChanges) GetAdminEnforced() *AdminEnforcedChanges { - if p == nil { - return nil - } - return p.AdminEnforced -} - -// GetAllowDeletionsEnforcementLevel returns the AllowDeletionsEnforcementLevel field. 
-func (p *ProtectionChanges) GetAllowDeletionsEnforcementLevel() *AllowDeletionsEnforcementLevelChanges { - if p == nil { - return nil - } - return p.AllowDeletionsEnforcementLevel -} - -// GetAuthorizedActorNames returns the AuthorizedActorNames field. -func (p *ProtectionChanges) GetAuthorizedActorNames() *AuthorizedActorNames { - if p == nil { - return nil - } - return p.AuthorizedActorNames -} - -// GetAuthorizedActorsOnly returns the AuthorizedActorsOnly field. -func (p *ProtectionChanges) GetAuthorizedActorsOnly() *AuthorizedActorsOnly { - if p == nil { - return nil - } - return p.AuthorizedActorsOnly -} - -// GetAuthorizedDismissalActorsOnly returns the AuthorizedDismissalActorsOnly field. -func (p *ProtectionChanges) GetAuthorizedDismissalActorsOnly() *AuthorizedDismissalActorsOnlyChanges { - if p == nil { - return nil - } - return p.AuthorizedDismissalActorsOnly -} - -// GetCreateProtected returns the CreateProtected field. -func (p *ProtectionChanges) GetCreateProtected() *CreateProtectedChanges { - if p == nil { - return nil - } - return p.CreateProtected -} - -// GetDismissStaleReviewsOnPush returns the DismissStaleReviewsOnPush field. -func (p *ProtectionChanges) GetDismissStaleReviewsOnPush() *DismissStaleReviewsOnPushChanges { - if p == nil { - return nil - } - return p.DismissStaleReviewsOnPush -} - -// GetLinearHistoryRequirementEnforcementLevel returns the LinearHistoryRequirementEnforcementLevel field. -func (p *ProtectionChanges) GetLinearHistoryRequirementEnforcementLevel() *LinearHistoryRequirementEnforcementLevelChanges { - if p == nil { - return nil - } - return p.LinearHistoryRequirementEnforcementLevel -} - -// GetPullRequestReviewsEnforcementLevel returns the PullRequestReviewsEnforcementLevel field. -func (p *ProtectionChanges) GetPullRequestReviewsEnforcementLevel() *PullRequestReviewsEnforcementLevelChanges { - if p == nil { - return nil - } - return p.PullRequestReviewsEnforcementLevel -} - -// GetRequireCodeOwnerReview returns the RequireCodeOwnerReview field. -func (p *ProtectionChanges) GetRequireCodeOwnerReview() *RequireCodeOwnerReviewChanges { - if p == nil { - return nil - } - return p.RequireCodeOwnerReview -} - -// GetRequiredConversationResolutionLevel returns the RequiredConversationResolutionLevel field. -func (p *ProtectionChanges) GetRequiredConversationResolutionLevel() *RequiredConversationResolutionLevelChanges { - if p == nil { - return nil - } - return p.RequiredConversationResolutionLevel -} - -// GetRequiredDeploymentsEnforcementLevel returns the RequiredDeploymentsEnforcementLevel field. -func (p *ProtectionChanges) GetRequiredDeploymentsEnforcementLevel() *RequiredDeploymentsEnforcementLevelChanges { - if p == nil { - return nil - } - return p.RequiredDeploymentsEnforcementLevel -} - -// GetRequiredStatusChecks returns the RequiredStatusChecks field. -func (p *ProtectionChanges) GetRequiredStatusChecks() *RequiredStatusChecksChanges { - if p == nil { - return nil - } - return p.RequiredStatusChecks -} - -// GetRequiredStatusChecksEnforcementLevel returns the RequiredStatusChecksEnforcementLevel field. -func (p *ProtectionChanges) GetRequiredStatusChecksEnforcementLevel() *RequiredStatusChecksEnforcementLevelChanges { - if p == nil { - return nil - } - return p.RequiredStatusChecksEnforcementLevel -} - -// GetSignatureRequirementEnforcementLevel returns the SignatureRequirementEnforcementLevel field. 
-func (p *ProtectionChanges) GetSignatureRequirementEnforcementLevel() *SignatureRequirementEnforcementLevelChanges { - if p == nil { - return nil - } - return p.SignatureRequirementEnforcementLevel -} - -// GetAllowDeletions returns the AllowDeletions field if it's non-nil, zero value otherwise. -func (p *ProtectionRequest) GetAllowDeletions() bool { - if p == nil || p.AllowDeletions == nil { - return false - } - return *p.AllowDeletions -} - -// GetAllowForcePushes returns the AllowForcePushes field if it's non-nil, zero value otherwise. -func (p *ProtectionRequest) GetAllowForcePushes() bool { - if p == nil || p.AllowForcePushes == nil { - return false - } - return *p.AllowForcePushes -} - -// GetAllowForkSyncing returns the AllowForkSyncing field if it's non-nil, zero value otherwise. -func (p *ProtectionRequest) GetAllowForkSyncing() bool { - if p == nil || p.AllowForkSyncing == nil { - return false - } - return *p.AllowForkSyncing -} - -// GetBlockCreations returns the BlockCreations field if it's non-nil, zero value otherwise. -func (p *ProtectionRequest) GetBlockCreations() bool { - if p == nil || p.BlockCreations == nil { - return false - } - return *p.BlockCreations -} - -// GetLockBranch returns the LockBranch field if it's non-nil, zero value otherwise. -func (p *ProtectionRequest) GetLockBranch() bool { - if p == nil || p.LockBranch == nil { - return false - } - return *p.LockBranch -} - -// GetRequiredConversationResolution returns the RequiredConversationResolution field if it's non-nil, zero value otherwise. -func (p *ProtectionRequest) GetRequiredConversationResolution() bool { - if p == nil || p.RequiredConversationResolution == nil { - return false - } - return *p.RequiredConversationResolution -} - -// GetRequiredPullRequestReviews returns the RequiredPullRequestReviews field. -func (p *ProtectionRequest) GetRequiredPullRequestReviews() *PullRequestReviewsEnforcementRequest { - if p == nil { - return nil - } - return p.RequiredPullRequestReviews -} - -// GetRequiredStatusChecks returns the RequiredStatusChecks field. -func (p *ProtectionRequest) GetRequiredStatusChecks() *RequiredStatusChecks { - if p == nil { - return nil - } - return p.RequiredStatusChecks -} - -// GetRequireLinearHistory returns the RequireLinearHistory field if it's non-nil, zero value otherwise. -func (p *ProtectionRequest) GetRequireLinearHistory() bool { - if p == nil || p.RequireLinearHistory == nil { - return false - } - return *p.RequireLinearHistory -} - -// GetRestrictions returns the Restrictions field. -func (p *ProtectionRequest) GetRestrictions() *BranchRestrictionsRequest { - if p == nil { - return nil - } - return p.Restrictions -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *ProtectionRule) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *ProtectionRule) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (p *ProtectionRule) GetType() string { - if p == nil || p.Type == nil { - return "" - } - return *p.Type -} - -// GetWaitTimer returns the WaitTimer field if it's non-nil, zero value otherwise. -func (p *ProtectionRule) GetWaitTimer() int { - if p == nil || p.WaitTimer == nil { - return 0 - } - return *p.WaitTimer -} - -// GetInstallation returns the Installation field. 
-func (p *PublicEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetRepo returns the Repo field. -func (p *PublicEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *PublicEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetKey returns the Key field if it's non-nil, zero value otherwise. -func (p *PublicKey) GetKey() string { - if p == nil || p.Key == nil { - return "" - } - return *p.Key -} - -// GetKeyID returns the KeyID field if it's non-nil, zero value otherwise. -func (p *PublicKey) GetKeyID() string { - if p == nil || p.KeyID == nil { - return "" - } - return *p.KeyID -} - -// GetActiveLockReason returns the ActiveLockReason field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetActiveLockReason() string { - if p == nil || p.ActiveLockReason == nil { - return "" - } - return *p.ActiveLockReason -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetAdditions() int { - if p == nil || p.Additions == nil { - return 0 - } - return *p.Additions -} - -// GetAssignee returns the Assignee field. -func (p *PullRequest) GetAssignee() *User { - if p == nil { - return nil - } - return p.Assignee -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetAuthorAssociation() string { - if p == nil || p.AuthorAssociation == nil { - return "" - } - return *p.AuthorAssociation -} - -// GetAutoMerge returns the AutoMerge field. -func (p *PullRequest) GetAutoMerge() *PullRequestAutoMerge { - if p == nil { - return nil - } - return p.AutoMerge -} - -// GetBase returns the Base field. -func (p *PullRequest) GetBase() *PullRequestBranch { - if p == nil { - return nil - } - return p.Base -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetChangedFiles returns the ChangedFiles field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetChangedFiles() int { - if p == nil || p.ChangedFiles == nil { - return 0 - } - return *p.ChangedFiles -} - -// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetClosedAt() Timestamp { - if p == nil || p.ClosedAt == nil { - return Timestamp{} - } - return *p.ClosedAt -} - -// GetComments returns the Comments field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetComments() int { - if p == nil || p.Comments == nil { - return 0 - } - return *p.Comments -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetCommentsURL() string { - if p == nil || p.CommentsURL == nil { - return "" - } - return *p.CommentsURL -} - -// GetCommits returns the Commits field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetCommits() int { - if p == nil || p.Commits == nil { - return 0 - } - return *p.Commits -} - -// GetCommitsURL returns the CommitsURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetCommitsURL() string { - if p == nil || p.CommitsURL == nil { - return "" - } - return *p.CommitsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (p *PullRequest) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetDeletions() int { - if p == nil || p.Deletions == nil { - return 0 - } - return *p.Deletions -} - -// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetDiffURL() string { - if p == nil || p.DiffURL == nil { - return "" - } - return *p.DiffURL -} - -// GetDraft returns the Draft field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetDraft() bool { - if p == nil || p.Draft == nil { - return false - } - return *p.Draft -} - -// GetHead returns the Head field. -func (p *PullRequest) GetHead() *PullRequestBranch { - if p == nil { - return nil - } - return p.Head -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetIssueURL returns the IssueURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetIssueURL() string { - if p == nil || p.IssueURL == nil { - return "" - } - return *p.IssueURL -} - -// GetLinks returns the Links field. -func (p *PullRequest) GetLinks() *PRLinks { - if p == nil { - return nil - } - return p.Links -} - -// GetLocked returns the Locked field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetLocked() bool { - if p == nil || p.Locked == nil { - return false - } - return *p.Locked -} - -// GetMaintainerCanModify returns the MaintainerCanModify field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMaintainerCanModify() bool { - if p == nil || p.MaintainerCanModify == nil { - return false - } - return *p.MaintainerCanModify -} - -// GetMergeable returns the Mergeable field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergeable() bool { - if p == nil || p.Mergeable == nil { - return false - } - return *p.Mergeable -} - -// GetMergeableState returns the MergeableState field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergeableState() string { - if p == nil || p.MergeableState == nil { - return "" - } - return *p.MergeableState -} - -// GetMergeCommitSHA returns the MergeCommitSHA field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergeCommitSHA() string { - if p == nil || p.MergeCommitSHA == nil { - return "" - } - return *p.MergeCommitSHA -} - -// GetMerged returns the Merged field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMerged() bool { - if p == nil || p.Merged == nil { - return false - } - return *p.Merged -} - -// GetMergedAt returns the MergedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergedAt() Timestamp { - if p == nil || p.MergedAt == nil { - return Timestamp{} - } - return *p.MergedAt -} - -// GetMergedBy returns the MergedBy field. -func (p *PullRequest) GetMergedBy() *User { - if p == nil { - return nil - } - return p.MergedBy -} - -// GetMilestone returns the Milestone field. -func (p *PullRequest) GetMilestone() *Milestone { - if p == nil { - return nil - } - return p.Milestone -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. 
-func (p *PullRequest) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetNumber() int { - if p == nil || p.Number == nil { - return 0 - } - return *p.Number -} - -// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetPatchURL() string { - if p == nil || p.PatchURL == nil { - return "" - } - return *p.PatchURL -} - -// GetRebaseable returns the Rebaseable field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetRebaseable() bool { - if p == nil || p.Rebaseable == nil { - return false - } - return *p.Rebaseable -} - -// GetReviewComments returns the ReviewComments field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetReviewComments() int { - if p == nil || p.ReviewComments == nil { - return 0 - } - return *p.ReviewComments -} - -// GetReviewCommentsURL returns the ReviewCommentsURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetReviewCommentsURL() string { - if p == nil || p.ReviewCommentsURL == nil { - return "" - } - return *p.ReviewCommentsURL -} - -// GetReviewCommentURL returns the ReviewCommentURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetReviewCommentURL() string { - if p == nil || p.ReviewCommentURL == nil { - return "" - } - return *p.ReviewCommentURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetStatusesURL() string { - if p == nil || p.StatusesURL == nil { - return "" - } - return *p.StatusesURL -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetTitle() string { - if p == nil || p.Title == nil { - return "" - } - return *p.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetUser returns the User field. -func (p *PullRequest) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetCommitMessage returns the CommitMessage field if it's non-nil, zero value otherwise. -func (p *PullRequestAutoMerge) GetCommitMessage() string { - if p == nil || p.CommitMessage == nil { - return "" - } - return *p.CommitMessage -} - -// GetCommitTitle returns the CommitTitle field if it's non-nil, zero value otherwise. -func (p *PullRequestAutoMerge) GetCommitTitle() string { - if p == nil || p.CommitTitle == nil { - return "" - } - return *p.CommitTitle -} - -// GetEnabledBy returns the EnabledBy field. -func (p *PullRequestAutoMerge) GetEnabledBy() *User { - if p == nil { - return nil - } - return p.EnabledBy -} - -// GetMergeMethod returns the MergeMethod field if it's non-nil, zero value otherwise. -func (p *PullRequestAutoMerge) GetMergeMethod() string { - if p == nil || p.MergeMethod == nil { - return "" - } - return *p.MergeMethod -} - -// GetLabel returns the Label field if it's non-nil, zero value otherwise. 
-func (p *PullRequestBranch) GetLabel() string { - if p == nil || p.Label == nil { - return "" - } - return *p.Label -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (p *PullRequestBranch) GetRef() string { - if p == nil || p.Ref == nil { - return "" - } - return *p.Ref -} - -// GetRepo returns the Repo field. -func (p *PullRequestBranch) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (p *PullRequestBranch) GetSHA() string { - if p == nil || p.SHA == nil { - return "" - } - return *p.SHA -} - -// GetUser returns the User field. -func (p *PullRequestBranch) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetExpectedHeadSHA returns the ExpectedHeadSHA field if it's non-nil, zero value otherwise. -func (p *PullRequestBranchUpdateOptions) GetExpectedHeadSHA() string { - if p == nil || p.ExpectedHeadSHA == nil { - return "" - } - return *p.ExpectedHeadSHA -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PullRequestBranchUpdateResponse) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PullRequestBranchUpdateResponse) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetAuthorAssociation() string { - if p == nil || p.AuthorAssociation == nil { - return "" - } - return *p.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetCommitID() string { - if p == nil || p.CommitID == nil { - return "" - } - return *p.CommitID -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDiffHunk returns the DiffHunk field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetDiffHunk() string { - if p == nil || p.DiffHunk == nil { - return "" - } - return *p.DiffHunk -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetInReplyTo returns the InReplyTo field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetInReplyTo() int64 { - if p == nil || p.InReplyTo == nil { - return 0 - } - return *p.InReplyTo -} - -// GetLine returns the Line field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetLine() int { - if p == nil || p.Line == nil { - return 0 - } - return *p.Line -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. 
-func (p *PullRequestComment) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetOriginalCommitID returns the OriginalCommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetOriginalCommitID() string { - if p == nil || p.OriginalCommitID == nil { - return "" - } - return *p.OriginalCommitID -} - -// GetOriginalLine returns the OriginalLine field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetOriginalLine() int { - if p == nil || p.OriginalLine == nil { - return 0 - } - return *p.OriginalLine -} - -// GetOriginalPosition returns the OriginalPosition field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetOriginalPosition() int { - if p == nil || p.OriginalPosition == nil { - return 0 - } - return *p.OriginalPosition -} - -// GetOriginalStartLine returns the OriginalStartLine field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetOriginalStartLine() int { - if p == nil || p.OriginalStartLine == nil { - return 0 - } - return *p.OriginalStartLine -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPath() string { - if p == nil || p.Path == nil { - return "" - } - return *p.Path -} - -// GetPosition returns the Position field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPosition() int { - if p == nil || p.Position == nil { - return 0 - } - return *p.Position -} - -// GetPullRequestReviewID returns the PullRequestReviewID field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPullRequestReviewID() int64 { - if p == nil || p.PullRequestReviewID == nil { - return 0 - } - return *p.PullRequestReviewID -} - -// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetPullRequestURL() string { - if p == nil || p.PullRequestURL == nil { - return "" - } - return *p.PullRequestURL -} - -// GetReactions returns the Reactions field. -func (p *PullRequestComment) GetReactions() *Reactions { - if p == nil { - return nil - } - return p.Reactions -} - -// GetSide returns the Side field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetSide() string { - if p == nil || p.Side == nil { - return "" - } - return *p.Side -} - -// GetStartLine returns the StartLine field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetStartLine() int { - if p == nil || p.StartLine == nil { - return 0 - } - return *p.StartLine -} - -// GetStartSide returns the StartSide field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetStartSide() string { - if p == nil || p.StartSide == nil { - return "" - } - return *p.StartSide -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetUser returns the User field. -func (p *PullRequestComment) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. 
-func (p *PullRequestEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAfter returns the After field if it's non-nil, zero value otherwise. -func (p *PullRequestEvent) GetAfter() string { - if p == nil || p.After == nil { - return "" - } - return *p.After -} - -// GetAssignee returns the Assignee field. -func (p *PullRequestEvent) GetAssignee() *User { - if p == nil { - return nil - } - return p.Assignee -} - -// GetBefore returns the Before field if it's non-nil, zero value otherwise. -func (p *PullRequestEvent) GetBefore() string { - if p == nil || p.Before == nil { - return "" - } - return *p.Before -} - -// GetChanges returns the Changes field. -func (p *PullRequestEvent) GetChanges() *EditChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *PullRequestEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetLabel returns the Label field. -func (p *PullRequestEvent) GetLabel() *Label { - if p == nil { - return nil - } - return p.Label -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (p *PullRequestEvent) GetNumber() int { - if p == nil || p.Number == nil { - return 0 - } - return *p.Number -} - -// GetOrganization returns the Organization field. -func (p *PullRequestEvent) GetOrganization() *Organization { - if p == nil { - return nil - } - return p.Organization -} - -// GetPullRequest returns the PullRequest field. -func (p *PullRequestEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetRequestedReviewer returns the RequestedReviewer field. -func (p *PullRequestEvent) GetRequestedReviewer() *User { - if p == nil { - return nil - } - return p.RequestedReviewer -} - -// GetRequestedTeam returns the RequestedTeam field. -func (p *PullRequestEvent) GetRequestedTeam() *Team { - if p == nil { - return nil - } - return p.RequestedTeam -} - -// GetSender returns the Sender field. -func (p *PullRequestEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetDiffURL() string { - if p == nil || p.DiffURL == nil { - return "" - } - return *p.DiffURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetPatchURL() string { - if p == nil || p.PatchURL == nil { - return "" - } - return *p.PatchURL -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PullRequestLinks) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetMerged returns the Merged field if it's non-nil, zero value otherwise. -func (p *PullRequestMergeResult) GetMerged() bool { - if p == nil || p.Merged == nil { - return false - } - return *p.Merged -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. 
-func (p *PullRequestMergeResult) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (p *PullRequestMergeResult) GetSHA() string { - if p == nil || p.SHA == nil { - return "" - } - return *p.SHA -} - -// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetAuthorAssociation() string { - if p == nil || p.AuthorAssociation == nil { - return "" - } - return *p.AuthorAssociation -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetCommitID() string { - if p == nil || p.CommitID == nil { - return "" - } - return *p.CommitID -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetPullRequestURL() string { - if p == nil || p.PullRequestURL == nil { - return "" - } - return *p.PullRequestURL -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetState() string { - if p == nil || p.State == nil { - return "" - } - return *p.State -} - -// GetSubmittedAt returns the SubmittedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetSubmittedAt() Timestamp { - if p == nil || p.SubmittedAt == nil { - return Timestamp{} - } - return *p.SubmittedAt -} - -// GetUser returns the User field. -func (p *PullRequestReview) GetUser() *User { - if p == nil { - return nil - } - return p.User -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewCommentEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetChanges returns the Changes field. -func (p *PullRequestReviewCommentEvent) GetChanges() *EditChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetComment returns the Comment field. -func (p *PullRequestReviewCommentEvent) GetComment() *PullRequestComment { - if p == nil { - return nil - } - return p.Comment -} - -// GetInstallation returns the Installation field. -func (p *PullRequestReviewCommentEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetPullRequest returns the PullRequest field. -func (p *PullRequestReviewCommentEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestReviewCommentEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. 
-func (p *PullRequestReviewCommentEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewDismissalRequest) GetMessage() string { - if p == nil || p.Message == nil { - return "" - } - return *p.Message -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetInstallation returns the Installation field. -func (p *PullRequestReviewEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrganization returns the Organization field. -func (p *PullRequestReviewEvent) GetOrganization() *Organization { - if p == nil { - return nil - } - return p.Organization -} - -// GetPullRequest returns the PullRequest field. -func (p *PullRequestReviewEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestReviewEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetReview returns the Review field. -func (p *PullRequestReviewEvent) GetReview() *PullRequestReview { - if p == nil { - return nil - } - return p.Review -} - -// GetSender returns the Sender field. -func (p *PullRequestReviewEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewRequest) GetBody() string { - if p == nil || p.Body == nil { - return "" - } - return *p.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewRequest) GetCommitID() string { - if p == nil || p.CommitID == nil { - return "" - } - return *p.CommitID -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewRequest) GetEvent() string { - if p == nil || p.Event == nil { - return "" - } - return *p.Event -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewRequest) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetBypassPullRequestAllowances returns the BypassPullRequestAllowances field. -func (p *PullRequestReviewsEnforcement) GetBypassPullRequestAllowances() *BypassPullRequestAllowances { - if p == nil { - return nil - } - return p.BypassPullRequestAllowances -} - -// GetDismissalRestrictions returns the DismissalRestrictions field. -func (p *PullRequestReviewsEnforcement) GetDismissalRestrictions() *DismissalRestrictions { - if p == nil { - return nil - } - return p.DismissalRestrictions -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewsEnforcementLevelChanges) GetFrom() string { - if p == nil || p.From == nil { - return "" - } - return *p.From -} - -// GetBypassPullRequestAllowancesRequest returns the BypassPullRequestAllowancesRequest field. -func (p *PullRequestReviewsEnforcementRequest) GetBypassPullRequestAllowancesRequest() *BypassPullRequestAllowancesRequest { - if p == nil { - return nil - } - return p.BypassPullRequestAllowancesRequest -} - -// GetDismissalRestrictionsRequest returns the DismissalRestrictionsRequest field. 
-func (p *PullRequestReviewsEnforcementRequest) GetDismissalRestrictionsRequest() *DismissalRestrictionsRequest { - if p == nil { - return nil - } - return p.DismissalRestrictionsRequest -} - -// GetRequireLastPushApproval returns the RequireLastPushApproval field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewsEnforcementRequest) GetRequireLastPushApproval() bool { - if p == nil || p.RequireLastPushApproval == nil { - return false - } - return *p.RequireLastPushApproval -} - -// GetBypassPullRequestAllowancesRequest returns the BypassPullRequestAllowancesRequest field. -func (p *PullRequestReviewsEnforcementUpdate) GetBypassPullRequestAllowancesRequest() *BypassPullRequestAllowancesRequest { - if p == nil { - return nil - } - return p.BypassPullRequestAllowancesRequest -} - -// GetDismissalRestrictionsRequest returns the DismissalRestrictionsRequest field. -func (p *PullRequestReviewsEnforcementUpdate) GetDismissalRestrictionsRequest() *DismissalRestrictionsRequest { - if p == nil { - return nil - } - return p.DismissalRestrictionsRequest -} - -// GetDismissStaleReviews returns the DismissStaleReviews field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewsEnforcementUpdate) GetDismissStaleReviews() bool { - if p == nil || p.DismissStaleReviews == nil { - return false - } - return *p.DismissStaleReviews -} - -// GetRequireCodeOwnerReviews returns the RequireCodeOwnerReviews field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewsEnforcementUpdate) GetRequireCodeOwnerReviews() bool { - if p == nil || p.RequireCodeOwnerReviews == nil { - return false - } - return *p.RequireCodeOwnerReviews -} - -// GetRequireLastPushApproval returns the RequireLastPushApproval field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewsEnforcementUpdate) GetRequireLastPushApproval() bool { - if p == nil || p.RequireLastPushApproval == nil { - return false - } - return *p.RequireLastPushApproval -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestReviewThreadEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetInstallation returns the Installation field. -func (p *PullRequestReviewThreadEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetPullRequest returns the PullRequest field. -func (p *PullRequestReviewThreadEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestReviewThreadEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *PullRequestReviewThreadEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetThread returns the Thread field. -func (p *PullRequestReviewThreadEvent) GetThread() *PullRequestThread { - if p == nil { - return nil - } - return p.Thread -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestTargetEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAfter returns the After field if it's non-nil, zero value otherwise. -func (p *PullRequestTargetEvent) GetAfter() string { - if p == nil || p.After == nil { - return "" - } - return *p.After -} - -// GetAssignee returns the Assignee field. 
-func (p *PullRequestTargetEvent) GetAssignee() *User { - if p == nil { - return nil - } - return p.Assignee -} - -// GetBefore returns the Before field if it's non-nil, zero value otherwise. -func (p *PullRequestTargetEvent) GetBefore() string { - if p == nil || p.Before == nil { - return "" - } - return *p.Before -} - -// GetChanges returns the Changes field. -func (p *PullRequestTargetEvent) GetChanges() *EditChange { - if p == nil { - return nil - } - return p.Changes -} - -// GetInstallation returns the Installation field. -func (p *PullRequestTargetEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetLabel returns the Label field. -func (p *PullRequestTargetEvent) GetLabel() *Label { - if p == nil { - return nil - } - return p.Label -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (p *PullRequestTargetEvent) GetNumber() int { - if p == nil || p.Number == nil { - return 0 - } - return *p.Number -} - -// GetOrganization returns the Organization field. -func (p *PullRequestTargetEvent) GetOrganization() *Organization { - if p == nil { - return nil - } - return p.Organization -} - -// GetPullRequest returns the PullRequest field. -func (p *PullRequestTargetEvent) GetPullRequest() *PullRequest { - if p == nil { - return nil - } - return p.PullRequest -} - -// GetRepo returns the Repo field. -func (p *PullRequestTargetEvent) GetRepo() *Repository { - if p == nil { - return nil - } - return p.Repo -} - -// GetRequestedReviewer returns the RequestedReviewer field. -func (p *PullRequestTargetEvent) GetRequestedReviewer() *User { - if p == nil { - return nil - } - return p.RequestedReviewer -} - -// GetRequestedTeam returns the RequestedTeam field. -func (p *PullRequestTargetEvent) GetRequestedTeam() *Team { - if p == nil { - return nil - } - return p.RequestedTeam -} - -// GetSender returns the Sender field. -func (p *PullRequestTargetEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PullRequestThread) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *PullRequestThread) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetMergablePulls returns the MergablePulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetMergablePulls() int { - if p == nil || p.MergablePulls == nil { - return 0 - } - return *p.MergablePulls -} - -// GetMergedPulls returns the MergedPulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetMergedPulls() int { - if p == nil || p.MergedPulls == nil { - return 0 - } - return *p.MergedPulls -} - -// GetTotalPulls returns the TotalPulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetTotalPulls() int { - if p == nil || p.TotalPulls == nil { - return 0 - } - return *p.TotalPulls -} - -// GetUnmergablePulls returns the UnmergablePulls field if it's non-nil, zero value otherwise. -func (p *PullStats) GetUnmergablePulls() int { - if p == nil || p.UnmergablePulls == nil { - return 0 - } - return *p.UnmergablePulls -} - -// GetCommits returns the Commits field if it's non-nil, zero value otherwise. 
-func (p *PunchCard) GetCommits() int { - if p == nil || p.Commits == nil { - return 0 - } - return *p.Commits -} - -// GetDay returns the Day field if it's non-nil, zero value otherwise. -func (p *PunchCard) GetDay() int { - if p == nil || p.Day == nil { - return 0 - } - return *p.Day -} - -// GetHour returns the Hour field if it's non-nil, zero value otherwise. -func (p *PunchCard) GetHour() int { - if p == nil || p.Hour == nil { - return 0 - } - return *p.Hour -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetAction() string { - if p == nil || p.Action == nil { - return "" - } - return *p.Action -} - -// GetAfter returns the After field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetAfter() string { - if p == nil || p.After == nil { - return "" - } - return *p.After -} - -// GetBaseRef returns the BaseRef field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetBaseRef() string { - if p == nil || p.BaseRef == nil { - return "" - } - return *p.BaseRef -} - -// GetBefore returns the Before field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetBefore() string { - if p == nil || p.Before == nil { - return "" - } - return *p.Before -} - -// GetCommits returns the Commits slice if it's non-nil, nil otherwise. -func (p *PushEvent) GetCommits() []*HeadCommit { - if p == nil || p.Commits == nil { - return nil - } - return p.Commits -} - -// GetCompare returns the Compare field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetCompare() string { - if p == nil || p.Compare == nil { - return "" - } - return *p.Compare -} - -// GetCreated returns the Created field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetCreated() bool { - if p == nil || p.Created == nil { - return false - } - return *p.Created -} - -// GetDeleted returns the Deleted field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetDeleted() bool { - if p == nil || p.Deleted == nil { - return false - } - return *p.Deleted -} - -// GetDistinctSize returns the DistinctSize field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetDistinctSize() int { - if p == nil || p.DistinctSize == nil { - return 0 - } - return *p.DistinctSize -} - -// GetForced returns the Forced field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetForced() bool { - if p == nil || p.Forced == nil { - return false - } - return *p.Forced -} - -// GetHead returns the Head field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetHead() string { - if p == nil || p.Head == nil { - return "" - } - return *p.Head -} - -// GetHeadCommit returns the HeadCommit field. -func (p *PushEvent) GetHeadCommit() *HeadCommit { - if p == nil { - return nil - } - return p.HeadCommit -} - -// GetInstallation returns the Installation field. -func (p *PushEvent) GetInstallation() *Installation { - if p == nil { - return nil - } - return p.Installation -} - -// GetOrganization returns the Organization field. -func (p *PushEvent) GetOrganization() *Organization { - if p == nil { - return nil - } - return p.Organization -} - -// GetPusher returns the Pusher field. -func (p *PushEvent) GetPusher() *User { - if p == nil { - return nil - } - return p.Pusher -} - -// GetPushID returns the PushID field if it's non-nil, zero value otherwise. 
-func (p *PushEvent) GetPushID() int64 { - if p == nil || p.PushID == nil { - return 0 - } - return *p.PushID -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetRef() string { - if p == nil || p.Ref == nil { - return "" - } - return *p.Ref -} - -// GetRepo returns the Repo field. -func (p *PushEvent) GetRepo() *PushEventRepository { - if p == nil { - return nil - } - return p.Repo -} - -// GetSender returns the Sender field. -func (p *PushEvent) GetSender() *User { - if p == nil { - return nil - } - return p.Sender -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (p *PushEvent) GetSize() int { - if p == nil || p.Size == nil { - return 0 - } - return *p.Size -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (p *PushEventRepoOwner) GetEmail() string { - if p == nil || p.Email == nil { - return "" - } - return *p.Email -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PushEventRepoOwner) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetArchived returns the Archived field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetArchived() bool { - if p == nil || p.Archived == nil { - return false - } - return *p.Archived -} - -// GetArchiveURL returns the ArchiveURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetArchiveURL() string { - if p == nil || p.ArchiveURL == nil { - return "" - } - return *p.ArchiveURL -} - -// GetCloneURL returns the CloneURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetCloneURL() string { - if p == nil || p.CloneURL == nil { - return "" - } - return *p.CloneURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetCreatedAt() Timestamp { - if p == nil || p.CreatedAt == nil { - return Timestamp{} - } - return *p.CreatedAt -} - -// GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetDefaultBranch() string { - if p == nil || p.DefaultBranch == nil { - return "" - } - return *p.DefaultBranch -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetDescription() string { - if p == nil || p.Description == nil { - return "" - } - return *p.Description -} - -// GetDisabled returns the Disabled field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetDisabled() bool { - if p == nil || p.Disabled == nil { - return false - } - return *p.Disabled -} - -// GetFork returns the Fork field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetFork() bool { - if p == nil || p.Fork == nil { - return false - } - return *p.Fork -} - -// GetForksCount returns the ForksCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetForksCount() int { - if p == nil || p.ForksCount == nil { - return 0 - } - return *p.ForksCount -} - -// GetFullName returns the FullName field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetFullName() string { - if p == nil || p.FullName == nil { - return "" - } - return *p.FullName -} - -// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise. 
-func (p *PushEventRepository) GetGitURL() string { - if p == nil || p.GitURL == nil { - return "" - } - return *p.GitURL -} - -// GetHasDownloads returns the HasDownloads field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasDownloads() bool { - if p == nil || p.HasDownloads == nil { - return false - } - return *p.HasDownloads -} - -// GetHasIssues returns the HasIssues field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasIssues() bool { - if p == nil || p.HasIssues == nil { - return false - } - return *p.HasIssues -} - -// GetHasPages returns the HasPages field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasPages() bool { - if p == nil || p.HasPages == nil { - return false - } - return *p.HasPages -} - -// GetHasWiki returns the HasWiki field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHasWiki() bool { - if p == nil || p.HasWiki == nil { - return false - } - return *p.HasWiki -} - -// GetHomepage returns the Homepage field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHomepage() string { - if p == nil || p.Homepage == nil { - return "" - } - return *p.Homepage -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetHTMLURL() string { - if p == nil || p.HTMLURL == nil { - return "" - } - return *p.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetID() int64 { - if p == nil || p.ID == nil { - return 0 - } - return *p.ID -} - -// GetLanguage returns the Language field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetLanguage() string { - if p == nil || p.Language == nil { - return "" - } - return *p.Language -} - -// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetMasterBranch() string { - if p == nil || p.MasterBranch == nil { - return "" - } - return *p.MasterBranch -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetName() string { - if p == nil || p.Name == nil { - return "" - } - return *p.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetNodeID() string { - if p == nil || p.NodeID == nil { - return "" - } - return *p.NodeID -} - -// GetOpenIssuesCount returns the OpenIssuesCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetOpenIssuesCount() int { - if p == nil || p.OpenIssuesCount == nil { - return 0 - } - return *p.OpenIssuesCount -} - -// GetOrganization returns the Organization field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetOrganization() string { - if p == nil || p.Organization == nil { - return "" - } - return *p.Organization -} - -// GetOwner returns the Owner field. -func (p *PushEventRepository) GetOwner() *User { - if p == nil { - return nil - } - return p.Owner -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetPrivate() bool { - if p == nil || p.Private == nil { - return false - } - return *p.Private -} - -// GetPullsURL returns the PullsURL field if it's non-nil, zero value otherwise. 
-func (p *PushEventRepository) GetPullsURL() string { - if p == nil || p.PullsURL == nil { - return "" - } - return *p.PullsURL -} - -// GetPushedAt returns the PushedAt field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetPushedAt() Timestamp { - if p == nil || p.PushedAt == nil { - return Timestamp{} - } - return *p.PushedAt -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetSize() int { - if p == nil || p.Size == nil { - return 0 - } - return *p.Size -} - -// GetSSHURL returns the SSHURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetSSHURL() string { - if p == nil || p.SSHURL == nil { - return "" - } - return *p.SSHURL -} - -// GetStargazersCount returns the StargazersCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetStargazersCount() int { - if p == nil || p.StargazersCount == nil { - return 0 - } - return *p.StargazersCount -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetStatusesURL() string { - if p == nil || p.StatusesURL == nil { - return "" - } - return *p.StatusesURL -} - -// GetSVNURL returns the SVNURL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetSVNURL() string { - if p == nil || p.SVNURL == nil { - return "" - } - return *p.SVNURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetUpdatedAt() Timestamp { - if p == nil || p.UpdatedAt == nil { - return Timestamp{} - } - return *p.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetURL() string { - if p == nil || p.URL == nil { - return "" - } - return *p.URL -} - -// GetWatchersCount returns the WatchersCount field if it's non-nil, zero value otherwise. -func (p *PushEventRepository) GetWatchersCount() int { - if p == nil || p.WatchersCount == nil { - return 0 - } - return *p.WatchersCount -} - -// GetActionsRunnerRegistration returns the ActionsRunnerRegistration field. -func (r *RateLimits) GetActionsRunnerRegistration() *Rate { - if r == nil { - return nil - } - return r.ActionsRunnerRegistration -} - -// GetCodeScanningUpload returns the CodeScanningUpload field. -func (r *RateLimits) GetCodeScanningUpload() *Rate { - if r == nil { - return nil - } - return r.CodeScanningUpload -} - -// GetCore returns the Core field. -func (r *RateLimits) GetCore() *Rate { - if r == nil { - return nil - } - return r.Core -} - -// GetGraphQL returns the GraphQL field. -func (r *RateLimits) GetGraphQL() *Rate { - if r == nil { - return nil - } - return r.GraphQL -} - -// GetIntegrationManifest returns the IntegrationManifest field. -func (r *RateLimits) GetIntegrationManifest() *Rate { - if r == nil { - return nil - } - return r.IntegrationManifest -} - -// GetSCIM returns the SCIM field. -func (r *RateLimits) GetSCIM() *Rate { - if r == nil { - return nil - } - return r.SCIM -} - -// GetSearch returns the Search field. -func (r *RateLimits) GetSearch() *Rate { - if r == nil { - return nil - } - return r.Search -} - -// GetSourceImport returns the SourceImport field. -func (r *RateLimits) GetSourceImport() *Rate { - if r == nil { - return nil - } - return r.SourceImport -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. 
-func (r *Reaction) GetContent() string { - if r == nil || r.Content == nil { - return "" - } - return *r.Content -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *Reaction) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *Reaction) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetUser returns the User field. -func (r *Reaction) GetUser() *User { - if r == nil { - return nil - } - return r.User -} - -// GetConfused returns the Confused field if it's non-nil, zero value otherwise. -func (r *Reactions) GetConfused() int { - if r == nil || r.Confused == nil { - return 0 - } - return *r.Confused -} - -// GetEyes returns the Eyes field if it's non-nil, zero value otherwise. -func (r *Reactions) GetEyes() int { - if r == nil || r.Eyes == nil { - return 0 - } - return *r.Eyes -} - -// GetHeart returns the Heart field if it's non-nil, zero value otherwise. -func (r *Reactions) GetHeart() int { - if r == nil || r.Heart == nil { - return 0 - } - return *r.Heart -} - -// GetHooray returns the Hooray field if it's non-nil, zero value otherwise. -func (r *Reactions) GetHooray() int { - if r == nil || r.Hooray == nil { - return 0 - } - return *r.Hooray -} - -// GetLaugh returns the Laugh field if it's non-nil, zero value otherwise. -func (r *Reactions) GetLaugh() int { - if r == nil || r.Laugh == nil { - return 0 - } - return *r.Laugh -} - -// GetMinusOne returns the MinusOne field if it's non-nil, zero value otherwise. -func (r *Reactions) GetMinusOne() int { - if r == nil || r.MinusOne == nil { - return 0 - } - return *r.MinusOne -} - -// GetPlusOne returns the PlusOne field if it's non-nil, zero value otherwise. -func (r *Reactions) GetPlusOne() int { - if r == nil || r.PlusOne == nil { - return 0 - } - return *r.PlusOne -} - -// GetRocket returns the Rocket field if it's non-nil, zero value otherwise. -func (r *Reactions) GetRocket() int { - if r == nil || r.Rocket == nil { - return 0 - } - return *r.Rocket -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (r *Reactions) GetTotalCount() int { - if r == nil || r.TotalCount == nil { - return 0 - } - return *r.TotalCount -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *Reactions) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *Reference) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetObject returns the Object field. -func (r *Reference) GetObject() *GitObject { - if r == nil { - return nil - } - return r.Object -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (r *Reference) GetRef() string { - if r == nil || r.Ref == nil { - return "" - } - return *r.Ref -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *Reference) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (r *RegistrationToken) GetExpiresAt() Timestamp { - if r == nil || r.ExpiresAt == nil { - return Timestamp{} - } - return *r.ExpiresAt -} - -// GetToken returns the Token field if it's non-nil, zero value otherwise. 
-func (r *RegistrationToken) GetToken() string { - if r == nil || r.Token == nil { - return "" - } - return *r.Token -} - -// GetBrowserDownloadURL returns the BrowserDownloadURL field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetBrowserDownloadURL() string { - if r == nil || r.BrowserDownloadURL == nil { - return "" - } - return *r.BrowserDownloadURL -} - -// GetContentType returns the ContentType field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetContentType() string { - if r == nil || r.ContentType == nil { - return "" - } - return *r.ContentType -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetDownloadCount returns the DownloadCount field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetDownloadCount() int { - if r == nil || r.DownloadCount == nil { - return 0 - } - return *r.DownloadCount -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetLabel returns the Label field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetLabel() string { - if r == nil || r.Label == nil { - return "" - } - return *r.Label -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetSize() int { - if r == nil || r.Size == nil { - return 0 - } - return *r.Size -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetState() string { - if r == nil || r.State == nil { - return "" - } - return *r.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetUpdatedAt() Timestamp { - if r == nil || r.UpdatedAt == nil { - return Timestamp{} - } - return *r.UpdatedAt -} - -// GetUploader returns the Uploader field. -func (r *ReleaseAsset) GetUploader() *User { - if r == nil { - return nil - } - return r.Uploader -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *ReleaseAsset) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (r *ReleaseEvent) GetAction() string { - if r == nil || r.Action == nil { - return "" - } - return *r.Action -} - -// GetInstallation returns the Installation field. -func (r *ReleaseEvent) GetInstallation() *Installation { - if r == nil { - return nil - } - return r.Installation -} - -// GetRelease returns the Release field. -func (r *ReleaseEvent) GetRelease() *RepositoryRelease { - if r == nil { - return nil - } - return r.Release -} - -// GetRepo returns the Repo field. -func (r *ReleaseEvent) GetRepo() *Repository { - if r == nil { - return nil - } - return r.Repo -} - -// GetSender returns the Sender field. 
-func (r *ReleaseEvent) GetSender() *User { - if r == nil { - return nil - } - return r.Sender -} - -// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (r *RemoveToken) GetExpiresAt() Timestamp { - if r == nil || r.ExpiresAt == nil { - return Timestamp{} - } - return *r.ExpiresAt -} - -// GetToken returns the Token field if it's non-nil, zero value otherwise. -func (r *RemoveToken) GetToken() string { - if r == nil || r.Token == nil { - return "" - } - return *r.Token -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (r *Rename) GetFrom() string { - if r == nil || r.From == nil { - return "" - } - return *r.From -} - -// GetTo returns the To field if it's non-nil, zero value otherwise. -func (r *Rename) GetTo() string { - if r == nil || r.To == nil { - return "" - } - return *r.To -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (r *RenameOrgResponse) GetMessage() string { - if r == nil || r.Message == nil { - return "" - } - return *r.Message -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RenameOrgResponse) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetBranch returns the Branch field if it's non-nil, zero value otherwise. -func (r *RepoMergeUpstreamRequest) GetBranch() string { - if r == nil || r.Branch == nil { - return "" - } - return *r.Branch -} - -// GetBaseBranch returns the BaseBranch field if it's non-nil, zero value otherwise. -func (r *RepoMergeUpstreamResult) GetBaseBranch() string { - if r == nil || r.BaseBranch == nil { - return "" - } - return *r.BaseBranch -} - -// GetMergeType returns the MergeType field if it's non-nil, zero value otherwise. -func (r *RepoMergeUpstreamResult) GetMergeType() string { - if r == nil || r.MergeType == nil { - return "" - } - return *r.MergeType -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (r *RepoMergeUpstreamResult) GetMessage() string { - if r == nil || r.Message == nil { - return "" - } - return *r.Message -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (r *RepoName) GetFrom() string { - if r == nil || r.From == nil { - return "" - } - return *r.From -} - -// GetBadgeURL returns the BadgeURL field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetBadgeURL() string { - if r == nil || r.BadgeURL == nil { - return "" - } - return *r.BadgeURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. 
-func (r *RepoRequiredWorkflow) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetPath() string { - if r == nil || r.Path == nil { - return "" - } - return *r.Path -} - -// GetSourceRepository returns the SourceRepository field. -func (r *RepoRequiredWorkflow) GetSourceRepository() *Repository { - if r == nil { - return nil - } - return r.SourceRepository -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetState() string { - if r == nil || r.State == nil { - return "" - } - return *r.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetUpdatedAt() Timestamp { - if r == nil || r.UpdatedAt == nil { - return Timestamp{} - } - return *r.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflow) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (r *RepoRequiredWorkflows) GetTotalCount() int { - if r == nil || r.TotalCount == nil { - return 0 - } - return *r.TotalCount -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (r *RepositoriesSearchResult) GetIncompleteResults() bool { - if r == nil || r.IncompleteResults == nil { - return false - } - return *r.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (r *RepositoriesSearchResult) GetTotal() int { - if r == nil || r.Total == nil { - return 0 - } - return *r.Total -} - -// GetAllowAutoMerge returns the AllowAutoMerge field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowAutoMerge() bool { - if r == nil || r.AllowAutoMerge == nil { - return false - } - return *r.AllowAutoMerge -} - -// GetAllowForking returns the AllowForking field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowForking() bool { - if r == nil || r.AllowForking == nil { - return false - } - return *r.AllowForking -} - -// GetAllowMergeCommit returns the AllowMergeCommit field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowMergeCommit() bool { - if r == nil || r.AllowMergeCommit == nil { - return false - } - return *r.AllowMergeCommit -} - -// GetAllowRebaseMerge returns the AllowRebaseMerge field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowRebaseMerge() bool { - if r == nil || r.AllowRebaseMerge == nil { - return false - } - return *r.AllowRebaseMerge -} - -// GetAllowSquashMerge returns the AllowSquashMerge field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowSquashMerge() bool { - if r == nil || r.AllowSquashMerge == nil { - return false - } - return *r.AllowSquashMerge -} - -// GetAllowUpdateBranch returns the AllowUpdateBranch field if it's non-nil, zero value otherwise. -func (r *Repository) GetAllowUpdateBranch() bool { - if r == nil || r.AllowUpdateBranch == nil { - return false - } - return *r.AllowUpdateBranch -} - -// GetArchived returns the Archived field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetArchived() bool { - if r == nil || r.Archived == nil { - return false - } - return *r.Archived -} - -// GetArchiveURL returns the ArchiveURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetArchiveURL() string { - if r == nil || r.ArchiveURL == nil { - return "" - } - return *r.ArchiveURL -} - -// GetAssigneesURL returns the AssigneesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetAssigneesURL() string { - if r == nil || r.AssigneesURL == nil { - return "" - } - return *r.AssigneesURL -} - -// GetAutoInit returns the AutoInit field if it's non-nil, zero value otherwise. -func (r *Repository) GetAutoInit() bool { - if r == nil || r.AutoInit == nil { - return false - } - return *r.AutoInit -} - -// GetBlobsURL returns the BlobsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetBlobsURL() string { - if r == nil || r.BlobsURL == nil { - return "" - } - return *r.BlobsURL -} - -// GetBranchesURL returns the BranchesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetBranchesURL() string { - if r == nil || r.BranchesURL == nil { - return "" - } - return *r.BranchesURL -} - -// GetCloneURL returns the CloneURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCloneURL() string { - if r == nil || r.CloneURL == nil { - return "" - } - return *r.CloneURL -} - -// GetCodeOfConduct returns the CodeOfConduct field. -func (r *Repository) GetCodeOfConduct() *CodeOfConduct { - if r == nil { - return nil - } - return r.CodeOfConduct -} - -// GetCollaboratorsURL returns the CollaboratorsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCollaboratorsURL() string { - if r == nil || r.CollaboratorsURL == nil { - return "" - } - return *r.CollaboratorsURL -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCommentsURL() string { - if r == nil || r.CommentsURL == nil { - return "" - } - return *r.CommentsURL -} - -// GetCommitsURL returns the CommitsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCommitsURL() string { - if r == nil || r.CommitsURL == nil { - return "" - } - return *r.CommitsURL -} - -// GetCompareURL returns the CompareURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetCompareURL() string { - if r == nil || r.CompareURL == nil { - return "" - } - return *r.CompareURL -} - -// GetContentsURL returns the ContentsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetContentsURL() string { - if r == nil || r.ContentsURL == nil { - return "" - } - return *r.ContentsURL -} - -// GetContributorsURL returns the ContributorsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetContributorsURL() string { - if r == nil || r.ContributorsURL == nil { - return "" - } - return *r.ContributorsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *Repository) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise. -func (r *Repository) GetDefaultBranch() string { - if r == nil || r.DefaultBranch == nil { - return "" - } - return *r.DefaultBranch -} - -// GetDeleteBranchOnMerge returns the DeleteBranchOnMerge field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetDeleteBranchOnMerge() bool { - if r == nil || r.DeleteBranchOnMerge == nil { - return false - } - return *r.DeleteBranchOnMerge -} - -// GetDeploymentsURL returns the DeploymentsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetDeploymentsURL() string { - if r == nil || r.DeploymentsURL == nil { - return "" - } - return *r.DeploymentsURL -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (r *Repository) GetDescription() string { - if r == nil || r.Description == nil { - return "" - } - return *r.Description -} - -// GetDisabled returns the Disabled field if it's non-nil, zero value otherwise. -func (r *Repository) GetDisabled() bool { - if r == nil || r.Disabled == nil { - return false - } - return *r.Disabled -} - -// GetDownloadsURL returns the DownloadsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetDownloadsURL() string { - if r == nil || r.DownloadsURL == nil { - return "" - } - return *r.DownloadsURL -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetEventsURL() string { - if r == nil || r.EventsURL == nil { - return "" - } - return *r.EventsURL -} - -// GetFork returns the Fork field if it's non-nil, zero value otherwise. -func (r *Repository) GetFork() bool { - if r == nil || r.Fork == nil { - return false - } - return *r.Fork -} - -// GetForksCount returns the ForksCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetForksCount() int { - if r == nil || r.ForksCount == nil { - return 0 - } - return *r.ForksCount -} - -// GetForksURL returns the ForksURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetForksURL() string { - if r == nil || r.ForksURL == nil { - return "" - } - return *r.ForksURL -} - -// GetFullName returns the FullName field if it's non-nil, zero value otherwise. -func (r *Repository) GetFullName() string { - if r == nil || r.FullName == nil { - return "" - } - return *r.FullName -} - -// GetGitCommitsURL returns the GitCommitsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitCommitsURL() string { - if r == nil || r.GitCommitsURL == nil { - return "" - } - return *r.GitCommitsURL -} - -// GetGitignoreTemplate returns the GitignoreTemplate field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitignoreTemplate() string { - if r == nil || r.GitignoreTemplate == nil { - return "" - } - return *r.GitignoreTemplate -} - -// GetGitRefsURL returns the GitRefsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitRefsURL() string { - if r == nil || r.GitRefsURL == nil { - return "" - } - return *r.GitRefsURL -} - -// GetGitTagsURL returns the GitTagsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitTagsURL() string { - if r == nil || r.GitTagsURL == nil { - return "" - } - return *r.GitTagsURL -} - -// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetGitURL() string { - if r == nil || r.GitURL == nil { - return "" - } - return *r.GitURL -} - -// GetHasDiscussions returns the HasDiscussions field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasDiscussions() bool { - if r == nil || r.HasDiscussions == nil { - return false - } - return *r.HasDiscussions -} - -// GetHasDownloads returns the HasDownloads field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetHasDownloads() bool { - if r == nil || r.HasDownloads == nil { - return false - } - return *r.HasDownloads -} - -// GetHasIssues returns the HasIssues field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasIssues() bool { - if r == nil || r.HasIssues == nil { - return false - } - return *r.HasIssues -} - -// GetHasPages returns the HasPages field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasPages() bool { - if r == nil || r.HasPages == nil { - return false - } - return *r.HasPages -} - -// GetHasProjects returns the HasProjects field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasProjects() bool { - if r == nil || r.HasProjects == nil { - return false - } - return *r.HasProjects -} - -// GetHasWiki returns the HasWiki field if it's non-nil, zero value otherwise. -func (r *Repository) GetHasWiki() bool { - if r == nil || r.HasWiki == nil { - return false - } - return *r.HasWiki -} - -// GetHomepage returns the Homepage field if it's non-nil, zero value otherwise. -func (r *Repository) GetHomepage() string { - if r == nil || r.Homepage == nil { - return "" - } - return *r.Homepage -} - -// GetHooksURL returns the HooksURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetHooksURL() string { - if r == nil || r.HooksURL == nil { - return "" - } - return *r.HooksURL -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *Repository) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetIssueCommentURL returns the IssueCommentURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetIssueCommentURL() string { - if r == nil || r.IssueCommentURL == nil { - return "" - } - return *r.IssueCommentURL -} - -// GetIssueEventsURL returns the IssueEventsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetIssueEventsURL() string { - if r == nil || r.IssueEventsURL == nil { - return "" - } - return *r.IssueEventsURL -} - -// GetIssuesURL returns the IssuesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetIssuesURL() string { - if r == nil || r.IssuesURL == nil { - return "" - } - return *r.IssuesURL -} - -// GetIsTemplate returns the IsTemplate field if it's non-nil, zero value otherwise. -func (r *Repository) GetIsTemplate() bool { - if r == nil || r.IsTemplate == nil { - return false - } - return *r.IsTemplate -} - -// GetKeysURL returns the KeysURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetKeysURL() string { - if r == nil || r.KeysURL == nil { - return "" - } - return *r.KeysURL -} - -// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetLabelsURL() string { - if r == nil || r.LabelsURL == nil { - return "" - } - return *r.LabelsURL -} - -// GetLanguage returns the Language field if it's non-nil, zero value otherwise. -func (r *Repository) GetLanguage() string { - if r == nil || r.Language == nil { - return "" - } - return *r.Language -} - -// GetLanguagesURL returns the LanguagesURL field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetLanguagesURL() string { - if r == nil || r.LanguagesURL == nil { - return "" - } - return *r.LanguagesURL -} - -// GetLicense returns the License field. -func (r *Repository) GetLicense() *License { - if r == nil { - return nil - } - return r.License -} - -// GetLicenseTemplate returns the LicenseTemplate field if it's non-nil, zero value otherwise. -func (r *Repository) GetLicenseTemplate() string { - if r == nil || r.LicenseTemplate == nil { - return "" - } - return *r.LicenseTemplate -} - -// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise. -func (r *Repository) GetMasterBranch() string { - if r == nil || r.MasterBranch == nil { - return "" - } - return *r.MasterBranch -} - -// GetMergeCommitMessage returns the MergeCommitMessage field if it's non-nil, zero value otherwise. -func (r *Repository) GetMergeCommitMessage() string { - if r == nil || r.MergeCommitMessage == nil { - return "" - } - return *r.MergeCommitMessage -} - -// GetMergeCommitTitle returns the MergeCommitTitle field if it's non-nil, zero value otherwise. -func (r *Repository) GetMergeCommitTitle() string { - if r == nil || r.MergeCommitTitle == nil { - return "" - } - return *r.MergeCommitTitle -} - -// GetMergesURL returns the MergesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetMergesURL() string { - if r == nil || r.MergesURL == nil { - return "" - } - return *r.MergesURL -} - -// GetMilestonesURL returns the MilestonesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetMilestonesURL() string { - if r == nil || r.MilestonesURL == nil { - return "" - } - return *r.MilestonesURL -} - -// GetMirrorURL returns the MirrorURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetMirrorURL() string { - if r == nil || r.MirrorURL == nil { - return "" - } - return *r.MirrorURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *Repository) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetNetworkCount returns the NetworkCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetNetworkCount() int { - if r == nil || r.NetworkCount == nil { - return 0 - } - return *r.NetworkCount -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *Repository) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetNotificationsURL returns the NotificationsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetNotificationsURL() string { - if r == nil || r.NotificationsURL == nil { - return "" - } - return *r.NotificationsURL -} - -// GetOpenIssues returns the OpenIssues field if it's non-nil, zero value otherwise. -func (r *Repository) GetOpenIssues() int { - if r == nil || r.OpenIssues == nil { - return 0 - } - return *r.OpenIssues -} - -// GetOpenIssuesCount returns the OpenIssuesCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetOpenIssuesCount() int { - if r == nil || r.OpenIssuesCount == nil { - return 0 - } - return *r.OpenIssuesCount -} - -// GetOrganization returns the Organization field. -func (r *Repository) GetOrganization() *Organization { - if r == nil { - return nil - } - return r.Organization -} - -// GetOwner returns the Owner field. -func (r *Repository) GetOwner() *User { - if r == nil { - return nil - } - return r.Owner -} - -// GetParent returns the Parent field. 
-func (r *Repository) GetParent() *Repository { - if r == nil { - return nil - } - return r.Parent -} - -// GetPermissions returns the Permissions map if it's non-nil, an empty map otherwise. -func (r *Repository) GetPermissions() map[string]bool { - if r == nil || r.Permissions == nil { - return map[string]bool{} - } - return r.Permissions -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (r *Repository) GetPrivate() bool { - if r == nil || r.Private == nil { - return false - } - return *r.Private -} - -// GetPullsURL returns the PullsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetPullsURL() string { - if r == nil || r.PullsURL == nil { - return "" - } - return *r.PullsURL -} - -// GetPushedAt returns the PushedAt field if it's non-nil, zero value otherwise. -func (r *Repository) GetPushedAt() Timestamp { - if r == nil || r.PushedAt == nil { - return Timestamp{} - } - return *r.PushedAt -} - -// GetReleasesURL returns the ReleasesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetReleasesURL() string { - if r == nil || r.ReleasesURL == nil { - return "" - } - return *r.ReleasesURL -} - -// GetRoleName returns the RoleName field if it's non-nil, zero value otherwise. -func (r *Repository) GetRoleName() string { - if r == nil || r.RoleName == nil { - return "" - } - return *r.RoleName -} - -// GetSecurityAndAnalysis returns the SecurityAndAnalysis field. -func (r *Repository) GetSecurityAndAnalysis() *SecurityAndAnalysis { - if r == nil { - return nil - } - return r.SecurityAndAnalysis -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (r *Repository) GetSize() int { - if r == nil || r.Size == nil { - return 0 - } - return *r.Size -} - -// GetSource returns the Source field. -func (r *Repository) GetSource() *Repository { - if r == nil { - return nil - } - return r.Source -} - -// GetSquashMergeCommitMessage returns the SquashMergeCommitMessage field if it's non-nil, zero value otherwise. -func (r *Repository) GetSquashMergeCommitMessage() string { - if r == nil || r.SquashMergeCommitMessage == nil { - return "" - } - return *r.SquashMergeCommitMessage -} - -// GetSquashMergeCommitTitle returns the SquashMergeCommitTitle field if it's non-nil, zero value otherwise. -func (r *Repository) GetSquashMergeCommitTitle() string { - if r == nil || r.SquashMergeCommitTitle == nil { - return "" - } - return *r.SquashMergeCommitTitle -} - -// GetSSHURL returns the SSHURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetSSHURL() string { - if r == nil || r.SSHURL == nil { - return "" - } - return *r.SSHURL -} - -// GetStargazersCount returns the StargazersCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetStargazersCount() int { - if r == nil || r.StargazersCount == nil { - return 0 - } - return *r.StargazersCount -} - -// GetStargazersURL returns the StargazersURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetStargazersURL() string { - if r == nil || r.StargazersURL == nil { - return "" - } - return *r.StargazersURL -} - -// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetStatusesURL() string { - if r == nil || r.StatusesURL == nil { - return "" - } - return *r.StatusesURL -} - -// GetSubscribersCount returns the SubscribersCount field if it's non-nil, zero value otherwise. 
-func (r *Repository) GetSubscribersCount() int { - if r == nil || r.SubscribersCount == nil { - return 0 - } - return *r.SubscribersCount -} - -// GetSubscribersURL returns the SubscribersURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetSubscribersURL() string { - if r == nil || r.SubscribersURL == nil { - return "" - } - return *r.SubscribersURL -} - -// GetSubscriptionURL returns the SubscriptionURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetSubscriptionURL() string { - if r == nil || r.SubscriptionURL == nil { - return "" - } - return *r.SubscriptionURL -} - -// GetSVNURL returns the SVNURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetSVNURL() string { - if r == nil || r.SVNURL == nil { - return "" - } - return *r.SVNURL -} - -// GetTagsURL returns the TagsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetTagsURL() string { - if r == nil || r.TagsURL == nil { - return "" - } - return *r.TagsURL -} - -// GetTeamID returns the TeamID field if it's non-nil, zero value otherwise. -func (r *Repository) GetTeamID() int64 { - if r == nil || r.TeamID == nil { - return 0 - } - return *r.TeamID -} - -// GetTeamsURL returns the TeamsURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetTeamsURL() string { - if r == nil || r.TeamsURL == nil { - return "" - } - return *r.TeamsURL -} - -// GetTemplateRepository returns the TemplateRepository field. -func (r *Repository) GetTemplateRepository() *Repository { - if r == nil { - return nil - } - return r.TemplateRepository -} - -// GetTreesURL returns the TreesURL field if it's non-nil, zero value otherwise. -func (r *Repository) GetTreesURL() string { - if r == nil || r.TreesURL == nil { - return "" - } - return *r.TreesURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *Repository) GetUpdatedAt() Timestamp { - if r == nil || r.UpdatedAt == nil { - return Timestamp{} - } - return *r.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *Repository) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetUseSquashPRTitleAsDefault returns the UseSquashPRTitleAsDefault field if it's non-nil, zero value otherwise. -func (r *Repository) GetUseSquashPRTitleAsDefault() bool { - if r == nil || r.UseSquashPRTitleAsDefault == nil { - return false - } - return *r.UseSquashPRTitleAsDefault -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (r *Repository) GetVisibility() string { - if r == nil || r.Visibility == nil { - return "" - } - return *r.Visibility -} - -// GetWatchers returns the Watchers field if it's non-nil, zero value otherwise. -func (r *Repository) GetWatchers() int { - if r == nil || r.Watchers == nil { - return 0 - } - return *r.Watchers -} - -// GetWatchersCount returns the WatchersCount field if it's non-nil, zero value otherwise. -func (r *Repository) GetWatchersCount() int { - if r == nil || r.WatchersCount == nil { - return 0 - } - return *r.WatchersCount -} - -// GetWebCommitSignoffRequired returns the WebCommitSignoffRequired field if it's non-nil, zero value otherwise. -func (r *Repository) GetWebCommitSignoffRequired() bool { - if r == nil || r.WebCommitSignoffRequired == nil { - return false - } - return *r.WebCommitSignoffRequired -} - -// GetAccessLevel returns the AccessLevel field if it's non-nil, zero value otherwise. 
-func (r *RepositoryActionsAccessLevel) GetAccessLevel() string { - if r == nil || r.AccessLevel == nil { - return "" - } - return *r.AccessLevel -} - -// GetAdvancedSecurityCommitters returns the AdvancedSecurityCommitters field if it's non-nil, zero value otherwise. -func (r *RepositoryActiveCommitters) GetAdvancedSecurityCommitters() int { - if r == nil || r.AdvancedSecurityCommitters == nil { - return 0 - } - return *r.AdvancedSecurityCommitters -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (r *RepositoryActiveCommitters) GetName() string { - if r == nil || r.Name == nil { - return "" - } - return *r.Name -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetBody() string { - if r == nil || r.Body == nil { - return "" - } - return *r.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetCommitID() string { - if r == nil || r.CommitID == nil { - return "" - } - return *r.CommitID -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetCreatedAt() Timestamp { - if r == nil || r.CreatedAt == nil { - return Timestamp{} - } - return *r.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetHTMLURL() string { - if r == nil || r.HTMLURL == nil { - return "" - } - return *r.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetID() int64 { - if r == nil || r.ID == nil { - return 0 - } - return *r.ID -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetNodeID() string { - if r == nil || r.NodeID == nil { - return "" - } - return *r.NodeID -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetPath() string { - if r == nil || r.Path == nil { - return "" - } - return *r.Path -} - -// GetPosition returns the Position field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetPosition() int { - if r == nil || r.Position == nil { - return 0 - } - return *r.Position -} - -// GetReactions returns the Reactions field. -func (r *RepositoryComment) GetReactions() *Reactions { - if r == nil { - return nil - } - return r.Reactions -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetUpdatedAt() Timestamp { - if r == nil || r.UpdatedAt == nil { - return Timestamp{} - } - return *r.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetURL() string { - if r == nil || r.URL == nil { - return "" - } - return *r.URL -} - -// GetUser returns the User field. -func (r *RepositoryComment) GetUser() *User { - if r == nil { - return nil - } - return r.User -} - -// GetAuthor returns the Author field. -func (r *RepositoryCommit) GetAuthor() *User { - if r == nil { - return nil - } - return r.Author -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (r *RepositoryCommit) GetCommentsURL() string { - if r == nil || r.CommentsURL == nil { - return "" - } - return *r.CommentsURL -} - -// GetCommit returns the Commit field. 
-func (r *RepositoryCommit) GetCommit() *Commit {
-	if r == nil {
-		return nil
-	}
-	return r.Commit
-}
-
-// GetCommitter returns the Committer field.
-func (r *RepositoryCommit) GetCommitter() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Committer
-}
-
-// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryCommit) GetHTMLURL() string {
-	if r == nil || r.HTMLURL == nil {
-		return ""
-	}
-	return *r.HTMLURL
-}
-
-// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
-func (r *RepositoryCommit) GetNodeID() string {
-	if r == nil || r.NodeID == nil {
-		return ""
-	}
-	return *r.NodeID
-}
-
-// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
-func (r *RepositoryCommit) GetSHA() string {
-	if r == nil || r.SHA == nil {
-		return ""
-	}
-	return *r.SHA
-}
-
-// GetStats returns the Stats field.
-func (r *RepositoryCommit) GetStats() *CommitStats {
-	if r == nil {
-		return nil
-	}
-	return r.Stats
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (r *RepositoryCommit) GetURL() string {
-	if r == nil || r.URL == nil {
-		return ""
-	}
-	return *r.URL
-}
-
-// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetDownloadURL() string {
-	if r == nil || r.DownloadURL == nil {
-		return ""
-	}
-	return *r.DownloadURL
-}
-
-// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetEncoding() string {
-	if r == nil || r.Encoding == nil {
-		return ""
-	}
-	return *r.Encoding
-}
-
-// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetGitURL() string {
-	if r == nil || r.GitURL == nil {
-		return ""
-	}
-	return *r.GitURL
-}
-
-// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetHTMLURL() string {
-	if r == nil || r.HTMLURL == nil {
-		return ""
-	}
-	return *r.HTMLURL
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetPath returns the Path field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetPath() string {
-	if r == nil || r.Path == nil {
-		return ""
-	}
-	return *r.Path
-}
-
-// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetSHA() string {
-	if r == nil || r.SHA == nil {
-		return ""
-	}
-	return *r.SHA
-}
-
-// GetSize returns the Size field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetSize() int {
-	if r == nil || r.Size == nil {
-		return 0
-	}
-	return *r.Size
-}
-
-// GetTarget returns the Target field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetTarget() string {
-	if r == nil || r.Target == nil {
-		return ""
-	}
-	return *r.Target
-}
-
-// GetType returns the Type field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetType() string {
-	if r == nil || r.Type == nil {
-		return ""
-	}
-	return *r.Type
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (r *RepositoryContent) GetURL() string {
-	if r == nil || r.URL == nil {
-		return ""
-	}
-	return *r.URL
-}
-
-// GetAuthor returns the Author field.
-func (r *RepositoryContentFileOptions) GetAuthor() *CommitAuthor {
-	if r == nil {
-		return nil
-	}
-	return r.Author
-}
-
-// GetBranch returns the Branch field if it's non-nil, zero value otherwise.
-func (r *RepositoryContentFileOptions) GetBranch() string {
-	if r == nil || r.Branch == nil {
-		return ""
-	}
-	return *r.Branch
-}
-
-// GetCommitter returns the Committer field.
-func (r *RepositoryContentFileOptions) GetCommitter() *CommitAuthor {
-	if r == nil {
-		return nil
-	}
-	return r.Committer
-}
-
-// GetMessage returns the Message field if it's non-nil, zero value otherwise.
-func (r *RepositoryContentFileOptions) GetMessage() string {
-	if r == nil || r.Message == nil {
-		return ""
-	}
-	return *r.Message
-}
-
-// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
-func (r *RepositoryContentFileOptions) GetSHA() string {
-	if r == nil || r.SHA == nil {
-		return ""
-	}
-	return *r.SHA
-}
-
-// GetContent returns the Content field.
-func (r *RepositoryContentResponse) GetContent() *RepositoryContent {
-	if r == nil {
-		return nil
-	}
-	return r.Content
-}
-
-// GetAction returns the Action field if it's non-nil, zero value otherwise.
-func (r *RepositoryDispatchEvent) GetAction() string {
-	if r == nil || r.Action == nil {
-		return ""
-	}
-	return *r.Action
-}
-
-// GetBranch returns the Branch field if it's non-nil, zero value otherwise.
-func (r *RepositoryDispatchEvent) GetBranch() string {
-	if r == nil || r.Branch == nil {
-		return ""
-	}
-	return *r.Branch
-}
-
-// GetInstallation returns the Installation field.
-func (r *RepositoryDispatchEvent) GetInstallation() *Installation {
-	if r == nil {
-		return nil
-	}
-	return r.Installation
-}
-
-// GetOrg returns the Org field.
-func (r *RepositoryDispatchEvent) GetOrg() *Organization {
-	if r == nil {
-		return nil
-	}
-	return r.Org
-}
-
-// GetRepo returns the Repo field.
-func (r *RepositoryDispatchEvent) GetRepo() *Repository {
-	if r == nil {
-		return nil
-	}
-	return r.Repo
-}
-
-// GetSender returns the Sender field.
-func (r *RepositoryDispatchEvent) GetSender() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Sender
-}
-
-// GetAction returns the Action field if it's non-nil, zero value otherwise.
-func (r *RepositoryEvent) GetAction() string {
-	if r == nil || r.Action == nil {
-		return ""
-	}
-	return *r.Action
-}
-
-// GetChanges returns the Changes field.
-func (r *RepositoryEvent) GetChanges() *EditChange {
-	if r == nil {
-		return nil
-	}
-	return r.Changes
-}
-
-// GetInstallation returns the Installation field.
-func (r *RepositoryEvent) GetInstallation() *Installation {
-	if r == nil {
-		return nil
-	}
-	return r.Installation
-}
-
-// GetOrg returns the Org field.
-func (r *RepositoryEvent) GetOrg() *Organization {
-	if r == nil {
-		return nil
-	}
-	return r.Org
-}
-
-// GetRepo returns the Repo field.
-func (r *RepositoryEvent) GetRepo() *Repository {
-	if r == nil {
-		return nil
-	}
-	return r.Repo
-}
-
-// GetSender returns the Sender field.
-func (r *RepositoryEvent) GetSender() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Sender
-}
-
-// GetOrg returns the Org field.
-func (r *RepositoryImportEvent) GetOrg() *Organization {
-	if r == nil {
-		return nil
-	}
-	return r.Org
-}
-
-// GetRepo returns the Repo field.
-func (r *RepositoryImportEvent) GetRepo() *Repository {
-	if r == nil {
-		return nil
-	}
-	return r.Repo
-}
-
-// GetSender returns the Sender field.
-func (r *RepositoryImportEvent) GetSender() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Sender
-}
-
-// GetStatus returns the Status field if it's non-nil, zero value otherwise.
-func (r *RepositoryImportEvent) GetStatus() string {
-	if r == nil || r.Status == nil {
-		return ""
-	}
-	return *r.Status
-}
-
-// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
-func (r *RepositoryInvitation) GetCreatedAt() Timestamp {
-	if r == nil || r.CreatedAt == nil {
-		return Timestamp{}
-	}
-	return *r.CreatedAt
-}
-
-// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryInvitation) GetHTMLURL() string {
-	if r == nil || r.HTMLURL == nil {
-		return ""
-	}
-	return *r.HTMLURL
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *RepositoryInvitation) GetID() int64 {
-	if r == nil || r.ID == nil {
-		return 0
-	}
-	return *r.ID
-}
-
-// GetInvitee returns the Invitee field.
-func (r *RepositoryInvitation) GetInvitee() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Invitee
-}
-
-// GetInviter returns the Inviter field.
-func (r *RepositoryInvitation) GetInviter() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Inviter
-}
-
-// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise.
-func (r *RepositoryInvitation) GetPermissions() string {
-	if r == nil || r.Permissions == nil {
-		return ""
-	}
-	return *r.Permissions
-}
-
-// GetRepo returns the Repo field.
-func (r *RepositoryInvitation) GetRepo() *Repository {
-	if r == nil {
-		return nil
-	}
-	return r.Repo
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (r *RepositoryInvitation) GetURL() string {
-	if r == nil || r.URL == nil {
-		return ""
-	}
-	return *r.URL
-}
-
-// GetContent returns the Content field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetContent() string {
-	if r == nil || r.Content == nil {
-		return ""
-	}
-	return *r.Content
-}
-
-// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetDownloadURL() string {
-	if r == nil || r.DownloadURL == nil {
-		return ""
-	}
-	return *r.DownloadURL
-}
-
-// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetEncoding() string {
-	if r == nil || r.Encoding == nil {
-		return ""
-	}
-	return *r.Encoding
-}
-
-// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetGitURL() string {
-	if r == nil || r.GitURL == nil {
-		return ""
-	}
-	return *r.GitURL
-}
-
-// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetHTMLURL() string {
-	if r == nil || r.HTMLURL == nil {
-		return ""
-	}
-	return *r.HTMLURL
-}
-
-// GetLicense returns the License field.
-func (r *RepositoryLicense) GetLicense() *License {
-	if r == nil {
-		return nil
-	}
-	return r.License
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetPath returns the Path field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetPath() string {
-	if r == nil || r.Path == nil {
-		return ""
-	}
-	return *r.Path
-}
-
-// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetSHA() string {
-	if r == nil || r.SHA == nil {
-		return ""
-	}
-	return *r.SHA
-}
-
-// GetSize returns the Size field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetSize() int {
-	if r == nil || r.Size == nil {
-		return 0
-	}
-	return *r.Size
-}
-
-// GetType returns the Type field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetType() string {
-	if r == nil || r.Type == nil {
-		return ""
-	}
-	return *r.Type
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (r *RepositoryLicense) GetURL() string {
-	if r == nil || r.URL == nil {
-		return ""
-	}
-	return *r.URL
-}
-
-// GetBase returns the Base field if it's non-nil, zero value otherwise.
-func (r *RepositoryMergeRequest) GetBase() string {
-	if r == nil || r.Base == nil {
-		return ""
-	}
-	return *r.Base
-}
-
-// GetCommitMessage returns the CommitMessage field if it's non-nil, zero value otherwise.
-func (r *RepositoryMergeRequest) GetCommitMessage() string {
-	if r == nil || r.CommitMessage == nil {
-		return ""
-	}
-	return *r.CommitMessage
-}
-
-// GetHead returns the Head field if it's non-nil, zero value otherwise.
-func (r *RepositoryMergeRequest) GetHead() string {
-	if r == nil || r.Head == nil {
-		return ""
-	}
-	return *r.Head
-}
-
-// GetPermission returns the Permission field if it's non-nil, zero value otherwise.
-func (r *RepositoryPermissionLevel) GetPermission() string {
-	if r == nil || r.Permission == nil {
-		return ""
-	}
-	return *r.Permission
-}
-
-// GetUser returns the User field.
-func (r *RepositoryPermissionLevel) GetUser() *User {
-	if r == nil {
-		return nil
-	}
-	return r.User
-}
-
-// GetAssetsURL returns the AssetsURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetAssetsURL() string {
-	if r == nil || r.AssetsURL == nil {
-		return ""
-	}
-	return *r.AssetsURL
-}
-
-// GetAuthor returns the Author field.
-func (r *RepositoryRelease) GetAuthor() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Author
-}
-
-// GetBody returns the Body field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetBody() string {
-	if r == nil || r.Body == nil {
-		return ""
-	}
-	return *r.Body
-}
-
-// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetCreatedAt() Timestamp {
-	if r == nil || r.CreatedAt == nil {
-		return Timestamp{}
-	}
-	return *r.CreatedAt
-}
-
-// GetDiscussionCategoryName returns the DiscussionCategoryName field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetDiscussionCategoryName() string {
-	if r == nil || r.DiscussionCategoryName == nil {
-		return ""
-	}
-	return *r.DiscussionCategoryName
-}
-
-// GetDraft returns the Draft field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetDraft() bool {
-	if r == nil || r.Draft == nil {
-		return false
-	}
-	return *r.Draft
-}
-
-// GetGenerateReleaseNotes returns the GenerateReleaseNotes field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetGenerateReleaseNotes() bool {
-	if r == nil || r.GenerateReleaseNotes == nil {
-		return false
-	}
-	return *r.GenerateReleaseNotes
-}
-
-// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetHTMLURL() string {
-	if r == nil || r.HTMLURL == nil {
-		return ""
-	}
-	return *r.HTMLURL
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetID() int64 {
-	if r == nil || r.ID == nil {
-		return 0
-	}
-	return *r.ID
-}
-
-// GetMakeLatest returns the MakeLatest field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetMakeLatest() string {
-	if r == nil || r.MakeLatest == nil {
-		return ""
-	}
-	return *r.MakeLatest
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetNodeID() string {
-	if r == nil || r.NodeID == nil {
-		return ""
-	}
-	return *r.NodeID
-}
-
-// GetPrerelease returns the Prerelease field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetPrerelease() bool {
-	if r == nil || r.Prerelease == nil {
-		return false
-	}
-	return *r.Prerelease
-}
-
-// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetPublishedAt() Timestamp {
-	if r == nil || r.PublishedAt == nil {
-		return Timestamp{}
-	}
-	return *r.PublishedAt
-}
-
-// GetTagName returns the TagName field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetTagName() string {
-	if r == nil || r.TagName == nil {
-		return ""
-	}
-	return *r.TagName
-}
-
-// GetTarballURL returns the TarballURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetTarballURL() string {
-	if r == nil || r.TarballURL == nil {
-		return ""
-	}
-	return *r.TarballURL
-}
-
-// GetTargetCommitish returns the TargetCommitish field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetTargetCommitish() string {
-	if r == nil || r.TargetCommitish == nil {
-		return ""
-	}
-	return *r.TargetCommitish
-}
-
-// GetUploadURL returns the UploadURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetUploadURL() string {
-	if r == nil || r.UploadURL == nil {
-		return ""
-	}
-	return *r.UploadURL
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetURL() string {
-	if r == nil || r.URL == nil {
-		return ""
-	}
-	return *r.URL
-}
-
-// GetZipballURL returns the ZipballURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryRelease) GetZipballURL() string {
-	if r == nil || r.ZipballURL == nil {
-		return ""
-	}
-	return *r.ZipballURL
-}
-
-// GetParameters returns the Parameters field if it's non-nil, zero value otherwise.
-func (r *RepositoryRule) GetParameters() json.RawMessage {
-	if r == nil || r.Parameters == nil {
-		return json.RawMessage{}
-	}
-	return *r.Parameters
-}
-
-// GetCommit returns the Commit field.
-func (r *RepositoryTag) GetCommit() *Commit {
-	if r == nil {
-		return nil
-	}
-	return r.Commit
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *RepositoryTag) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetTarballURL returns the TarballURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryTag) GetTarballURL() string {
-	if r == nil || r.TarballURL == nil {
-		return ""
-	}
-	return *r.TarballURL
-}
-
-// GetZipballURL returns the ZipballURL field if it's non-nil, zero value otherwise.
-func (r *RepositoryTag) GetZipballURL() string {
-	if r == nil || r.ZipballURL == nil {
-		return ""
-	}
-	return *r.ZipballURL
-}
-
-// GetAffectedPackageName returns the AffectedPackageName field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetAffectedPackageName() string {
-	if r == nil || r.AffectedPackageName == nil {
-		return ""
-	}
-	return *r.AffectedPackageName
-}
-
-// GetAffectedRange returns the AffectedRange field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetAffectedRange() string {
-	if r == nil || r.AffectedRange == nil {
-		return ""
-	}
-	return *r.AffectedRange
-}
-
-// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetCreatedAt() Timestamp {
-	if r == nil || r.CreatedAt == nil {
-		return Timestamp{}
-	}
-	return *r.CreatedAt
-}
-
-// GetDismissedAt returns the DismissedAt field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetDismissedAt() Timestamp {
-	if r == nil || r.DismissedAt == nil {
-		return Timestamp{}
-	}
-	return *r.DismissedAt
-}
-
-// GetDismisser returns the Dismisser field.
-func (r *RepositoryVulnerabilityAlert) GetDismisser() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Dismisser
-}
-
-// GetDismissReason returns the DismissReason field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetDismissReason() string {
-	if r == nil || r.DismissReason == nil {
-		return ""
-	}
-	return *r.DismissReason
-}
-
-// GetExternalIdentifier returns the ExternalIdentifier field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetExternalIdentifier() string {
-	if r == nil || r.ExternalIdentifier == nil {
-		return ""
-	}
-	return *r.ExternalIdentifier
-}
-
-// GetExternalReference returns the ExternalReference field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetExternalReference() string {
-	if r == nil || r.ExternalReference == nil {
-		return ""
-	}
-	return *r.ExternalReference
-}
-
-// GetFixedIn returns the FixedIn field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetFixedIn() string {
-	if r == nil || r.FixedIn == nil {
-		return ""
-	}
-	return *r.FixedIn
-}
-
-// GetGitHubSecurityAdvisoryID returns the GitHubSecurityAdvisoryID field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetGitHubSecurityAdvisoryID() string {
-	if r == nil || r.GitHubSecurityAdvisoryID == nil {
-		return ""
-	}
-	return *r.GitHubSecurityAdvisoryID
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetID() int64 {
-	if r == nil || r.ID == nil {
-		return 0
-	}
-	return *r.ID
-}
-
-// GetSeverity returns the Severity field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlert) GetSeverity() string {
-	if r == nil || r.Severity == nil {
-		return ""
-	}
-	return *r.Severity
-}
-
-// GetAction returns the Action field if it's non-nil, zero value otherwise.
-func (r *RepositoryVulnerabilityAlertEvent) GetAction() string {
-	if r == nil || r.Action == nil {
-		return ""
-	}
-	return *r.Action
-}
-
-// GetAlert returns the Alert field.
-func (r *RepositoryVulnerabilityAlertEvent) GetAlert() *RepositoryVulnerabilityAlert {
-	if r == nil {
-		return nil
-	}
-	return r.Alert
-}
-
-// GetInstallation returns the Installation field.
-func (r *RepositoryVulnerabilityAlertEvent) GetInstallation() *Installation {
-	if r == nil {
-		return nil
-	}
-	return r.Installation
-}
-
-// GetRepository returns the Repository field.
-func (r *RepositoryVulnerabilityAlertEvent) GetRepository() *Repository {
-	if r == nil {
-		return nil
-	}
-	return r.Repository
-}
-
-// GetSender returns the Sender field.
-func (r *RepositoryVulnerabilityAlertEvent) GetSender() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Sender
-}
-
-// GetForkRepos returns the ForkRepos field if it's non-nil, zero value otherwise.
-func (r *RepoStats) GetForkRepos() int {
-	if r == nil || r.ForkRepos == nil {
-		return 0
-	}
-	return *r.ForkRepos
-}
-
-// GetOrgRepos returns the OrgRepos field if it's non-nil, zero value otherwise.
-func (r *RepoStats) GetOrgRepos() int {
-	if r == nil || r.OrgRepos == nil {
-		return 0
-	}
-	return *r.OrgRepos
-}
-
-// GetRootRepos returns the RootRepos field if it's non-nil, zero value otherwise.
-func (r *RepoStats) GetRootRepos() int {
-	if r == nil || r.RootRepos == nil {
-		return 0
-	}
-	return *r.RootRepos
-}
-
-// GetTotalPushes returns the TotalPushes field if it's non-nil, zero value otherwise.
-func (r *RepoStats) GetTotalPushes() int {
-	if r == nil || r.TotalPushes == nil {
-		return 0
-	}
-	return *r.TotalPushes
-}
-
-// GetTotalRepos returns the TotalRepos field if it's non-nil, zero value otherwise.
-func (r *RepoStats) GetTotalRepos() int {
-	if r == nil || r.TotalRepos == nil {
-		return 0
-	}
-	return *r.TotalRepos
-}
-
-// GetTotalWikis returns the TotalWikis field if it's non-nil, zero value otherwise.
-func (r *RepoStats) GetTotalWikis() int {
-	if r == nil || r.TotalWikis == nil {
-		return 0
-	}
-	return *r.TotalWikis
-}
-
-// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetAvatarURL() string {
-	if r == nil || r.AvatarURL == nil {
-		return ""
-	}
-	return *r.AvatarURL
-}
-
-// GetContext returns the Context field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetContext() string {
-	if r == nil || r.Context == nil {
-		return ""
-	}
-	return *r.Context
-}
-
-// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetCreatedAt() Timestamp {
-	if r == nil || r.CreatedAt == nil {
-		return Timestamp{}
-	}
-	return *r.CreatedAt
-}
-
-// GetCreator returns the Creator field.
-func (r *RepoStatus) GetCreator() *User {
-	if r == nil {
-		return nil
-	}
-	return r.Creator
-}
-
-// GetDescription returns the Description field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetDescription() string {
-	if r == nil || r.Description == nil {
-		return ""
-	}
-	return *r.Description
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetID() int64 {
-	if r == nil || r.ID == nil {
-		return 0
-	}
-	return *r.ID
-}
-
-// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetNodeID() string {
-	if r == nil || r.NodeID == nil {
-		return ""
-	}
-	return *r.NodeID
-}
-
-// GetState returns the State field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetState() string {
-	if r == nil || r.State == nil {
-		return ""
-	}
-	return *r.State
-}
-
-// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetTargetURL() string {
-	if r == nil || r.TargetURL == nil {
-		return ""
-	}
-	return *r.TargetURL
-}
-
-// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetUpdatedAt() Timestamp {
-	if r == nil || r.UpdatedAt == nil {
-		return Timestamp{}
-	}
-	return *r.UpdatedAt
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (r *RepoStatus) GetURL() string {
-	if r == nil || r.URL == nil {
-		return ""
-	}
-	return *r.URL
-}
-
-// GetFrom returns the From field if it's non-nil, zero value otherwise.
-func (r *RequireCodeOwnerReviewChanges) GetFrom() bool {
-	if r == nil || r.From == nil {
-		return false
-	}
-	return *r.From
-}
-
-// GetFrom returns the From field if it's non-nil, zero value otherwise.
-func (r *RequiredConversationResolutionLevelChanges) GetFrom() string {
-	if r == nil || r.From == nil {
-		return ""
-	}
-	return *r.From
-}
-
-// GetFrom returns the From field if it's non-nil, zero value otherwise.
-func (r *RequiredDeploymentsEnforcementLevelChanges) GetFrom() string {
-	if r == nil || r.From == nil {
-		return ""
-	}
-	return *r.From
-}
-
-// GetType returns the Type field if it's non-nil, zero value otherwise.
-func (r *RequiredReviewer) GetType() string {
-	if r == nil || r.Type == nil {
-		return ""
-	}
-	return *r.Type
-}
-
-// GetAppID returns the AppID field if it's non-nil, zero value otherwise.
-func (r *RequiredStatusCheck) GetAppID() int64 {
-	if r == nil || r.AppID == nil {
-		return 0
-	}
-	return *r.AppID
-}
-
-// GetContextsURL returns the ContextsURL field if it's non-nil, zero value otherwise.
-func (r *RequiredStatusChecks) GetContextsURL() string {
-	if r == nil || r.ContextsURL == nil {
-		return ""
-	}
-	return *r.ContextsURL
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (r *RequiredStatusChecks) GetURL() string {
-	if r == nil || r.URL == nil {
-		return ""
-	}
-	return *r.URL
-}
-
-// GetFrom returns the From field if it's non-nil, zero value otherwise.
-func (r *RequiredStatusChecksEnforcementLevelChanges) GetFrom() string {
-	if r == nil || r.From == nil {
-		return ""
-	}
-	return *r.From
-}
-
-// GetStrict returns the Strict field if it's non-nil, zero value otherwise.
-func (r *RequiredStatusChecksRequest) GetStrict() bool {
-	if r == nil || r.Strict == nil {
-		return false
-	}
-	return *r.Strict
-}
-
-// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
-func (r *RequiredWorkflowSelectedRepos) GetTotalCount() int {
-	if r == nil || r.TotalCount == nil {
-		return 0
-	}
-	return *r.TotalCount
-}
-
-// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
-func (r *ReviewersRequest) GetNodeID() string {
-	if r == nil || r.NodeID == nil {
-		return ""
-	}
-	return *r.NodeID
-}
-
-// GetDescription returns the Description field if it's non-nil, zero value otherwise.
-func (r *Rule) GetDescription() string {
-	if r == nil || r.Description == nil {
-		return ""
-	}
-	return *r.Description
-}
-
-// GetFullDescription returns the FullDescription field if it's non-nil, zero value otherwise.
-func (r *Rule) GetFullDescription() string {
-	if r == nil || r.FullDescription == nil {
-		return ""
-	}
-	return *r.FullDescription
-}
-
-// GetHelp returns the Help field if it's non-nil, zero value otherwise.
-func (r *Rule) GetHelp() string {
-	if r == nil || r.Help == nil {
-		return ""
-	}
-	return *r.Help
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *Rule) GetID() string {
-	if r == nil || r.ID == nil {
-		return ""
-	}
-	return *r.ID
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *Rule) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetSecuritySeverityLevel returns the SecuritySeverityLevel field if it's non-nil, zero value otherwise.
-func (r *Rule) GetSecuritySeverityLevel() string {
-	if r == nil || r.SecuritySeverityLevel == nil {
-		return ""
-	}
-	return *r.SecuritySeverityLevel
-}
-
-// GetSeverity returns the Severity field if it's non-nil, zero value otherwise.
-func (r *Rule) GetSeverity() string {
-	if r == nil || r.Severity == nil {
-		return ""
-	}
-	return *r.Severity
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *RulePatternParameters) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetNegate returns the Negate field if it's non-nil, zero value otherwise.
-func (r *RulePatternParameters) GetNegate() bool {
-	if r == nil || r.Negate == nil {
-		return false
-	}
-	return *r.Negate
-}
-
-// GetIntegrationID returns the IntegrationID field if it's non-nil, zero value otherwise.
-func (r *RuleRequiredStatusChecks) GetIntegrationID() int64 {
-	if r == nil || r.IntegrationID == nil {
-		return 0
-	}
-	return *r.IntegrationID
-}
-
-// GetBypassMode returns the BypassMode field if it's non-nil, zero value otherwise.
-func (r *Ruleset) GetBypassMode() string {
-	if r == nil || r.BypassMode == nil {
-		return ""
-	}
-	return *r.BypassMode
-}
-
-// GetConditions returns the Conditions field.
-func (r *Ruleset) GetConditions() *RulesetConditions {
-	if r == nil {
-		return nil
-	}
-	return r.Conditions
-}
-
-// GetLinks returns the Links field.
-func (r *Ruleset) GetLinks() *RulesetLinks {
-	if r == nil {
-		return nil
-	}
-	return r.Links
-}
-
-// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise.
-func (r *Ruleset) GetNodeID() string {
-	if r == nil || r.NodeID == nil {
-		return ""
-	}
-	return *r.NodeID
-}
-
-// GetSourceType returns the SourceType field if it's non-nil, zero value otherwise.
-func (r *Ruleset) GetSourceType() string {
-	if r == nil || r.SourceType == nil {
-		return ""
-	}
-	return *r.SourceType
-}
-
-// GetTarget returns the Target field if it's non-nil, zero value otherwise.
-func (r *Ruleset) GetTarget() string {
-	if r == nil || r.Target == nil {
-		return ""
-	}
-	return *r.Target
-}
-
-// GetRefName returns the RefName field.
-func (r *RulesetConditions) GetRefName() *RulesetRefConditionParameters {
-	if r == nil {
-		return nil
-	}
-	return r.RefName
-}
-
-// GetRepositoryName returns the RepositoryName field.
-func (r *RulesetConditions) GetRepositoryName() *RulesetRepositoryConditionParameters {
-	if r == nil {
-		return nil
-	}
-	return r.RepositoryName
-}
-
-// GetHRef returns the HRef field if it's non-nil, zero value otherwise.
-func (r *RulesetLink) GetHRef() string {
-	if r == nil || r.HRef == nil {
-		return ""
-	}
-	return *r.HRef
-}
-
-// GetSelf returns the Self field.
-func (r *RulesetLinks) GetSelf() *RulesetLink {
-	if r == nil {
-		return nil
-	}
-	return r.Self
-}
-
-// GetProtected returns the Protected field if it's non-nil, zero value otherwise.
-func (r *RulesetRepositoryConditionParameters) GetProtected() bool {
-	if r == nil || r.Protected == nil {
-		return false
-	}
-	return *r.Protected
-}
-
-// GetBusy returns the Busy field if it's non-nil, zero value otherwise.
-func (r *Runner) GetBusy() bool {
-	if r == nil || r.Busy == nil {
-		return false
-	}
-	return *r.Busy
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *Runner) GetID() int64 {
-	if r == nil || r.ID == nil {
-		return 0
-	}
-	return *r.ID
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *Runner) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetOS returns the OS field if it's non-nil, zero value otherwise.
-func (r *Runner) GetOS() string {
-	if r == nil || r.OS == nil {
-		return ""
-	}
-	return *r.OS
-}
-
-// GetStatus returns the Status field if it's non-nil, zero value otherwise.
-func (r *Runner) GetStatus() string {
-	if r == nil || r.Status == nil {
-		return ""
-	}
-	return *r.Status
-}
-
-// GetArchitecture returns the Architecture field if it's non-nil, zero value otherwise.
-func (r *RunnerApplicationDownload) GetArchitecture() string {
-	if r == nil || r.Architecture == nil {
-		return ""
-	}
-	return *r.Architecture
-}
-
-// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise.
-func (r *RunnerApplicationDownload) GetDownloadURL() string {
-	if r == nil || r.DownloadURL == nil {
-		return ""
-	}
-	return *r.DownloadURL
-}
-
-// GetFilename returns the Filename field if it's non-nil, zero value otherwise.
-func (r *RunnerApplicationDownload) GetFilename() string {
-	if r == nil || r.Filename == nil {
-		return ""
-	}
-	return *r.Filename
-}
-
-// GetOS returns the OS field if it's non-nil, zero value otherwise.
-func (r *RunnerApplicationDownload) GetOS() string {
-	if r == nil || r.OS == nil {
-		return ""
-	}
-	return *r.OS
-}
-
-// GetSHA256Checksum returns the SHA256Checksum field if it's non-nil, zero value otherwise.
-func (r *RunnerApplicationDownload) GetSHA256Checksum() string {
-	if r == nil || r.SHA256Checksum == nil {
-		return ""
-	}
-	return *r.SHA256Checksum
-}
-
-// GetTempDownloadToken returns the TempDownloadToken field if it's non-nil, zero value otherwise.
-func (r *RunnerApplicationDownload) GetTempDownloadToken() string {
-	if r == nil || r.TempDownloadToken == nil {
-		return ""
-	}
-	return *r.TempDownloadToken
-}
-
-// GetAllowsPublicRepositories returns the AllowsPublicRepositories field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetAllowsPublicRepositories() bool {
-	if r == nil || r.AllowsPublicRepositories == nil {
-		return false
-	}
-	return *r.AllowsPublicRepositories
-}
-
-// GetDefault returns the Default field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetDefault() bool {
-	if r == nil || r.Default == nil {
-		return false
-	}
-	return *r.Default
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetID() int64 {
-	if r == nil || r.ID == nil {
-		return 0
-	}
-	return *r.ID
-}
-
-// GetInherited returns the Inherited field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetInherited() bool {
-	if r == nil || r.Inherited == nil {
-		return false
-	}
-	return *r.Inherited
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetRestrictedToWorkflows returns the RestrictedToWorkflows field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetRestrictedToWorkflows() bool {
-	if r == nil || r.RestrictedToWorkflows == nil {
-		return false
-	}
-	return *r.RestrictedToWorkflows
-}
-
-// GetRunnersURL returns the RunnersURL field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetRunnersURL() string {
-	if r == nil || r.RunnersURL == nil {
-		return ""
-	}
-	return *r.RunnersURL
-}
-
-// GetSelectedRepositoriesURL returns the SelectedRepositoriesURL field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetSelectedRepositoriesURL() string {
-	if r == nil || r.SelectedRepositoriesURL == nil {
-		return ""
-	}
-	return *r.SelectedRepositoriesURL
-}
-
-// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetVisibility() string {
-	if r == nil || r.Visibility == nil {
-		return ""
-	}
-	return *r.Visibility
-}
-
-// GetWorkflowRestrictionsReadOnly returns the WorkflowRestrictionsReadOnly field if it's non-nil, zero value otherwise.
-func (r *RunnerGroup) GetWorkflowRestrictionsReadOnly() bool {
-	if r == nil || r.WorkflowRestrictionsReadOnly == nil {
-		return false
-	}
-	return *r.WorkflowRestrictionsReadOnly
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (r *RunnerLabels) GetID() int64 {
-	if r == nil || r.ID == nil {
-		return 0
-	}
-	return *r.ID
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (r *RunnerLabels) GetName() string {
-	if r == nil || r.Name == nil {
-		return ""
-	}
-	return *r.Name
-}
-
-// GetType returns the Type field if it's non-nil, zero value otherwise.
-func (r *RunnerLabels) GetType() string {
-	if r == nil || r.Type == nil {
-		return ""
-	}
-	return *r.Type
-}
-
-// GetCheckoutURI returns the CheckoutURI field if it's non-nil, zero value otherwise.
-func (s *SarifAnalysis) GetCheckoutURI() string {
-	if s == nil || s.CheckoutURI == nil {
-		return ""
-	}
-	return *s.CheckoutURI
-}
-
-// GetCommitSHA returns the CommitSHA field if it's non-nil, zero value otherwise.
-func (s *SarifAnalysis) GetCommitSHA() string {
-	if s == nil || s.CommitSHA == nil {
-		return ""
-	}
-	return *s.CommitSHA
-}
-
-// GetRef returns the Ref field if it's non-nil, zero value otherwise.
-func (s *SarifAnalysis) GetRef() string {
-	if s == nil || s.Ref == nil {
-		return ""
-	}
-	return *s.Ref
-}
-
-// GetSarif returns the Sarif field if it's non-nil, zero value otherwise.
-func (s *SarifAnalysis) GetSarif() string {
-	if s == nil || s.Sarif == nil {
-		return ""
-	}
-	return *s.Sarif
-}
-
-// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise.
-func (s *SarifAnalysis) GetStartedAt() Timestamp {
-	if s == nil || s.StartedAt == nil {
-		return Timestamp{}
-	}
-	return *s.StartedAt
-}
-
-// GetToolName returns the ToolName field if it's non-nil, zero value otherwise.
-func (s *SarifAnalysis) GetToolName() string {
-	if s == nil || s.ToolName == nil {
-		return ""
-	}
-	return *s.ToolName
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (s *SarifID) GetID() string {
-	if s == nil || s.ID == nil {
-		return ""
-	}
-	return *s.ID
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (s *SarifID) GetURL() string {
-	if s == nil || s.URL == nil {
-		return ""
-	}
-	return *s.URL
-}
-
-// GetAnalysisKey returns the AnalysisKey field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetAnalysisKey() string {
-	if s == nil || s.AnalysisKey == nil {
-		return ""
-	}
-	return *s.AnalysisKey
-}
-
-// GetCategory returns the Category field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetCategory() string {
-	if s == nil || s.Category == nil {
-		return ""
-	}
-	return *s.Category
-}
-
-// GetCommitSHA returns the CommitSHA field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetCommitSHA() string {
-	if s == nil || s.CommitSHA == nil {
-		return ""
-	}
-	return *s.CommitSHA
-}
-
-// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetCreatedAt() Timestamp {
-	if s == nil || s.CreatedAt == nil {
-		return Timestamp{}
-	}
-	return *s.CreatedAt
-}
-
-// GetDeletable returns the Deletable field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetDeletable() bool {
-	if s == nil || s.Deletable == nil {
-		return false
-	}
-	return *s.Deletable
-}
-
-// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetEnvironment() string {
-	if s == nil || s.Environment == nil {
-		return ""
-	}
-	return *s.Environment
-}
-
-// GetError returns the Error field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetError() string {
-	if s == nil || s.Error == nil {
-		return ""
-	}
-	return *s.Error
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetID() int64 {
-	if s == nil || s.ID == nil {
-		return 0
-	}
-	return *s.ID
-}
-
-// GetRef returns the Ref field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetRef() string {
-	if s == nil || s.Ref == nil {
-		return ""
-	}
-	return *s.Ref
-}
-
-// GetResultsCount returns the ResultsCount field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetResultsCount() int {
-	if s == nil || s.ResultsCount == nil {
-		return 0
-	}
-	return *s.ResultsCount
-}
-
-// GetRulesCount returns the RulesCount field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetRulesCount() int {
-	if s == nil || s.RulesCount == nil {
-		return 0
-	}
-	return *s.RulesCount
-}
-
-// GetSarifID returns the SarifID field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetSarifID() string {
-	if s == nil || s.SarifID == nil {
-		return ""
-	}
-	return *s.SarifID
-}
-
-// GetTool returns the Tool field.
-func (s *ScanningAnalysis) GetTool() *Tool {
-	if s == nil {
-		return nil
-	}
-	return s.Tool
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetURL() string {
-	if s == nil || s.URL == nil {
-		return ""
-	}
-	return *s.URL
-}
-
-// GetWarning returns the Warning field if it's non-nil, zero value otherwise.
-func (s *ScanningAnalysis) GetWarning() string {
-	if s == nil || s.Warning == nil {
-		return ""
-	}
-	return *s.Warning
-}
-
-// GetCreated returns the Created field if it's non-nil, zero value otherwise.
-func (s *SCIMMeta) GetCreated() Timestamp {
-	if s == nil || s.Created == nil {
-		return Timestamp{}
-	}
-	return *s.Created
-}
-
-// GetLastModified returns the LastModified field if it's non-nil, zero value otherwise.
-func (s *SCIMMeta) GetLastModified() Timestamp {
-	if s == nil || s.LastModified == nil {
-		return Timestamp{}
-	}
-	return *s.LastModified
-}
-
-// GetLocation returns the Location field if it's non-nil, zero value otherwise.
-func (s *SCIMMeta) GetLocation() string {
-	if s == nil || s.Location == nil {
-		return ""
-	}
-	return *s.Location
-}
-
-// GetResourceType returns the ResourceType field if it's non-nil, zero value otherwise.
-func (s *SCIMMeta) GetResourceType() string {
-	if s == nil || s.ResourceType == nil {
-		return ""
-	}
-	return *s.ResourceType
-}
-
-// GetItemsPerPage returns the ItemsPerPage field if it's non-nil, zero value otherwise.
-func (s *SCIMProvisionedIdentities) GetItemsPerPage() int {
-	if s == nil || s.ItemsPerPage == nil {
-		return 0
-	}
-	return *s.ItemsPerPage
-}
-
-// GetStartIndex returns the StartIndex field if it's non-nil, zero value otherwise.
-func (s *SCIMProvisionedIdentities) GetStartIndex() int {
-	if s == nil || s.StartIndex == nil {
-		return 0
-	}
-	return *s.StartIndex
-}
-
-// GetTotalResults returns the TotalResults field if it's non-nil, zero value otherwise.
-func (s *SCIMProvisionedIdentities) GetTotalResults() int {
-	if s == nil || s.TotalResults == nil {
-		return 0
-	}
-	return *s.TotalResults
-}
-
-// GetActive returns the Active field if it's non-nil, zero value otherwise.
-func (s *SCIMUserAttributes) GetActive() bool {
-	if s == nil || s.Active == nil {
-		return false
-	}
-	return *s.Active
-}
-
-// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise.
-func (s *SCIMUserAttributes) GetDisplayName() string {
-	if s == nil || s.DisplayName == nil {
-		return ""
-	}
-	return *s.DisplayName
-}
-
-// GetExternalID returns the ExternalID field if it's non-nil, zero value otherwise.
-func (s *SCIMUserAttributes) GetExternalID() string {
-	if s == nil || s.ExternalID == nil {
-		return ""
-	}
-	return *s.ExternalID
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (s *SCIMUserAttributes) GetID() string {
-	if s == nil || s.ID == nil {
-		return ""
-	}
-	return *s.ID
-}
-
-// GetMeta returns the Meta field.
-func (s *SCIMUserAttributes) GetMeta() *SCIMMeta {
-	if s == nil {
-		return nil
-	}
-	return s.Meta
-}
-
-// GetPrimary returns the Primary field if it's non-nil, zero value otherwise.
-func (s *SCIMUserEmail) GetPrimary() bool {
-	if s == nil || s.Primary == nil {
-		return false
-	}
-	return *s.Primary
-}
-
-// GetType returns the Type field if it's non-nil, zero value otherwise.
-func (s *SCIMUserEmail) GetType() string {
-	if s == nil || s.Type == nil {
-		return ""
-	}
-	return *s.Type
-}
-
-// GetFormatted returns the Formatted field if it's non-nil, zero value otherwise.
-func (s *SCIMUserName) GetFormatted() string {
-	if s == nil || s.Formatted == nil {
-		return ""
-	}
-	return *s.Formatted
-}
-
-// GetStatus returns the Status field if it's non-nil, zero value otherwise.
-func (s *SecretScanning) GetStatus() string {
-	if s == nil || s.Status == nil {
-		return ""
-	}
-	return *s.Status
-}
-
-// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetCreatedAt() Timestamp {
-	if s == nil || s.CreatedAt == nil {
-		return Timestamp{}
-	}
-	return *s.CreatedAt
-}
-
-// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetHTMLURL() string {
-	if s == nil || s.HTMLURL == nil {
-		return ""
-	}
-	return *s.HTMLURL
-}
-
-// GetLocationsURL returns the LocationsURL field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetLocationsURL() string {
-	if s == nil || s.LocationsURL == nil {
-		return ""
-	}
-	return *s.LocationsURL
-}
-
-// GetNumber returns the Number field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetNumber() int {
-	if s == nil || s.Number == nil {
-		return 0
-	}
-	return *s.Number
-}
-
-// GetResolution returns the Resolution field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetResolution() string {
-	if s == nil || s.Resolution == nil {
-		return ""
-	}
-	return *s.Resolution
-}
-
-// GetResolvedAt returns the ResolvedAt field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetResolvedAt() Timestamp {
-	if s == nil || s.ResolvedAt == nil {
-		return Timestamp{}
-	}
-	return *s.ResolvedAt
-}
-
-// GetResolvedBy returns the ResolvedBy field.
-func (s *SecretScanningAlert) GetResolvedBy() *User {
-	if s == nil {
-		return nil
-	}
-	return s.ResolvedBy
-}
-
-// GetSecret returns the Secret field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetSecret() string {
-	if s == nil || s.Secret == nil {
-		return ""
-	}
-	return *s.Secret
-}
-
-// GetSecretType returns the SecretType field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetSecretType() string {
-	if s == nil || s.SecretType == nil {
-		return ""
-	}
-	return *s.SecretType
-}
-
-// GetState returns the State field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetState() string {
-	if s == nil || s.State == nil {
-		return ""
-	}
-	return *s.State
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlert) GetURL() string {
-	if s == nil || s.URL == nil {
-		return ""
-	}
-	return *s.URL
-}
-
-// GetAction returns the Action field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertEvent) GetAction() string {
-	if s == nil || s.Action == nil {
-		return ""
-	}
-	return *s.Action
-}
-
-// GetAlert returns the Alert field.
-func (s *SecretScanningAlertEvent) GetAlert() *SecretScanningAlert {
-	if s == nil {
-		return nil
-	}
-	return s.Alert
-}
-
-// GetEnterprise returns the Enterprise field.
-func (s *SecretScanningAlertEvent) GetEnterprise() *Enterprise {
-	if s == nil {
-		return nil
-	}
-	return s.Enterprise
-}
-
-// GetInstallation returns the Installation field.
-func (s *SecretScanningAlertEvent) GetInstallation() *Installation {
-	if s == nil {
-		return nil
-	}
-	return s.Installation
-}
-
-// GetOrganization returns the Organization field.
-func (s *SecretScanningAlertEvent) GetOrganization() *Organization {
-	if s == nil {
-		return nil
-	}
-	return s.Organization
-}
-
-// GetRepo returns the Repo field.
-func (s *SecretScanningAlertEvent) GetRepo() *Repository {
-	if s == nil {
-		return nil
-	}
-	return s.Repo
-}
-
-// GetSender returns the Sender field.
-func (s *SecretScanningAlertEvent) GetSender() *User {
-	if s == nil {
-		return nil
-	}
-	return s.Sender
-}
-
-// GetDetails returns the Details field.
-func (s *SecretScanningAlertLocation) GetDetails() *SecretScanningAlertLocationDetails {
-	if s == nil {
-		return nil
-	}
-	return s.Details
-}
-
-// GetType returns the Type field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocation) GetType() string {
-	if s == nil || s.Type == nil {
-		return ""
-	}
-	return *s.Type
-}
-
-// GetBlobSHA returns the BlobSHA field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetBlobSHA() string {
-	if s == nil || s.BlobSHA == nil {
-		return ""
-	}
-	return *s.BlobSHA
-}
-
-// GetBlobURL returns the BlobURL field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetBlobURL() string {
-	if s == nil || s.BlobURL == nil {
-		return ""
-	}
-	return *s.BlobURL
-}
-
-// GetCommitSHA returns the CommitSHA field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetCommitSHA() string {
-	if s == nil || s.CommitSHA == nil {
-		return ""
-	}
-	return *s.CommitSHA
-}
-
-// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetCommitURL() string {
-	if s == nil || s.CommitURL == nil {
-		return ""
-	}
-	return *s.CommitURL
-}
-
-// GetEndColumn returns the EndColumn field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetEndColumn() int {
-	if s == nil || s.EndColumn == nil {
-		return 0
-	}
-	return *s.EndColumn
-}
-
-// GetEndLine returns the EndLine field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetEndLine() int {
-	if s == nil || s.EndLine == nil {
-		return 0
-	}
-	return *s.EndLine
-}
-
-// GetPath returns the Path field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetPath() string {
-	if s == nil || s.Path == nil {
-		return ""
-	}
-	return *s.Path
-}
-
-// GetStartColumn returns the StartColumn field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetStartColumn() int {
-	if s == nil || s.StartColumn == nil {
-		return 0
-	}
-	return *s.StartColumn
-}
-
-// GetStartline returns the Startline field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertLocationDetails) GetStartline() int {
-	if s == nil || s.Startline == nil {
-		return 0
-	}
-	return *s.Startline
-}
-
-// GetResolution returns the Resolution field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertUpdateOptions) GetResolution() string {
-	if s == nil || s.Resolution == nil {
-		return ""
-	}
-	return *s.Resolution
-}
-
-// GetSecretType returns the SecretType field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertUpdateOptions) GetSecretType() string {
-	if s == nil || s.SecretType == nil {
-		return ""
-	}
-	return *s.SecretType
-}
-
-// GetState returns the State field if it's non-nil, zero value otherwise.
-func (s *SecretScanningAlertUpdateOptions) GetState() string {
-	if s == nil || s.State == nil {
-		return ""
-	}
-	return *s.State
-}
-
-// GetStatus returns the Status field if it's non-nil, zero value otherwise.
-func (s *SecretScanningPushProtection) GetStatus() string {
-	if s == nil || s.Status == nil {
-		return ""
-	}
-	return *s.Status
-}
-
-// GetDescription returns the Description field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisory) GetDescription() string {
-	if s == nil || s.Description == nil {
-		return ""
-	}
-	return *s.Description
-}
-
-// GetGHSAID returns the GHSAID field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisory) GetGHSAID() string {
-	if s == nil || s.GHSAID == nil {
-		return ""
-	}
-	return *s.GHSAID
-}
-
-// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisory) GetPublishedAt() Timestamp {
-	if s == nil || s.PublishedAt == nil {
-		return Timestamp{}
-	}
-	return *s.PublishedAt
-}
-
-// GetSeverity returns the Severity field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisory) GetSeverity() string {
-	if s == nil || s.Severity == nil {
-		return ""
-	}
-	return *s.Severity
-}
-
-// GetSummary returns the Summary field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisory) GetSummary() string {
-	if s == nil || s.Summary == nil {
-		return ""
-	}
-	return *s.Summary
-}
-
-// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisory) GetUpdatedAt() Timestamp {
-	if s == nil || s.UpdatedAt == nil {
-		return Timestamp{}
-	}
-	return *s.UpdatedAt
-}
-
-// GetWithdrawnAt returns the WithdrawnAt field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisory) GetWithdrawnAt() Timestamp {
-	if s == nil || s.WithdrawnAt == nil {
-		return Timestamp{}
-	}
-	return *s.WithdrawnAt
-}
-
-// GetAction returns the Action field if it's non-nil, zero value otherwise.
-func (s *SecurityAdvisoryEvent) GetAction() string {
-	if s == nil || s.Action == nil {
-		return ""
-	}
-	return *s.Action
-}
-
-// GetSecurityAdvisory returns the SecurityAdvisory field.
-func (s *SecurityAdvisoryEvent) GetSecurityAdvisory() *SecurityAdvisory {
-	if s == nil {
-		return nil
-	}
-	return s.SecurityAdvisory
-}
-
-// GetAdvancedSecurity returns the AdvancedSecurity field.
-func (s *SecurityAndAnalysis) GetAdvancedSecurity() *AdvancedSecurity {
-	if s == nil {
-		return nil
-	}
-	return s.AdvancedSecurity
-}
-
-// GetSecretScanning returns the SecretScanning field.
-func (s *SecurityAndAnalysis) GetSecretScanning() *SecretScanning {
-	if s == nil {
-		return nil
-	}
-	return s.SecretScanning
-}
-
-// GetSecretScanningPushProtection returns the SecretScanningPushProtection field.
-func (s *SecurityAndAnalysis) GetSecretScanningPushProtection() *SecretScanningPushProtection {
-	if s == nil {
-		return nil
-	}
-	return s.SecretScanningPushProtection
-}
-
-// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
-func (s *SelectedReposList) GetTotalCount() int {
-	if s == nil || s.TotalCount == nil {
-		return 0
-	}
-	return *s.TotalCount
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (s *ServiceHook) GetName() string {
-	if s == nil || s.Name == nil {
-		return ""
-	}
-	return *s.Name
-}
-
-// GetFrom returns the From field if it's non-nil, zero value otherwise.
-func (s *SignatureRequirementEnforcementLevelChanges) GetFrom() string {
-	if s == nil || s.From == nil {
-		return ""
-	}
-	return *s.From
-}
-
-// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise.
-func (s *SignaturesProtectedBranch) GetEnabled() bool {
-	if s == nil || s.Enabled == nil {
-		return false
-	}
-	return *s.Enabled
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (s *SignaturesProtectedBranch) GetURL() string {
-	if s == nil || s.URL == nil {
-		return ""
-	}
-	return *s.URL
-}
-
-// GetPayload returns the Payload field if it's non-nil, zero value otherwise.
-func (s *SignatureVerification) GetPayload() string {
-	if s == nil || s.Payload == nil {
-		return ""
-	}
-	return *s.Payload
-}
-
-// GetReason returns the Reason field if it's non-nil, zero value otherwise.
-func (s *SignatureVerification) GetReason() string {
-	if s == nil || s.Reason == nil {
-		return ""
-	}
-	return *s.Reason
-}
-
-// GetSignature returns the Signature field if it's non-nil, zero value otherwise.
-func (s *SignatureVerification) GetSignature() string {
-	if s == nil || s.Signature == nil {
-		return ""
-	}
-	return *s.Signature
-}
-
-// GetVerified returns the Verified field if it's non-nil, zero value otherwise.
-func (s *SignatureVerification) GetVerified() bool {
-	if s == nil || s.Verified == nil {
-		return false
-	}
-	return *s.Verified
-}
-
-// GetActor returns the Actor field.
-func (s *Source) GetActor() *User {
-	if s == nil {
-		return nil
-	}
-	return s.Actor
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (s *Source) GetID() int64 {
-	if s == nil || s.ID == nil {
-		return 0
-	}
-	return *s.ID
-}
-
-// GetIssue returns the Issue field.
-func (s *Source) GetIssue() *Issue {
-	if s == nil {
-		return nil
-	}
-	return s.Issue
-}
-
-// GetType returns the Type field if it's non-nil, zero value otherwise.
-func (s *Source) GetType() string {
-	if s == nil || s.Type == nil {
-		return ""
-	}
-	return *s.Type
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (s *Source) GetURL() string {
-	if s == nil || s.URL == nil {
-		return ""
-	}
-	return *s.URL
-}
-
-// GetEmail returns the Email field if it's non-nil, zero value otherwise.
-func (s *SourceImportAuthor) GetEmail() string {
-	if s == nil || s.Email == nil {
-		return ""
-	}
-	return *s.Email
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (s *SourceImportAuthor) GetID() int64 {
-	if s == nil || s.ID == nil {
-		return 0
-	}
-	return *s.ID
-}
-
-// GetImportURL returns the ImportURL field if it's non-nil, zero value otherwise.
-func (s *SourceImportAuthor) GetImportURL() string {
-	if s == nil || s.ImportURL == nil {
-		return ""
-	}
-	return *s.ImportURL
-}
-
-// GetName returns the Name field if it's non-nil, zero value otherwise.
-func (s *SourceImportAuthor) GetName() string {
-	if s == nil || s.Name == nil {
-		return ""
-	}
-	return *s.Name
-}
-
-// GetRemoteID returns the RemoteID field if it's non-nil, zero value otherwise.
-func (s *SourceImportAuthor) GetRemoteID() string {
-	if s == nil || s.RemoteID == nil {
-		return ""
-	}
-	return *s.RemoteID
-}
-
-// GetRemoteName returns the RemoteName field if it's non-nil, zero value otherwise.
-func (s *SourceImportAuthor) GetRemoteName() string {
-	if s == nil || s.RemoteName == nil {
-		return ""
-	}
-	return *s.RemoteName
-}
-
-// GetURL returns the URL field if it's non-nil, zero value otherwise.
-func (s *SourceImportAuthor) GetURL() string {
-	if s == nil || s.URL == nil {
-		return ""
-	}
-	return *s.URL
-}
-
-// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
-func (s *SSHSigningKey) GetCreatedAt() Timestamp {
-	if s == nil || s.CreatedAt == nil {
-		return Timestamp{}
-	}
-	return *s.CreatedAt
-}
-
-// GetID returns the ID field if it's non-nil, zero value otherwise.
-func (s *SSHSigningKey) GetID() int64 {
-	if s == nil || s.ID == nil {
-		return 0
-	}
-	return *s.ID
-}
-
-// GetKey returns the Key field if it's non-nil, zero value otherwise.
-func (s *SSHSigningKey) GetKey() string {
-	if s == nil || s.Key == nil {
-		return ""
-	}
-	return *s.Key
-}
-
-// GetTitle returns the Title field if it's non-nil, zero value otherwise.
-func (s *SSHSigningKey) GetTitle() string { - if s == nil || s.Title == nil { - return "" - } - return *s.Title -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (s *StarEvent) GetAction() string { - if s == nil || s.Action == nil { - return "" - } - return *s.Action -} - -// GetInstallation returns the Installation field. -func (s *StarEvent) GetInstallation() *Installation { - if s == nil { - return nil - } - return s.Installation -} - -// GetOrg returns the Org field. -func (s *StarEvent) GetOrg() *Organization { - if s == nil { - return nil - } - return s.Org -} - -// GetRepo returns the Repo field. -func (s *StarEvent) GetRepo() *Repository { - if s == nil { - return nil - } - return s.Repo -} - -// GetSender returns the Sender field. -func (s *StarEvent) GetSender() *User { - if s == nil { - return nil - } - return s.Sender -} - -// GetStarredAt returns the StarredAt field if it's non-nil, zero value otherwise. -func (s *StarEvent) GetStarredAt() Timestamp { - if s == nil || s.StarredAt == nil { - return Timestamp{} - } - return *s.StarredAt -} - -// GetStarredAt returns the StarredAt field if it's non-nil, zero value otherwise. -func (s *Stargazer) GetStarredAt() Timestamp { - if s == nil || s.StarredAt == nil { - return Timestamp{} - } - return *s.StarredAt -} - -// GetUser returns the User field. -func (s *Stargazer) GetUser() *User { - if s == nil { - return nil - } - return s.User -} - -// GetRepository returns the Repository field. -func (s *StarredRepository) GetRepository() *Repository { - if s == nil { - return nil - } - return s.Repository -} - -// GetStarredAt returns the StarredAt field if it's non-nil, zero value otherwise. -func (s *StarredRepository) GetStarredAt() Timestamp { - if s == nil || s.StarredAt == nil { - return Timestamp{} - } - return *s.StarredAt -} - -// GetCommit returns the Commit field. -func (s *StatusEvent) GetCommit() *RepositoryCommit { - if s == nil { - return nil - } - return s.Commit -} - -// GetContext returns the Context field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetContext() string { - if s == nil || s.Context == nil { - return "" - } - return *s.Context -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetCreatedAt() Timestamp { - if s == nil || s.CreatedAt == nil { - return Timestamp{} - } - return *s.CreatedAt -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetDescription() string { - if s == nil || s.Description == nil { - return "" - } - return *s.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetID() int64 { - if s == nil || s.ID == nil { - return 0 - } - return *s.ID -} - -// GetInstallation returns the Installation field. -func (s *StatusEvent) GetInstallation() *Installation { - if s == nil { - return nil - } - return s.Installation -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetName() string { - if s == nil || s.Name == nil { - return "" - } - return *s.Name -} - -// GetRepo returns the Repo field. -func (s *StatusEvent) GetRepo() *Repository { - if s == nil { - return nil - } - return s.Repo -} - -// GetSender returns the Sender field. -func (s *StatusEvent) GetSender() *User { - if s == nil { - return nil - } - return s.Sender -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. 
-func (s *StatusEvent) GetSHA() string { - if s == nil || s.SHA == nil { - return "" - } - return *s.SHA -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetState() string { - if s == nil || s.State == nil { - return "" - } - return *s.State -} - -// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetTargetURL() string { - if s == nil || s.TargetURL == nil { - return "" - } - return *s.TargetURL -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (s *StatusEvent) GetUpdatedAt() Timestamp { - if s == nil || s.UpdatedAt == nil { - return Timestamp{} - } - return *s.UpdatedAt -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (s *Subscription) GetCreatedAt() Timestamp { - if s == nil || s.CreatedAt == nil { - return Timestamp{} - } - return *s.CreatedAt -} - -// GetIgnored returns the Ignored field if it's non-nil, zero value otherwise. -func (s *Subscription) GetIgnored() bool { - if s == nil || s.Ignored == nil { - return false - } - return *s.Ignored -} - -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (s *Subscription) GetReason() string { - if s == nil || s.Reason == nil { - return "" - } - return *s.Reason -} - -// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. -func (s *Subscription) GetRepositoryURL() string { - if s == nil || s.RepositoryURL == nil { - return "" - } - return *s.RepositoryURL -} - -// GetSubscribed returns the Subscribed field if it's non-nil, zero value otherwise. -func (s *Subscription) GetSubscribed() bool { - if s == nil || s.Subscribed == nil { - return false - } - return *s.Subscribed -} - -// GetThreadURL returns the ThreadURL field if it's non-nil, zero value otherwise. -func (s *Subscription) GetThreadURL() string { - if s == nil || s.ThreadURL == nil { - return "" - } - return *s.ThreadURL -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (s *Subscription) GetURL() string { - if s == nil || s.URL == nil { - return "" - } - return *s.URL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (t *Tag) GetMessage() string { - if t == nil || t.Message == nil { - return "" - } - return *t.Message -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (t *Tag) GetNodeID() string { - if t == nil || t.NodeID == nil { - return "" - } - return *t.NodeID -} - -// GetObject returns the Object field. -func (t *Tag) GetObject() *GitObject { - if t == nil { - return nil - } - return t.Object -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (t *Tag) GetSHA() string { - if t == nil || t.SHA == nil { - return "" - } - return *t.SHA -} - -// GetTag returns the Tag field if it's non-nil, zero value otherwise. -func (t *Tag) GetTag() string { - if t == nil || t.Tag == nil { - return "" - } - return *t.Tag -} - -// GetTagger returns the Tagger field. -func (t *Tag) GetTagger() *CommitAuthor { - if t == nil { - return nil - } - return t.Tagger -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *Tag) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetVerification returns the Verification field. 
-func (t *Tag) GetVerification() *SignatureVerification { - if t == nil { - return nil - } - return t.Verification -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (t *TagProtection) GetID() int64 { - if t == nil || t.ID == nil { - return 0 - } - return *t.ID -} - -// GetPattern returns the Pattern field if it's non-nil, zero value otherwise. -func (t *TagProtection) GetPattern() string { - if t == nil || t.Pattern == nil { - return "" - } - return *t.Pattern -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (t *TaskStep) GetCompletedAt() Timestamp { - if t == nil || t.CompletedAt == nil { - return Timestamp{} - } - return *t.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (t *TaskStep) GetConclusion() string { - if t == nil || t.Conclusion == nil { - return "" - } - return *t.Conclusion -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *TaskStep) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (t *TaskStep) GetNumber() int64 { - if t == nil || t.Number == nil { - return 0 - } - return *t.Number -} - -// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise. -func (t *TaskStep) GetStartedAt() Timestamp { - if t == nil || t.StartedAt == nil { - return Timestamp{} - } - return *t.StartedAt -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (t *TaskStep) GetStatus() string { - if t == nil || t.Status == nil { - return "" - } - return *t.Status -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (t *Team) GetDescription() string { - if t == nil || t.Description == nil { - return "" - } - return *t.Description -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (t *Team) GetHTMLURL() string { - if t == nil || t.HTMLURL == nil { - return "" - } - return *t.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (t *Team) GetID() int64 { - if t == nil || t.ID == nil { - return 0 - } - return *t.ID -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (t *Team) GetLDAPDN() string { - if t == nil || t.LDAPDN == nil { - return "" - } - return *t.LDAPDN -} - -// GetMembersCount returns the MembersCount field if it's non-nil, zero value otherwise. -func (t *Team) GetMembersCount() int { - if t == nil || t.MembersCount == nil { - return 0 - } - return *t.MembersCount -} - -// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise. -func (t *Team) GetMembersURL() string { - if t == nil || t.MembersURL == nil { - return "" - } - return *t.MembersURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *Team) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (t *Team) GetNodeID() string { - if t == nil || t.NodeID == nil { - return "" - } - return *t.NodeID -} - -// GetOrganization returns the Organization field. -func (t *Team) GetOrganization() *Organization { - if t == nil { - return nil - } - return t.Organization -} - -// GetParent returns the Parent field. 
-func (t *Team) GetParent() *Team { - if t == nil { - return nil - } - return t.Parent -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (t *Team) GetPermission() string { - if t == nil || t.Permission == nil { - return "" - } - return *t.Permission -} - -// GetPermissions returns the Permissions map if it's non-nil, an empty map otherwise. -func (t *Team) GetPermissions() map[string]bool { - if t == nil || t.Permissions == nil { - return map[string]bool{} - } - return t.Permissions -} - -// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise. -func (t *Team) GetPrivacy() string { - if t == nil || t.Privacy == nil { - return "" - } - return *t.Privacy -} - -// GetReposCount returns the ReposCount field if it's non-nil, zero value otherwise. -func (t *Team) GetReposCount() int { - if t == nil || t.ReposCount == nil { - return 0 - } - return *t.ReposCount -} - -// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise. -func (t *Team) GetRepositoriesURL() string { - if t == nil || t.RepositoriesURL == nil { - return "" - } - return *t.RepositoriesURL -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (t *Team) GetSlug() string { - if t == nil || t.Slug == nil { - return "" - } - return *t.Slug -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *Team) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetInstallation returns the Installation field. -func (t *TeamAddEvent) GetInstallation() *Installation { - if t == nil { - return nil - } - return t.Installation -} - -// GetOrg returns the Org field. -func (t *TeamAddEvent) GetOrg() *Organization { - if t == nil { - return nil - } - return t.Org -} - -// GetRepo returns the Repo field. -func (t *TeamAddEvent) GetRepo() *Repository { - if t == nil { - return nil - } - return t.Repo -} - -// GetSender returns the Sender field. -func (t *TeamAddEvent) GetSender() *User { - if t == nil { - return nil - } - return t.Sender -} - -// GetTeam returns the Team field. -func (t *TeamAddEvent) GetTeam() *Team { - if t == nil { - return nil - } - return t.Team -} - -// GetDescription returns the Description field. -func (t *TeamChange) GetDescription() *TeamDescription { - if t == nil { - return nil - } - return t.Description -} - -// GetName returns the Name field. -func (t *TeamChange) GetName() *TeamName { - if t == nil { - return nil - } - return t.Name -} - -// GetPrivacy returns the Privacy field. -func (t *TeamChange) GetPrivacy() *TeamPrivacy { - if t == nil { - return nil - } - return t.Privacy -} - -// GetRepository returns the Repository field. -func (t *TeamChange) GetRepository() *TeamRepository { - if t == nil { - return nil - } - return t.Repository -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (t *TeamDescription) GetFrom() string { - if t == nil || t.From == nil { - return "" - } - return *t.From -} - -// GetAuthor returns the Author field. -func (t *TeamDiscussion) GetAuthor() *User { - if t == nil { - return nil - } - return t.Author -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetBody() string { - if t == nil || t.Body == nil { - return "" - } - return *t.Body -} - -// GetBodyHTML returns the BodyHTML field if it's non-nil, zero value otherwise. 
-func (t *TeamDiscussion) GetBodyHTML() string { - if t == nil || t.BodyHTML == nil { - return "" - } - return *t.BodyHTML -} - -// GetBodyVersion returns the BodyVersion field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetBodyVersion() string { - if t == nil || t.BodyVersion == nil { - return "" - } - return *t.BodyVersion -} - -// GetCommentsCount returns the CommentsCount field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetCommentsCount() int { - if t == nil || t.CommentsCount == nil { - return 0 - } - return *t.CommentsCount -} - -// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetCommentsURL() string { - if t == nil || t.CommentsURL == nil { - return "" - } - return *t.CommentsURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetCreatedAt() Timestamp { - if t == nil || t.CreatedAt == nil { - return Timestamp{} - } - return *t.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetHTMLURL() string { - if t == nil || t.HTMLURL == nil { - return "" - } - return *t.HTMLURL -} - -// GetLastEditedAt returns the LastEditedAt field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetLastEditedAt() Timestamp { - if t == nil || t.LastEditedAt == nil { - return Timestamp{} - } - return *t.LastEditedAt -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetNodeID() string { - if t == nil || t.NodeID == nil { - return "" - } - return *t.NodeID -} - -// GetNumber returns the Number field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetNumber() int { - if t == nil || t.Number == nil { - return 0 - } - return *t.Number -} - -// GetPinned returns the Pinned field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetPinned() bool { - if t == nil || t.Pinned == nil { - return false - } - return *t.Pinned -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetPrivate() bool { - if t == nil || t.Private == nil { - return false - } - return *t.Private -} - -// GetReactions returns the Reactions field. -func (t *TeamDiscussion) GetReactions() *Reactions { - if t == nil { - return nil - } - return t.Reactions -} - -// GetTeamURL returns the TeamURL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetTeamURL() string { - if t == nil || t.TeamURL == nil { - return "" - } - return *t.TeamURL -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetTitle() string { - if t == nil || t.Title == nil { - return "" - } - return *t.Title -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetUpdatedAt() Timestamp { - if t == nil || t.UpdatedAt == nil { - return Timestamp{} - } - return *t.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *TeamDiscussion) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (t *TeamEvent) GetAction() string { - if t == nil || t.Action == nil { - return "" - } - return *t.Action -} - -// GetChanges returns the Changes field. 
-func (t *TeamEvent) GetChanges() *TeamChange { - if t == nil { - return nil - } - return t.Changes -} - -// GetInstallation returns the Installation field. -func (t *TeamEvent) GetInstallation() *Installation { - if t == nil { - return nil - } - return t.Installation -} - -// GetOrg returns the Org field. -func (t *TeamEvent) GetOrg() *Organization { - if t == nil { - return nil - } - return t.Org -} - -// GetRepo returns the Repo field. -func (t *TeamEvent) GetRepo() *Repository { - if t == nil { - return nil - } - return t.Repo -} - -// GetSender returns the Sender field. -func (t *TeamEvent) GetSender() *User { - if t == nil { - return nil - } - return t.Sender -} - -// GetTeam returns the Team field. -func (t *TeamEvent) GetTeam() *Team { - if t == nil { - return nil - } - return t.Team -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetDescription() string { - if t == nil || t.Description == nil { - return "" - } - return *t.Description -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetID() int64 { - if t == nil || t.ID == nil { - return 0 - } - return *t.ID -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetLDAPDN() string { - if t == nil || t.LDAPDN == nil { - return "" - } - return *t.LDAPDN -} - -// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetMembersURL() string { - if t == nil || t.MembersURL == nil { - return "" - } - return *t.MembersURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetPermission() string { - if t == nil || t.Permission == nil { - return "" - } - return *t.Permission -} - -// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetPrivacy() string { - if t == nil || t.Privacy == nil { - return "" - } - return *t.Privacy -} - -// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetRepositoriesURL() string { - if t == nil || t.RepositoriesURL == nil { - return "" - } - return *t.RepositoriesURL -} - -// GetSlug returns the Slug field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetSlug() string { - if t == nil || t.Slug == nil { - return "" - } - return *t.Slug -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *TeamLDAPMapping) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (t *TeamName) GetFrom() string { - if t == nil || t.From == nil { - return "" - } - return *t.From -} - -// GetFrom returns the From field. -func (t *TeamPermissions) GetFrom() *TeamPermissionsFrom { - if t == nil { - return nil - } - return t.From -} - -// GetAdmin returns the Admin field if it's non-nil, zero value otherwise. -func (t *TeamPermissionsFrom) GetAdmin() bool { - if t == nil || t.Admin == nil { - return false - } - return *t.Admin -} - -// GetPull returns the Pull field if it's non-nil, zero value otherwise. 
-func (t *TeamPermissionsFrom) GetPull() bool { - if t == nil || t.Pull == nil { - return false - } - return *t.Pull -} - -// GetPush returns the Push field if it's non-nil, zero value otherwise. -func (t *TeamPermissionsFrom) GetPush() bool { - if t == nil || t.Push == nil { - return false - } - return *t.Push -} - -// GetFrom returns the From field if it's non-nil, zero value otherwise. -func (t *TeamPrivacy) GetFrom() string { - if t == nil || t.From == nil { - return "" - } - return *t.From -} - -// GetPermission returns the Permission field if it's non-nil, zero value otherwise. -func (t *TeamProjectOptions) GetPermission() string { - if t == nil || t.Permission == nil { - return "" - } - return *t.Permission -} - -// GetPermissions returns the Permissions field. -func (t *TeamRepository) GetPermissions() *TeamPermissions { - if t == nil { - return nil - } - return t.Permissions -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (t *TemplateRepoRequest) GetDescription() string { - if t == nil || t.Description == nil { - return "" - } - return *t.Description -} - -// GetIncludeAllBranches returns the IncludeAllBranches field if it's non-nil, zero value otherwise. -func (t *TemplateRepoRequest) GetIncludeAllBranches() bool { - if t == nil || t.IncludeAllBranches == nil { - return false - } - return *t.IncludeAllBranches -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *TemplateRepoRequest) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetOwner returns the Owner field if it's non-nil, zero value otherwise. -func (t *TemplateRepoRequest) GetOwner() string { - if t == nil || t.Owner == nil { - return "" - } - return *t.Owner -} - -// GetPrivate returns the Private field if it's non-nil, zero value otherwise. -func (t *TemplateRepoRequest) GetPrivate() bool { - if t == nil || t.Private == nil { - return false - } - return *t.Private -} - -// GetFragment returns the Fragment field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetFragment() string { - if t == nil || t.Fragment == nil { - return "" - } - return *t.Fragment -} - -// GetObjectType returns the ObjectType field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetObjectType() string { - if t == nil || t.ObjectType == nil { - return "" - } - return *t.ObjectType -} - -// GetObjectURL returns the ObjectURL field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetObjectURL() string { - if t == nil || t.ObjectURL == nil { - return "" - } - return *t.ObjectURL -} - -// GetProperty returns the Property field if it's non-nil, zero value otherwise. -func (t *TextMatch) GetProperty() string { - if t == nil || t.Property == nil { - return "" - } - return *t.Property -} - -// GetActor returns the Actor field. -func (t *Timeline) GetActor() *User { - if t == nil { - return nil - } - return t.Actor -} - -// GetAssignee returns the Assignee field. -func (t *Timeline) GetAssignee() *User { - if t == nil { - return nil - } - return t.Assignee -} - -// GetAssigner returns the Assigner field. -func (t *Timeline) GetAssigner() *User { - if t == nil { - return nil - } - return t.Assigner -} - -// GetAuthor returns the Author field. -func (t *Timeline) GetAuthor() *CommitAuthor { - if t == nil { - return nil - } - return t.Author -} - -// GetBody returns the Body field if it's non-nil, zero value otherwise. 
-func (t *Timeline) GetBody() string { - if t == nil || t.Body == nil { - return "" - } - return *t.Body -} - -// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise. -func (t *Timeline) GetCommitID() string { - if t == nil || t.CommitID == nil { - return "" - } - return *t.CommitID -} - -// GetCommitter returns the Committer field. -func (t *Timeline) GetCommitter() *CommitAuthor { - if t == nil { - return nil - } - return t.Committer -} - -// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise. -func (t *Timeline) GetCommitURL() string { - if t == nil || t.CommitURL == nil { - return "" - } - return *t.CommitURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (t *Timeline) GetCreatedAt() Timestamp { - if t == nil || t.CreatedAt == nil { - return Timestamp{} - } - return *t.CreatedAt -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (t *Timeline) GetEvent() string { - if t == nil || t.Event == nil { - return "" - } - return *t.Event -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (t *Timeline) GetID() int64 { - if t == nil || t.ID == nil { - return 0 - } - return *t.ID -} - -// GetLabel returns the Label field. -func (t *Timeline) GetLabel() *Label { - if t == nil { - return nil - } - return t.Label -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (t *Timeline) GetMessage() string { - if t == nil || t.Message == nil { - return "" - } - return *t.Message -} - -// GetMilestone returns the Milestone field. -func (t *Timeline) GetMilestone() *Milestone { - if t == nil { - return nil - } - return t.Milestone -} - -// GetProjectCard returns the ProjectCard field. -func (t *Timeline) GetProjectCard() *ProjectCard { - if t == nil { - return nil - } - return t.ProjectCard -} - -// GetRename returns the Rename field. -func (t *Timeline) GetRename() *Rename { - if t == nil { - return nil - } - return t.Rename -} - -// GetRequestedTeam returns the RequestedTeam field. -func (t *Timeline) GetRequestedTeam() *Team { - if t == nil { - return nil - } - return t.RequestedTeam -} - -// GetRequester returns the Requester field. -func (t *Timeline) GetRequester() *User { - if t == nil { - return nil - } - return t.Requester -} - -// GetReviewer returns the Reviewer field. -func (t *Timeline) GetReviewer() *User { - if t == nil { - return nil - } - return t.Reviewer -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (t *Timeline) GetSHA() string { - if t == nil || t.SHA == nil { - return "" - } - return *t.SHA -} - -// GetSource returns the Source field. -func (t *Timeline) GetSource() *Source { - if t == nil { - return nil - } - return t.Source -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (t *Timeline) GetState() string { - if t == nil || t.State == nil { - return "" - } - return *t.State -} - -// GetSubmittedAt returns the SubmittedAt field if it's non-nil, zero value otherwise. -func (t *Timeline) GetSubmittedAt() Timestamp { - if t == nil || t.SubmittedAt == nil { - return Timestamp{} - } - return *t.SubmittedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *Timeline) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetUser returns the User field. 
-func (t *Timeline) GetUser() *User { - if t == nil { - return nil - } - return t.User -} - -// GetGUID returns the GUID field if it's non-nil, zero value otherwise. -func (t *Tool) GetGUID() string { - if t == nil || t.GUID == nil { - return "" - } - return *t.GUID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *Tool) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetVersion returns the Version field if it's non-nil, zero value otherwise. -func (t *Tool) GetVersion() string { - if t == nil || t.Version == nil { - return "" - } - return *t.Version -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetCreatedAt() Timestamp { - if t == nil || t.CreatedAt == nil { - return Timestamp{} - } - return *t.CreatedAt -} - -// GetCreatedBy returns the CreatedBy field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetCreatedBy() string { - if t == nil || t.CreatedBy == nil { - return "" - } - return *t.CreatedBy -} - -// GetCurated returns the Curated field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetCurated() bool { - if t == nil || t.Curated == nil { - return false - } - return *t.Curated -} - -// GetDescription returns the Description field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetDescription() string { - if t == nil || t.Description == nil { - return "" - } - return *t.Description -} - -// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetDisplayName() string { - if t == nil || t.DisplayName == nil { - return "" - } - return *t.DisplayName -} - -// GetFeatured returns the Featured field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetFeatured() bool { - if t == nil || t.Featured == nil { - return false - } - return *t.Featured -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetName() string { - if t == nil || t.Name == nil { - return "" - } - return *t.Name -} - -// GetScore returns the Score field. -func (t *TopicResult) GetScore() *float64 { - if t == nil { - return nil - } - return t.Score -} - -// GetShortDescription returns the ShortDescription field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetShortDescription() string { - if t == nil || t.ShortDescription == nil { - return "" - } - return *t.ShortDescription -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (t *TopicResult) GetUpdatedAt() string { - if t == nil || t.UpdatedAt == nil { - return "" - } - return *t.UpdatedAt -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (t *TopicsSearchResult) GetIncompleteResults() bool { - if t == nil || t.IncompleteResults == nil { - return false - } - return *t.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (t *TopicsSearchResult) GetTotal() int { - if t == nil || t.Total == nil { - return 0 - } - return *t.Total -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficClones) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. 
-func (t *TrafficClones) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficData) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise. -func (t *TrafficData) GetTimestamp() Timestamp { - if t == nil || t.Timestamp == nil { - return Timestamp{} - } - return *t.Timestamp -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficData) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetPath() string { - if t == nil || t.Path == nil { - return "" - } - return *t.Path -} - -// GetTitle returns the Title field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetTitle() string { - if t == nil || t.Title == nil { - return "" - } - return *t.Title -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficPath) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficReferrer) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetReferrer returns the Referrer field if it's non-nil, zero value otherwise. -func (t *TrafficReferrer) GetReferrer() string { - if t == nil || t.Referrer == nil { - return "" - } - return *t.Referrer -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficReferrer) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetCount returns the Count field if it's non-nil, zero value otherwise. -func (t *TrafficViews) GetCount() int { - if t == nil || t.Count == nil { - return 0 - } - return *t.Count -} - -// GetUniques returns the Uniques field if it's non-nil, zero value otherwise. -func (t *TrafficViews) GetUniques() int { - if t == nil || t.Uniques == nil { - return 0 - } - return *t.Uniques -} - -// GetNewName returns the NewName field if it's non-nil, zero value otherwise. -func (t *TransferRequest) GetNewName() string { - if t == nil || t.NewName == nil { - return "" - } - return *t.NewName -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (t *Tree) GetSHA() string { - if t == nil || t.SHA == nil { - return "" - } - return *t.SHA -} - -// GetTruncated returns the Truncated field if it's non-nil, zero value otherwise. -func (t *Tree) GetTruncated() bool { - if t == nil || t.Truncated == nil { - return false - } - return *t.Truncated -} - -// GetContent returns the Content field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetContent() string { - if t == nil || t.Content == nil { - return "" - } - return *t.Content -} - -// GetMode returns the Mode field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetMode() string { - if t == nil || t.Mode == nil { - return "" - } - return *t.Mode -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. 
-func (t *TreeEntry) GetPath() string { - if t == nil || t.Path == nil { - return "" - } - return *t.Path -} - -// GetSHA returns the SHA field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetSHA() string { - if t == nil || t.SHA == nil { - return "" - } - return *t.SHA -} - -// GetSize returns the Size field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetSize() int { - if t == nil || t.Size == nil { - return 0 - } - return *t.Size -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetType() string { - if t == nil || t.Type == nil { - return "" - } - return *t.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (t *TreeEntry) GetURL() string { - if t == nil || t.URL == nil { - return "" - } - return *t.URL -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (u *UpdateAttributeForSCIMUserOperations) GetPath() string { - if u == nil || u.Path == nil { - return "" - } - return *u.Path -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetCompletedAt() Timestamp { - if u == nil || u.CompletedAt == nil { - return Timestamp{} - } - return *u.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetConclusion() string { - if u == nil || u.Conclusion == nil { - return "" - } - return *u.Conclusion -} - -// GetDetailsURL returns the DetailsURL field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetDetailsURL() string { - if u == nil || u.DetailsURL == nil { - return "" - } - return *u.DetailsURL -} - -// GetExternalID returns the ExternalID field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetExternalID() string { - if u == nil || u.ExternalID == nil { - return "" - } - return *u.ExternalID -} - -// GetOutput returns the Output field. -func (u *UpdateCheckRunOptions) GetOutput() *CheckRunOutput { - if u == nil { - return nil - } - return u.Output -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (u *UpdateCheckRunOptions) GetStatus() string { - if u == nil || u.Status == nil { - return "" - } - return *u.Status -} - -// GetQuerySuite returns the QuerySuite field if it's non-nil, zero value otherwise. -func (u *UpdateDefaultSetupConfigurationOptions) GetQuerySuite() string { - if u == nil || u.QuerySuite == nil { - return "" - } - return *u.QuerySuite -} - -// GetRunID returns the RunID field if it's non-nil, zero value otherwise. -func (u *UpdateDefaultSetupConfigurationResponse) GetRunID() int64 { - if u == nil || u.RunID == nil { - return 0 - } - return *u.RunID -} - -// GetRunURL returns the RunURL field if it's non-nil, zero value otherwise. -func (u *UpdateDefaultSetupConfigurationResponse) GetRunURL() string { - if u == nil || u.RunURL == nil { - return "" - } - return *u.RunURL -} - -// GetAllowsPublicRepositories returns the AllowsPublicRepositories field if it's non-nil, zero value otherwise. -func (u *UpdateRunnerGroupRequest) GetAllowsPublicRepositories() bool { - if u == nil || u.AllowsPublicRepositories == nil { - return false - } - return *u.AllowsPublicRepositories -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. 
-func (u *UpdateRunnerGroupRequest) GetName() string { - if u == nil || u.Name == nil { - return "" - } - return *u.Name -} - -// GetRestrictedToWorkflows returns the RestrictedToWorkflows field if it's non-nil, zero value otherwise. -func (u *UpdateRunnerGroupRequest) GetRestrictedToWorkflows() bool { - if u == nil || u.RestrictedToWorkflows == nil { - return false - } - return *u.RestrictedToWorkflows -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (u *UpdateRunnerGroupRequest) GetVisibility() string { - if u == nil || u.Visibility == nil { - return "" - } - return *u.Visibility -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. -func (u *User) GetAvatarURL() string { - if u == nil || u.AvatarURL == nil { - return "" - } - return *u.AvatarURL -} - -// GetBio returns the Bio field if it's non-nil, zero value otherwise. -func (u *User) GetBio() string { - if u == nil || u.Bio == nil { - return "" - } - return *u.Bio -} - -// GetBlog returns the Blog field if it's non-nil, zero value otherwise. -func (u *User) GetBlog() string { - if u == nil || u.Blog == nil { - return "" - } - return *u.Blog -} - -// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise. -func (u *User) GetCollaborators() int { - if u == nil || u.Collaborators == nil { - return 0 - } - return *u.Collaborators -} - -// GetCompany returns the Company field if it's non-nil, zero value otherwise. -func (u *User) GetCompany() string { - if u == nil || u.Company == nil { - return "" - } - return *u.Company -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (u *User) GetCreatedAt() Timestamp { - if u == nil || u.CreatedAt == nil { - return Timestamp{} - } - return *u.CreatedAt -} - -// GetDiskUsage returns the DiskUsage field if it's non-nil, zero value otherwise. -func (u *User) GetDiskUsage() int { - if u == nil || u.DiskUsage == nil { - return 0 - } - return *u.DiskUsage -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (u *User) GetEmail() string { - if u == nil || u.Email == nil { - return "" - } - return *u.Email -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (u *User) GetEventsURL() string { - if u == nil || u.EventsURL == nil { - return "" - } - return *u.EventsURL -} - -// GetFollowers returns the Followers field if it's non-nil, zero value otherwise. -func (u *User) GetFollowers() int { - if u == nil || u.Followers == nil { - return 0 - } - return *u.Followers -} - -// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise. -func (u *User) GetFollowersURL() string { - if u == nil || u.FollowersURL == nil { - return "" - } - return *u.FollowersURL -} - -// GetFollowing returns the Following field if it's non-nil, zero value otherwise. -func (u *User) GetFollowing() int { - if u == nil || u.Following == nil { - return 0 - } - return *u.Following -} - -// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise. -func (u *User) GetFollowingURL() string { - if u == nil || u.FollowingURL == nil { - return "" - } - return *u.FollowingURL -} - -// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise. -func (u *User) GetGistsURL() string { - if u == nil || u.GistsURL == nil { - return "" - } - return *u.GistsURL -} - -// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise. 
-func (u *User) GetGravatarID() string { - if u == nil || u.GravatarID == nil { - return "" - } - return *u.GravatarID -} - -// GetHireable returns the Hireable field if it's non-nil, zero value otherwise. -func (u *User) GetHireable() bool { - if u == nil || u.Hireable == nil { - return false - } - return *u.Hireable -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (u *User) GetHTMLURL() string { - if u == nil || u.HTMLURL == nil { - return "" - } - return *u.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (u *User) GetID() int64 { - if u == nil || u.ID == nil { - return 0 - } - return *u.ID -} - -// GetLdapDn returns the LdapDn field if it's non-nil, zero value otherwise. -func (u *User) GetLdapDn() string { - if u == nil || u.LdapDn == nil { - return "" - } - return *u.LdapDn -} - -// GetLocation returns the Location field if it's non-nil, zero value otherwise. -func (u *User) GetLocation() string { - if u == nil || u.Location == nil { - return "" - } - return *u.Location -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (u *User) GetLogin() string { - if u == nil || u.Login == nil { - return "" - } - return *u.Login -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (u *User) GetName() string { - if u == nil || u.Name == nil { - return "" - } - return *u.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (u *User) GetNodeID() string { - if u == nil || u.NodeID == nil { - return "" - } - return *u.NodeID -} - -// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise. -func (u *User) GetOrganizationsURL() string { - if u == nil || u.OrganizationsURL == nil { - return "" - } - return *u.OrganizationsURL -} - -// GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise. -func (u *User) GetOwnedPrivateRepos() int64 { - if u == nil || u.OwnedPrivateRepos == nil { - return 0 - } - return *u.OwnedPrivateRepos -} - -// GetPermissions returns the Permissions map if it's non-nil, an empty map otherwise. -func (u *User) GetPermissions() map[string]bool { - if u == nil || u.Permissions == nil { - return map[string]bool{} - } - return u.Permissions -} - -// GetPlan returns the Plan field. -func (u *User) GetPlan() *Plan { - if u == nil { - return nil - } - return u.Plan -} - -// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise. -func (u *User) GetPrivateGists() int { - if u == nil || u.PrivateGists == nil { - return 0 - } - return *u.PrivateGists -} - -// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise. -func (u *User) GetPublicGists() int { - if u == nil || u.PublicGists == nil { - return 0 - } - return *u.PublicGists -} - -// GetPublicRepos returns the PublicRepos field if it's non-nil, zero value otherwise. -func (u *User) GetPublicRepos() int { - if u == nil || u.PublicRepos == nil { - return 0 - } - return *u.PublicRepos -} - -// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise. -func (u *User) GetReceivedEventsURL() string { - if u == nil || u.ReceivedEventsURL == nil { - return "" - } - return *u.ReceivedEventsURL -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. 
-func (u *User) GetReposURL() string { - if u == nil || u.ReposURL == nil { - return "" - } - return *u.ReposURL -} - -// GetRoleName returns the RoleName field if it's non-nil, zero value otherwise. -func (u *User) GetRoleName() string { - if u == nil || u.RoleName == nil { - return "" - } - return *u.RoleName -} - -// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise. -func (u *User) GetSiteAdmin() bool { - if u == nil || u.SiteAdmin == nil { - return false - } - return *u.SiteAdmin -} - -// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise. -func (u *User) GetStarredURL() string { - if u == nil || u.StarredURL == nil { - return "" - } - return *u.StarredURL -} - -// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise. -func (u *User) GetSubscriptionsURL() string { - if u == nil || u.SubscriptionsURL == nil { - return "" - } - return *u.SubscriptionsURL -} - -// GetSuspendedAt returns the SuspendedAt field if it's non-nil, zero value otherwise. -func (u *User) GetSuspendedAt() Timestamp { - if u == nil || u.SuspendedAt == nil { - return Timestamp{} - } - return *u.SuspendedAt -} - -// GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise. -func (u *User) GetTotalPrivateRepos() int64 { - if u == nil || u.TotalPrivateRepos == nil { - return 0 - } - return *u.TotalPrivateRepos -} - -// GetTwitterUsername returns the TwitterUsername field if it's non-nil, zero value otherwise. -func (u *User) GetTwitterUsername() string { - if u == nil || u.TwitterUsername == nil { - return "" - } - return *u.TwitterUsername -} - -// GetTwoFactorAuthentication returns the TwoFactorAuthentication field if it's non-nil, zero value otherwise. -func (u *User) GetTwoFactorAuthentication() bool { - if u == nil || u.TwoFactorAuthentication == nil { - return false - } - return *u.TwoFactorAuthentication -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (u *User) GetType() string { - if u == nil || u.Type == nil { - return "" - } - return *u.Type -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (u *User) GetUpdatedAt() Timestamp { - if u == nil || u.UpdatedAt == nil { - return Timestamp{} - } - return *u.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (u *User) GetURL() string { - if u == nil || u.URL == nil { - return "" - } - return *u.URL -} - -// GetApp returns the App field. -func (u *UserAuthorization) GetApp() *OAuthAPP { - if u == nil { - return nil - } - return u.App -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetCreatedAt() Timestamp { - if u == nil || u.CreatedAt == nil { - return Timestamp{} - } - return *u.CreatedAt -} - -// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetFingerprint() string { - if u == nil || u.Fingerprint == nil { - return "" - } - return *u.Fingerprint -} - -// GetHashedToken returns the HashedToken field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetHashedToken() string { - if u == nil || u.HashedToken == nil { - return "" - } - return *u.HashedToken -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. 
-func (u *UserAuthorization) GetID() int64 { - if u == nil || u.ID == nil { - return 0 - } - return *u.ID -} - -// GetNote returns the Note field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetNote() string { - if u == nil || u.Note == nil { - return "" - } - return *u.Note -} - -// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetNoteURL() string { - if u == nil || u.NoteURL == nil { - return "" - } - return *u.NoteURL -} - -// GetToken returns the Token field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetToken() string { - if u == nil || u.Token == nil { - return "" - } - return *u.Token -} - -// GetTokenLastEight returns the TokenLastEight field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetTokenLastEight() string { - if u == nil || u.TokenLastEight == nil { - return "" - } - return *u.TokenLastEight -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetUpdatedAt() Timestamp { - if u == nil || u.UpdatedAt == nil { - return Timestamp{} - } - return *u.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (u *UserAuthorization) GetURL() string { - if u == nil || u.URL == nil { - return "" - } - return *u.URL -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (u *UserContext) GetMessage() string { - if u == nil || u.Message == nil { - return "" - } - return *u.Message -} - -// GetOcticon returns the Octicon field if it's non-nil, zero value otherwise. -func (u *UserContext) GetOcticon() string { - if u == nil || u.Octicon == nil { - return "" - } - return *u.Octicon -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (u *UserEmail) GetEmail() string { - if u == nil || u.Email == nil { - return "" - } - return *u.Email -} - -// GetPrimary returns the Primary field if it's non-nil, zero value otherwise. -func (u *UserEmail) GetPrimary() bool { - if u == nil || u.Primary == nil { - return false - } - return *u.Primary -} - -// GetVerified returns the Verified field if it's non-nil, zero value otherwise. -func (u *UserEmail) GetVerified() bool { - if u == nil || u.Verified == nil { - return false - } - return *u.Verified -} - -// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. -func (u *UserEmail) GetVisibility() string { - if u == nil || u.Visibility == nil { - return "" - } - return *u.Visibility -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (u *UserEvent) GetAction() string { - if u == nil || u.Action == nil { - return "" - } - return *u.Action -} - -// GetEnterprise returns the Enterprise field. -func (u *UserEvent) GetEnterprise() *Enterprise { - if u == nil { - return nil - } - return u.Enterprise -} - -// GetInstallation returns the Installation field. -func (u *UserEvent) GetInstallation() *Installation { - if u == nil { - return nil - } - return u.Installation -} - -// GetSender returns the Sender field. -func (u *UserEvent) GetSender() *User { - if u == nil { - return nil - } - return u.Sender -} - -// GetUser returns the User field. -func (u *UserEvent) GetUser() *User { - if u == nil { - return nil - } - return u.User -} - -// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. 
-func (u *UserLDAPMapping) GetAvatarURL() string { - if u == nil || u.AvatarURL == nil { - return "" - } - return *u.AvatarURL -} - -// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetEventsURL() string { - if u == nil || u.EventsURL == nil { - return "" - } - return *u.EventsURL -} - -// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetFollowersURL() string { - if u == nil || u.FollowersURL == nil { - return "" - } - return *u.FollowersURL -} - -// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetFollowingURL() string { - if u == nil || u.FollowingURL == nil { - return "" - } - return *u.FollowingURL -} - -// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetGistsURL() string { - if u == nil || u.GistsURL == nil { - return "" - } - return *u.GistsURL -} - -// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetGravatarID() string { - if u == nil || u.GravatarID == nil { - return "" - } - return *u.GravatarID -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetID() int64 { - if u == nil || u.ID == nil { - return 0 - } - return *u.ID -} - -// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetLDAPDN() string { - if u == nil || u.LDAPDN == nil { - return "" - } - return *u.LDAPDN -} - -// GetLogin returns the Login field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetLogin() string { - if u == nil || u.Login == nil { - return "" - } - return *u.Login -} - -// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetOrganizationsURL() string { - if u == nil || u.OrganizationsURL == nil { - return "" - } - return *u.OrganizationsURL -} - -// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetReceivedEventsURL() string { - if u == nil || u.ReceivedEventsURL == nil { - return "" - } - return *u.ReceivedEventsURL -} - -// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetReposURL() string { - if u == nil || u.ReposURL == nil { - return "" - } - return *u.ReposURL -} - -// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetSiteAdmin() bool { - if u == nil || u.SiteAdmin == nil { - return false - } - return *u.SiteAdmin -} - -// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetStarredURL() string { - if u == nil || u.StarredURL == nil { - return "" - } - return *u.StarredURL -} - -// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetSubscriptionsURL() string { - if u == nil || u.SubscriptionsURL == nil { - return "" - } - return *u.SubscriptionsURL -} - -// GetType returns the Type field if it's non-nil, zero value otherwise. -func (u *UserLDAPMapping) GetType() string { - if u == nil || u.Type == nil { - return "" - } - return *u.Type -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. 
-func (u *UserLDAPMapping) GetURL() string { - if u == nil || u.URL == nil { - return "" - } - return *u.URL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetCreatedAt() string { - if u == nil || u.CreatedAt == nil { - return "" - } - return *u.CreatedAt -} - -// GetExcludeAttachments returns the ExcludeAttachments field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetExcludeAttachments() bool { - if u == nil || u.ExcludeAttachments == nil { - return false - } - return *u.ExcludeAttachments -} - -// GetGUID returns the GUID field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetGUID() string { - if u == nil || u.GUID == nil { - return "" - } - return *u.GUID -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetID() int64 { - if u == nil || u.ID == nil { - return 0 - } - return *u.ID -} - -// GetLockRepositories returns the LockRepositories field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetLockRepositories() bool { - if u == nil || u.LockRepositories == nil { - return false - } - return *u.LockRepositories -} - -// GetState returns the State field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetState() string { - if u == nil || u.State == nil { - return "" - } - return *u.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetUpdatedAt() string { - if u == nil || u.UpdatedAt == nil { - return "" - } - return *u.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (u *UserMigration) GetURL() string { - if u == nil || u.URL == nil { - return "" - } - return *u.URL -} - -// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. -func (u *UsersSearchResult) GetIncompleteResults() bool { - if u == nil || u.IncompleteResults == nil { - return false - } - return *u.IncompleteResults -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (u *UsersSearchResult) GetTotal() int { - if u == nil || u.Total == nil { - return 0 - } - return *u.Total -} - -// GetAdminUsers returns the AdminUsers field if it's non-nil, zero value otherwise. -func (u *UserStats) GetAdminUsers() int { - if u == nil || u.AdminUsers == nil { - return 0 - } - return *u.AdminUsers -} - -// GetSuspendedUsers returns the SuspendedUsers field if it's non-nil, zero value otherwise. -func (u *UserStats) GetSuspendedUsers() int { - if u == nil || u.SuspendedUsers == nil { - return 0 - } - return *u.SuspendedUsers -} - -// GetTotalUsers returns the TotalUsers field if it's non-nil, zero value otherwise. -func (u *UserStats) GetTotalUsers() int { - if u == nil || u.TotalUsers == nil { - return 0 - } - return *u.TotalUsers -} - -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (u *UserSuspendOptions) GetReason() string { - if u == nil || u.Reason == nil { - return "" - } - return *u.Reason -} - -// GetEcosystem returns the Ecosystem field if it's non-nil, zero value otherwise. -func (v *VulnerabilityPackage) GetEcosystem() string { - if v == nil || v.Ecosystem == nil { - return "" - } - return *v.Ecosystem -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. 
-func (v *VulnerabilityPackage) GetName() string { - if v == nil || v.Name == nil { - return "" - } - return *v.Name -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (w *WatchEvent) GetAction() string { - if w == nil || w.Action == nil { - return "" - } - return *w.Action -} - -// GetInstallation returns the Installation field. -func (w *WatchEvent) GetInstallation() *Installation { - if w == nil { - return nil - } - return w.Installation -} - -// GetRepo returns the Repo field. -func (w *WatchEvent) GetRepo() *Repository { - if w == nil { - return nil - } - return w.Repo -} - -// GetSender returns the Sender field. -func (w *WatchEvent) GetSender() *User { - if w == nil { - return nil - } - return w.Sender -} - -// GetTotal returns the Total field if it's non-nil, zero value otherwise. -func (w *WeeklyCommitActivity) GetTotal() int { - if w == nil || w.Total == nil { - return 0 - } - return *w.Total -} - -// GetWeek returns the Week field if it's non-nil, zero value otherwise. -func (w *WeeklyCommitActivity) GetWeek() Timestamp { - if w == nil || w.Week == nil { - return Timestamp{} - } - return *w.Week -} - -// GetAdditions returns the Additions field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetAdditions() int { - if w == nil || w.Additions == nil { - return 0 - } - return *w.Additions -} - -// GetCommits returns the Commits field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetCommits() int { - if w == nil || w.Commits == nil { - return 0 - } - return *w.Commits -} - -// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetDeletions() int { - if w == nil || w.Deletions == nil { - return 0 - } - return *w.Deletions -} - -// GetWeek returns the Week field if it's non-nil, zero value otherwise. -func (w *WeeklyStats) GetWeek() Timestamp { - if w == nil || w.Week == nil { - return Timestamp{} - } - return *w.Week -} - -// GetBadgeURL returns the BadgeURL field if it's non-nil, zero value otherwise. -func (w *Workflow) GetBadgeURL() string { - if w == nil || w.BadgeURL == nil { - return "" - } - return *w.BadgeURL -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (w *Workflow) GetCreatedAt() Timestamp { - if w == nil || w.CreatedAt == nil { - return Timestamp{} - } - return *w.CreatedAt -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (w *Workflow) GetHTMLURL() string { - if w == nil || w.HTMLURL == nil { - return "" - } - return *w.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (w *Workflow) GetID() int64 { - if w == nil || w.ID == nil { - return 0 - } - return *w.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (w *Workflow) GetName() string { - if w == nil || w.Name == nil { - return "" - } - return *w.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (w *Workflow) GetNodeID() string { - if w == nil || w.NodeID == nil { - return "" - } - return *w.NodeID -} - -// GetPath returns the Path field if it's non-nil, zero value otherwise. -func (w *Workflow) GetPath() string { - if w == nil || w.Path == nil { - return "" - } - return *w.Path -} - -// GetState returns the State field if it's non-nil, zero value otherwise. 
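The same zero-value behavior makes aggregation loops trivial; a sketch with a hypothetical helper over the WeeklyStats getters above:

```go
// Sum commits across contributor weeks without per-entry nil checks:
// GetCommits returns 0 for nil entries and unset fields alike.
func totalCommits(weeks []*WeeklyStats) int {
	sum := 0
	for _, w := range weeks {
		sum += w.GetCommits()
	}
	return sum
}
```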
-func (w *Workflow) GetState() string { - if w == nil || w.State == nil { - return "" - } - return *w.State -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (w *Workflow) GetUpdatedAt() Timestamp { - if w == nil || w.UpdatedAt == nil { - return Timestamp{} - } - return *w.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (w *Workflow) GetURL() string { - if w == nil || w.URL == nil { - return "" - } - return *w.URL -} - -// GetTotalMS returns the TotalMS field if it's non-nil, zero value otherwise. -func (w *WorkflowBill) GetTotalMS() int64 { - if w == nil || w.TotalMS == nil { - return 0 - } - return *w.TotalMS -} - -// GetInstallation returns the Installation field. -func (w *WorkflowDispatchEvent) GetInstallation() *Installation { - if w == nil { - return nil - } - return w.Installation -} - -// GetOrg returns the Org field. -func (w *WorkflowDispatchEvent) GetOrg() *Organization { - if w == nil { - return nil - } - return w.Org -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (w *WorkflowDispatchEvent) GetRef() string { - if w == nil || w.Ref == nil { - return "" - } - return *w.Ref -} - -// GetRepo returns the Repo field. -func (w *WorkflowDispatchEvent) GetRepo() *Repository { - if w == nil { - return nil - } - return w.Repo -} - -// GetSender returns the Sender field. -func (w *WorkflowDispatchEvent) GetSender() *User { - if w == nil { - return nil - } - return w.Sender -} - -// GetWorkflow returns the Workflow field if it's non-nil, zero value otherwise. -func (w *WorkflowDispatchEvent) GetWorkflow() string { - if w == nil || w.Workflow == nil { - return "" - } - return *w.Workflow -} - -// GetCheckRunURL returns the CheckRunURL field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetCheckRunURL() string { - if w == nil || w.CheckRunURL == nil { - return "" - } - return *w.CheckRunURL -} - -// GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetCompletedAt() Timestamp { - if w == nil || w.CompletedAt == nil { - return Timestamp{} - } - return *w.CompletedAt -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetConclusion() string { - if w == nil || w.Conclusion == nil { - return "" - } - return *w.Conclusion -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetCreatedAt() Timestamp { - if w == nil || w.CreatedAt == nil { - return Timestamp{} - } - return *w.CreatedAt -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetHeadBranch() string { - if w == nil || w.HeadBranch == nil { - return "" - } - return *w.HeadBranch -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetHeadSHA() string { - if w == nil || w.HeadSHA == nil { - return "" - } - return *w.HeadSHA -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetHTMLURL() string { - if w == nil || w.HTMLURL == nil { - return "" - } - return *w.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetID() int64 { - if w == nil || w.ID == nil { - return 0 - } - return *w.ID -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. 
-func (w *WorkflowJob) GetName() string { - if w == nil || w.Name == nil { - return "" - } - return *w.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetNodeID() string { - if w == nil || w.NodeID == nil { - return "" - } - return *w.NodeID -} - -// GetRunAttempt returns the RunAttempt field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetRunAttempt() int64 { - if w == nil || w.RunAttempt == nil { - return 0 - } - return *w.RunAttempt -} - -// GetRunID returns the RunID field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetRunID() int64 { - if w == nil || w.RunID == nil { - return 0 - } - return *w.RunID -} - -// GetRunnerGroupID returns the RunnerGroupID field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetRunnerGroupID() int64 { - if w == nil || w.RunnerGroupID == nil { - return 0 - } - return *w.RunnerGroupID -} - -// GetRunnerGroupName returns the RunnerGroupName field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetRunnerGroupName() string { - if w == nil || w.RunnerGroupName == nil { - return "" - } - return *w.RunnerGroupName -} - -// GetRunnerID returns the RunnerID field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetRunnerID() int64 { - if w == nil || w.RunnerID == nil { - return 0 - } - return *w.RunnerID -} - -// GetRunnerName returns the RunnerName field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetRunnerName() string { - if w == nil || w.RunnerName == nil { - return "" - } - return *w.RunnerName -} - -// GetRunURL returns the RunURL field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetRunURL() string { - if w == nil || w.RunURL == nil { - return "" - } - return *w.RunURL -} - -// GetStartedAt returns the StartedAt field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetStartedAt() Timestamp { - if w == nil || w.StartedAt == nil { - return Timestamp{} - } - return *w.StartedAt -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetStatus() string { - if w == nil || w.Status == nil { - return "" - } - return *w.Status -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetURL() string { - if w == nil || w.URL == nil { - return "" - } - return *w.URL -} - -// GetWorkflowName returns the WorkflowName field if it's non-nil, zero value otherwise. -func (w *WorkflowJob) GetWorkflowName() string { - if w == nil || w.WorkflowName == nil { - return "" - } - return *w.WorkflowName -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (w *WorkflowJobEvent) GetAction() string { - if w == nil || w.Action == nil { - return "" - } - return *w.Action -} - -// GetInstallation returns the Installation field. -func (w *WorkflowJobEvent) GetInstallation() *Installation { - if w == nil { - return nil - } - return w.Installation -} - -// GetOrg returns the Org field. -func (w *WorkflowJobEvent) GetOrg() *Organization { - if w == nil { - return nil - } - return w.Org -} - -// GetRepo returns the Repo field. -func (w *WorkflowJobEvent) GetRepo() *Repository { - if w == nil { - return nil - } - return w.Repo -} - -// GetSender returns the Sender field. -func (w *WorkflowJobEvent) GetSender() *User { - if w == nil { - return nil - } - return w.Sender -} - -// GetWorkflowJob returns the WorkflowJob field. 
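For webhook payloads the pattern composes with action dispatch; a hypothetical handler sketch for a workflow_job delivery (action strings per GitHub's webhook documentation):

```go
// Hypothetical dispatch on a workflow_job event; nested getters stay
// nil-safe even for sparse deliveries.
func summarizeWorkflowJobEvent(e *WorkflowJobEvent) string {
	switch e.GetAction() {
	case "queued", "in_progress", "completed":
		return e.GetAction() + ": " + e.GetWorkflowJob().GetName()
	default:
		return "ignored action"
	}
}
```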
-func (w *WorkflowJobEvent) GetWorkflowJob() *WorkflowJob { - if w == nil { - return nil - } - return w.WorkflowJob -} - -// GetActor returns the Actor field. -func (w *WorkflowRun) GetActor() *User { - if w == nil { - return nil - } - return w.Actor -} - -// GetArtifactsURL returns the ArtifactsURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetArtifactsURL() string { - if w == nil || w.ArtifactsURL == nil { - return "" - } - return *w.ArtifactsURL -} - -// GetCancelURL returns the CancelURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetCancelURL() string { - if w == nil || w.CancelURL == nil { - return "" - } - return *w.CancelURL -} - -// GetCheckSuiteID returns the CheckSuiteID field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetCheckSuiteID() int64 { - if w == nil || w.CheckSuiteID == nil { - return 0 - } - return *w.CheckSuiteID -} - -// GetCheckSuiteNodeID returns the CheckSuiteNodeID field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetCheckSuiteNodeID() string { - if w == nil || w.CheckSuiteNodeID == nil { - return "" - } - return *w.CheckSuiteNodeID -} - -// GetCheckSuiteURL returns the CheckSuiteURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetCheckSuiteURL() string { - if w == nil || w.CheckSuiteURL == nil { - return "" - } - return *w.CheckSuiteURL -} - -// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetConclusion() string { - if w == nil || w.Conclusion == nil { - return "" - } - return *w.Conclusion -} - -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetCreatedAt() Timestamp { - if w == nil || w.CreatedAt == nil { - return Timestamp{} - } - return *w.CreatedAt -} - -// GetDisplayTitle returns the DisplayTitle field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetDisplayTitle() string { - if w == nil || w.DisplayTitle == nil { - return "" - } - return *w.DisplayTitle -} - -// GetEvent returns the Event field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetEvent() string { - if w == nil || w.Event == nil { - return "" - } - return *w.Event -} - -// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetHeadBranch() string { - if w == nil || w.HeadBranch == nil { - return "" - } - return *w.HeadBranch -} - -// GetHeadCommit returns the HeadCommit field. -func (w *WorkflowRun) GetHeadCommit() *HeadCommit { - if w == nil { - return nil - } - return w.HeadCommit -} - -// GetHeadRepository returns the HeadRepository field. -func (w *WorkflowRun) GetHeadRepository() *Repository { - if w == nil { - return nil - } - return w.HeadRepository -} - -// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetHeadSHA() string { - if w == nil || w.HeadSHA == nil { - return "" - } - return *w.HeadSHA -} - -// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetHTMLURL() string { - if w == nil || w.HTMLURL == nil { - return "" - } - return *w.HTMLURL -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetID() int64 { - if w == nil || w.ID == nil { - return 0 - } - return *w.ID -} - -// GetJobsURL returns the JobsURL field if it's non-nil, zero value otherwise. 
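Getters also chain across nested types; a sketch assuming Repository.GetFullName, which is generated earlier in this same file:

```go
// Chaining sketch: GetHeadRepository returns nil for a nil run, and
// GetFullName then returns "" for a nil repository, so the whole
// expression is panic-free.
func runLabel(r *WorkflowRun) string {
	return r.GetHeadRepository().GetFullName() + "@" + r.GetHeadBranch()
}
```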
-func (w *WorkflowRun) GetJobsURL() string { - if w == nil || w.JobsURL == nil { - return "" - } - return *w.JobsURL -} - -// GetLogsURL returns the LogsURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetLogsURL() string { - if w == nil || w.LogsURL == nil { - return "" - } - return *w.LogsURL -} - -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetName() string { - if w == nil || w.Name == nil { - return "" - } - return *w.Name -} - -// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetNodeID() string { - if w == nil || w.NodeID == nil { - return "" - } - return *w.NodeID -} - -// GetPreviousAttemptURL returns the PreviousAttemptURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetPreviousAttemptURL() string { - if w == nil || w.PreviousAttemptURL == nil { - return "" - } - return *w.PreviousAttemptURL -} - -// GetRepository returns the Repository field. -func (w *WorkflowRun) GetRepository() *Repository { - if w == nil { - return nil - } - return w.Repository -} - -// GetRerunURL returns the RerunURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetRerunURL() string { - if w == nil || w.RerunURL == nil { - return "" - } - return *w.RerunURL -} - -// GetRunAttempt returns the RunAttempt field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetRunAttempt() int { - if w == nil || w.RunAttempt == nil { - return 0 - } - return *w.RunAttempt -} - -// GetRunNumber returns the RunNumber field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetRunNumber() int { - if w == nil || w.RunNumber == nil { - return 0 - } - return *w.RunNumber -} - -// GetRunStartedAt returns the RunStartedAt field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetRunStartedAt() Timestamp { - if w == nil || w.RunStartedAt == nil { - return Timestamp{} - } - return *w.RunStartedAt -} - -// GetStatus returns the Status field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetStatus() string { - if w == nil || w.Status == nil { - return "" - } - return *w.Status -} - -// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetUpdatedAt() Timestamp { - if w == nil || w.UpdatedAt == nil { - return Timestamp{} - } - return *w.UpdatedAt -} - -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetURL() string { - if w == nil || w.URL == nil { - return "" - } - return *w.URL -} - -// GetWorkflowID returns the WorkflowID field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetWorkflowID() int64 { - if w == nil || w.WorkflowID == nil { - return 0 - } - return *w.WorkflowID -} - -// GetWorkflowURL returns the WorkflowURL field if it's non-nil, zero value otherwise. -func (w *WorkflowRun) GetWorkflowURL() string { - if w == nil || w.WorkflowURL == nil { - return "" - } - return *w.WorkflowURL -} - -// GetExcludePullRequests returns the ExcludePullRequests field if it's non-nil, zero value otherwise. -func (w *WorkflowRunAttemptOptions) GetExcludePullRequests() bool { - if w == nil || w.ExcludePullRequests == nil { - return false - } - return *w.ExcludePullRequests -} - -// GetJobs returns the Jobs field if it's non-nil, zero value otherwise. 
-func (w *WorkflowRunBill) GetJobs() int { - if w == nil || w.Jobs == nil { - return 0 - } - return *w.Jobs -} - -// GetTotalMS returns the TotalMS field if it's non-nil, zero value otherwise. -func (w *WorkflowRunBill) GetTotalMS() int64 { - if w == nil || w.TotalMS == nil { - return 0 - } - return *w.TotalMS -} - -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (w *WorkflowRunEvent) GetAction() string { - if w == nil || w.Action == nil { - return "" - } - return *w.Action -} - -// GetInstallation returns the Installation field. -func (w *WorkflowRunEvent) GetInstallation() *Installation { - if w == nil { - return nil - } - return w.Installation -} - -// GetOrg returns the Org field. -func (w *WorkflowRunEvent) GetOrg() *Organization { - if w == nil { - return nil - } - return w.Org -} - -// GetRepo returns the Repo field. -func (w *WorkflowRunEvent) GetRepo() *Repository { - if w == nil { - return nil - } - return w.Repo -} - -// GetSender returns the Sender field. -func (w *WorkflowRunEvent) GetSender() *User { - if w == nil { - return nil - } - return w.Sender -} - -// GetWorkflow returns the Workflow field. -func (w *WorkflowRunEvent) GetWorkflow() *Workflow { - if w == nil { - return nil - } - return w.Workflow -} - -// GetWorkflowRun returns the WorkflowRun field. -func (w *WorkflowRunEvent) GetWorkflowRun() *WorkflowRun { - if w == nil { - return nil - } - return w.WorkflowRun -} - -// GetDurationMS returns the DurationMS field if it's non-nil, zero value otherwise. -func (w *WorkflowRunJobRun) GetDurationMS() int64 { - if w == nil || w.DurationMS == nil { - return 0 - } - return *w.DurationMS -} - -// GetJobID returns the JobID field if it's non-nil, zero value otherwise. -func (w *WorkflowRunJobRun) GetJobID() int { - if w == nil || w.JobID == nil { - return 0 - } - return *w.JobID -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (w *WorkflowRuns) GetTotalCount() int { - if w == nil || w.TotalCount == nil { - return 0 - } - return *w.TotalCount -} - -// GetBillable returns the Billable field. -func (w *WorkflowRunUsage) GetBillable() *WorkflowRunBillMap { - if w == nil { - return nil - } - return w.Billable -} - -// GetRunDurationMS returns the RunDurationMS field if it's non-nil, zero value otherwise. -func (w *WorkflowRunUsage) GetRunDurationMS() int64 { - if w == nil || w.RunDurationMS == nil { - return 0 - } - return *w.RunDurationMS -} - -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (w *Workflows) GetTotalCount() int { - if w == nil || w.TotalCount == nil { - return 0 - } - return *w.TotalCount -} - -// GetBillable returns the Billable field. -func (w *WorkflowUsage) GetBillable() *WorkflowBillMap { - if w == nil { - return nil - } - return w.Billable -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/github.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/github.go deleted file mode 100644 index 34a27282f0..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/github.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:generate go run gen-accessors.go -//go:generate go run gen-stringify-test.go - -package github - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/google/go-querystring/query" - "golang.org/x/oauth2" -) - -const ( - Version = "v53.2.0" - - defaultAPIVersion = "2022-11-28" - defaultBaseURL = "https://api.github.com/" - defaultUserAgent = "go-github" + "/" + Version - uploadBaseURL = "https://uploads.github.com/" - - headerAPIVersion = "X-GitHub-Api-Version" - headerRateLimit = "X-RateLimit-Limit" - headerRateRemaining = "X-RateLimit-Remaining" - headerRateReset = "X-RateLimit-Reset" - headerOTP = "X-GitHub-OTP" - headerRetryAfter = "Retry-After" - - headerTokenExpiration = "GitHub-Authentication-Token-Expiration" - - mediaTypeV3 = "application/vnd.github.v3+json" - defaultMediaType = "application/octet-stream" - mediaTypeV3SHA = "application/vnd.github.v3.sha" - mediaTypeV3Diff = "application/vnd.github.v3.diff" - mediaTypeV3Patch = "application/vnd.github.v3.patch" - mediaTypeOrgPermissionRepo = "application/vnd.github.v3.repository+json" - mediaTypeIssueImportAPI = "application/vnd.github.golden-comet-preview+json" - - // Media Type values to access preview APIs - // These media types will be added to the API request as headers - // and used to enable particular features on GitHub API that are still in preview. - // After some time, specific media types will be promoted (to a "stable" state). - // From then on, the preview headers are not required anymore to activate the additional - // feature on GitHub.com's API. However, this API header might still be needed for users - // to run a GitHub Enterprise Server on-premise. - // It's not uncommon for GitHub Enterprise Server customers to run older versions which - // would probably rely on the preview headers for some time. - // While the header promotion is going out for GitHub.com, it may be some time before it - // even arrives in GitHub Enterprise Server. - // We keep those preview headers around to avoid breaking older GitHub Enterprise Server - // versions. Additionally, non-functional (preview) headers don't create any side effects - // on GitHub Cloud version. - // - // See https://github.com/google/go-github/pull/2125 for full context. 
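For context on how these constants are consumed: service methods in this library opt into a preview by overriding the Accept header on the request they build. A condensed sketch of that internal pattern; the endpoint and receiver are illustrative:

```go
// Condensed sketch of a service method applying a preview media type.
// Only the Header.Set line differs from a plain v3 call.
func (s *ActivityService) listStargazersSketch(ctx context.Context, owner, repo string) (*Response, error) {
	u := fmt.Sprintf("repos/%v/%v/stargazers", owner, repo)
	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	// The starring preview adds starred_at to each stargazer entry.
	req.Header.Set("Accept", mediaTypeStarringPreview)
	return s.client.Do(ctx, req, nil)
}
```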
- - // https://developer.github.com/changes/2014-12-09-new-attributes-for-stars-api/ - mediaTypeStarringPreview = "application/vnd.github.v3.star+json" - - // https://help.github.com/enterprise/2.4/admin/guides/migrations/exporting-the-github-com-organization-s-repositories/ - mediaTypeMigrationsPreview = "application/vnd.github.wyandotte-preview+json" - - // https://developer.github.com/changes/2016-04-06-deployment-and-deployment-status-enhancements/ - mediaTypeDeploymentStatusPreview = "application/vnd.github.ant-man-preview+json" - - // https://developer.github.com/changes/2018-10-16-deployments-environments-states-and-auto-inactive-updates/ - mediaTypeExpandDeploymentStatusPreview = "application/vnd.github.flash-preview+json" - - // https://developer.github.com/changes/2016-05-12-reactions-api-preview/ - mediaTypeReactionsPreview = "application/vnd.github.squirrel-girl-preview" - - // https://developer.github.com/changes/2016-05-23-timeline-preview-api/ - mediaTypeTimelinePreview = "application/vnd.github.mockingbird-preview+json" - - // https://developer.github.com/changes/2016-09-14-projects-api/ - mediaTypeProjectsPreview = "application/vnd.github.inertia-preview+json" - - // https://developer.github.com/changes/2017-01-05-commit-search-api/ - mediaTypeCommitSearchPreview = "application/vnd.github.cloak-preview+json" - - // https://developer.github.com/changes/2017-02-28-user-blocking-apis-and-webhook/ - mediaTypeBlockUsersPreview = "application/vnd.github.giant-sentry-fist-preview+json" - - // https://developer.github.com/changes/2017-05-23-coc-api/ - mediaTypeCodesOfConductPreview = "application/vnd.github.scarlet-witch-preview+json" - - // https://developer.github.com/changes/2017-07-17-update-topics-on-repositories/ - mediaTypeTopicsPreview = "application/vnd.github.mercy-preview+json" - - // https://developer.github.com/changes/2018-03-16-protected-branches-required-approving-reviews/ - mediaTypeRequiredApprovingReviewsPreview = "application/vnd.github.luke-cage-preview+json" - - // https://developer.github.com/changes/2018-05-07-new-checks-api-public-beta/ - mediaTypeCheckRunsPreview = "application/vnd.github.antiope-preview+json" - - // https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/ - mediaTypePreReceiveHooksPreview = "application/vnd.github.eye-scream-preview" - - // https://developer.github.com/changes/2018-02-22-protected-branches-required-signatures/ - mediaTypeSignaturePreview = "application/vnd.github.zzzax-preview+json" - - // https://developer.github.com/changes/2018-09-05-project-card-events/ - mediaTypeProjectCardDetailsPreview = "application/vnd.github.starfox-preview+json" - - // https://developer.github.com/changes/2018-12-18-interactions-preview/ - mediaTypeInteractionRestrictionsPreview = "application/vnd.github.sombra-preview+json" - - // https://developer.github.com/changes/2019-03-14-enabling-disabling-pages/ - mediaTypeEnablePagesAPIPreview = "application/vnd.github.switcheroo-preview+json" - - // https://developer.github.com/changes/2019-04-24-vulnerability-alerts/ - mediaTypeRequiredVulnerabilityAlertsPreview = "application/vnd.github.dorian-preview+json" - - // https://developer.github.com/changes/2019-06-04-automated-security-fixes/ - mediaTypeRequiredAutomatedSecurityFixesPreview = "application/vnd.github.london-preview+json" - - // https://developer.github.com/changes/2019-05-29-update-branch-api/ - mediaTypeUpdatePullRequestBranchPreview = "application/vnd.github.lydian-preview+json" - - // 
https://developer.github.com/changes/2019-04-11-pulls-branches-for-commit/ - mediaTypeListPullsOrBranchesForCommitPreview = "application/vnd.github.groot-preview+json" - - // https://docs.github.com/en/rest/previews/#repository-creation-permissions - mediaTypeMemberAllowedRepoCreationTypePreview = "application/vnd.github.surtur-preview+json" - - // https://docs.github.com/en/rest/previews/#create-and-use-repository-templates - mediaTypeRepositoryTemplatePreview = "application/vnd.github.baptiste-preview+json" - - // https://developer.github.com/changes/2019-10-03-multi-line-comments/ - mediaTypeMultiLineCommentsPreview = "application/vnd.github.comfort-fade-preview+json" - - // https://developer.github.com/changes/2019-11-05-deprecated-passwords-and-authorizations-api/ - mediaTypeOAuthAppPreview = "application/vnd.github.doctor-strange-preview+json" - - // https://developer.github.com/changes/2019-12-03-internal-visibility-changes/ - mediaTypeRepositoryVisibilityPreview = "application/vnd.github.nebula-preview+json" - - // https://developer.github.com/changes/2018-12-10-content-attachments-api/ - mediaTypeContentAttachmentsPreview = "application/vnd.github.corsair-preview+json" -) - -var errNonNilContext = errors.New("context must be non-nil") - -// A Client manages communication with the GitHub API. -type Client struct { - clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func. - client *http.Client // HTTP client used to communicate with the API. - - // Base URL for API requests. Defaults to the public GitHub API, but can be - // set to a domain endpoint to use with GitHub Enterprise. BaseURL should - // always be specified with a trailing slash. - BaseURL *url.URL - - // Base URL for uploading files. - UploadURL *url.URL - - // User agent used when communicating with the GitHub API. - UserAgent string - - rateMu sync.Mutex - rateLimits [categories]Rate // Rate limits for the client as determined by the most recent API calls. - secondaryRateLimitReset time.Time // Secondary rate limit reset for the client as determined by the most recent API calls. - - common service // Reuse a single struct instead of allocating one for each service on the heap. - - // Services used for talking to different parts of the GitHub API. - Actions *ActionsService - Activity *ActivityService - Admin *AdminService - Apps *AppsService - Authorizations *AuthorizationsService - Billing *BillingService - Checks *ChecksService - CodeScanning *CodeScanningService - Codespaces *CodespacesService - Dependabot *DependabotService - Enterprise *EnterpriseService - Gists *GistsService - Git *GitService - Gitignores *GitignoresService - Interactions *InteractionsService - IssueImport *IssueImportService - Issues *IssuesService - Licenses *LicensesService - Marketplace *MarketplaceService - Migrations *MigrationService - Organizations *OrganizationsService - Projects *ProjectsService - PullRequests *PullRequestsService - Reactions *ReactionsService - Repositories *RepositoriesService - SCIM *SCIMService - Search *SearchService - SecretScanning *SecretScanningService - Teams *TeamsService - Users *UsersService -} - -type service struct { - client *Client -} - -// Client returns the http.Client used by this GitHub client. -func (c *Client) Client() *http.Client { - c.clientMu.Lock() - defer c.clientMu.Unlock() - clientCopy := *c.client - return &clientCopy -} - -// ListOptions specifies the optional parameters to various List methods that -// support offset pagination. 
-type ListOptions struct { - // For paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty"` - - // For paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty"` -} - -// ListCursorOptions specifies the optional parameters to various List methods that -// support cursor pagination. -type ListCursorOptions struct { - // For paginated result sets, page of results to retrieve. - Page string `url:"page,omitempty"` - - // For paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty"` - - // For paginated result sets, the number of results per page (max 100), starting from the first matching result. - // This parameter must not be used in combination with last. - First int `url:"first,omitempty"` - - // For paginated result sets, the number of results per page (max 100), starting from the last matching result. - // This parameter must not be used in combination with first. - Last int `url:"last,omitempty"` - - // A cursor, as given in the Link header. If specified, the query only searches for events after this cursor. - After string `url:"after,omitempty"` - - // A cursor, as given in the Link header. If specified, the query only searches for events before this cursor. - Before string `url:"before,omitempty"` - - // A cursor, as given in the Link header. If specified, the query continues the search using this cursor. - Cursor string `url:"cursor,omitempty"` -} - -// UploadOptions specifies the parameters to methods that support uploads. -type UploadOptions struct { - Name string `url:"name,omitempty"` - Label string `url:"label,omitempty"` - MediaType string `url:"-"` -} - -// RawType represents type of raw format of a request instead of JSON. -type RawType uint8 - -const ( - // Diff format. - Diff RawType = 1 + iota - // Patch format. - Patch -) - -// RawOptions specifies parameters when user wants to get raw format of -// a response instead of JSON. -type RawOptions struct { - Type RawType -} - -// addOptions adds the parameters in opts as URL query parameters to s. opts -// must be a struct whose fields may contain "url" tags. -func addOptions(s string, opts interface{}) (string, error) { - v := reflect.ValueOf(opts) - if v.Kind() == reflect.Ptr && v.IsNil() { - return s, nil - } - - u, err := url.Parse(s) - if err != nil { - return s, err - } - - qs, err := query.Values(opts) - if err != nil { - return s, err - } - - u.RawQuery = qs.Encode() - return u.String(), nil -} - -// NewClient returns a new GitHub API client. If a nil httpClient is -// provided, a new http.Client will be used. To use API methods which require -// authentication, provide an http.Client that will perform the authentication -// for you (such as that provided by the golang.org/x/oauth2 library). 
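ListOptions pairs with the NextPage field on Response (defined later in this file); the canonical listing loop looks roughly like the sketch below, assuming the Repositories.ListByOrg method and its RepositoryListByOrgOptions from elsewhere in this package:

```go
// Pagination sketch: feed Response.NextPage back into ListOptions.Page
// until it reaches zero, collecting every page along the way.
func listAllOrgRepos(ctx context.Context, client *Client, org string) ([]*Repository, error) {
	opts := &RepositoryListByOrgOptions{ListOptions: ListOptions{PerPage: 100}}
	var all []*Repository
	for {
		repos, resp, err := client.Repositories.ListByOrg(ctx, org, opts)
		if err != nil {
			return nil, err
		}
		all = append(all, repos...)
		if resp.NextPage == 0 {
			return all, nil
		}
		opts.Page = resp.NextPage
	}
}
```

PerPage is capped at 100 by the API, so this loop already requests the largest pages it can.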
-func NewClient(httpClient *http.Client) *Client {
-	if httpClient == nil {
-		httpClient = &http.Client{}
-	}
-	baseURL, _ := url.Parse(defaultBaseURL)
-	uploadURL, _ := url.Parse(uploadBaseURL)
-
-	c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: defaultUserAgent, UploadURL: uploadURL}
-	c.common.client = c
-	c.Actions = (*ActionsService)(&c.common)
-	c.Activity = (*ActivityService)(&c.common)
-	c.Admin = (*AdminService)(&c.common)
-	c.Apps = (*AppsService)(&c.common)
-	c.Authorizations = (*AuthorizationsService)(&c.common)
-	c.Billing = (*BillingService)(&c.common)
-	c.Checks = (*ChecksService)(&c.common)
-	c.CodeScanning = (*CodeScanningService)(&c.common)
-	c.Codespaces = (*CodespacesService)(&c.common)
-	c.Dependabot = (*DependabotService)(&c.common)
-	c.Enterprise = (*EnterpriseService)(&c.common)
-	c.Gists = (*GistsService)(&c.common)
-	c.Git = (*GitService)(&c.common)
-	c.Gitignores = (*GitignoresService)(&c.common)
-	c.Interactions = (*InteractionsService)(&c.common)
-	c.IssueImport = (*IssueImportService)(&c.common)
-	c.Issues = (*IssuesService)(&c.common)
-	c.Licenses = (*LicensesService)(&c.common)
-	c.Marketplace = &MarketplaceService{client: c}
-	c.Migrations = (*MigrationService)(&c.common)
-	c.Organizations = (*OrganizationsService)(&c.common)
-	c.Projects = (*ProjectsService)(&c.common)
-	c.PullRequests = (*PullRequestsService)(&c.common)
-	c.Reactions = (*ReactionsService)(&c.common)
-	c.Repositories = (*RepositoriesService)(&c.common)
-	c.SCIM = (*SCIMService)(&c.common)
-	c.Search = (*SearchService)(&c.common)
-	c.SecretScanning = (*SecretScanningService)(&c.common)
-	c.Teams = (*TeamsService)(&c.common)
-	c.Users = (*UsersService)(&c.common)
-	return c
-}
-
-// NewClientWithEnvProxy enhances NewClient with the HttpProxy env.
-func NewClientWithEnvProxy() *Client {
-	return NewClient(&http.Client{Transport: &http.Transport{Proxy: http.ProxyFromEnvironment}})
-}
-
-// NewTokenClient returns a new GitHub API client authenticated with the provided token.
-func NewTokenClient(ctx context.Context, token string) *Client {
-	return NewClient(oauth2.NewClient(ctx, oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})))
-}
-
-// NewEnterpriseClient returns a new GitHub API client with the provided
-// base URL and upload URL (often your GitHub Enterprise hostname).
-// If the base URL does not have the suffix "/api/v3/", it will be added automatically.
-// If the upload URL does not have the suffix "/api/uploads", it will be added automatically.
-// If a nil httpClient is provided, a new http.Client will be used.
-//
-// Note that NewEnterpriseClient is a convenience helper only;
-// its behavior is equivalent to using NewClient, followed by setting
-// the BaseURL and UploadURL fields.
-//
-// Note also that, by default, the GitHub Enterprise URL format
-// should be http(s)://[hostname]/api/v3/ or you will always receive a 406 status code.
-// The upload URL format should be http(s)://[hostname]/api/uploads/.
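Putting the constructors side by side; a sketch with placeholder token and hostnames, relying on the suffix-appending behavior described above:

```go
// Construction sketch: a token client for github.com and an Enterprise
// client whose base/upload URLs gain /api/v3/ and /api/uploads/ suffixes
// automatically. Token and hostnames are placeholders.
func newClientsSketch(ctx context.Context) (*Client, *Client, error) {
	public := NewTokenClient(ctx, "ghp_placeholder")
	enterprise, err := NewEnterpriseClient(
		"https://github.example.com/",
		"https://github.example.com/",
		nil, // nil httpClient: a default http.Client is created
	)
	if err != nil {
		return nil, nil, err
	}
	return public, enterprise, nil
}
```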
-func NewEnterpriseClient(baseURL, uploadURL string, httpClient *http.Client) (*Client, error) { - baseEndpoint, err := url.Parse(baseURL) - if err != nil { - return nil, err - } - - if !strings.HasSuffix(baseEndpoint.Path, "/") { - baseEndpoint.Path += "/" - } - if !strings.HasSuffix(baseEndpoint.Path, "/api/v3/") && - !strings.HasPrefix(baseEndpoint.Host, "api.") && - !strings.Contains(baseEndpoint.Host, ".api.") { - baseEndpoint.Path += "api/v3/" - } - - uploadEndpoint, err := url.Parse(uploadURL) - if err != nil { - return nil, err - } - - if !strings.HasSuffix(uploadEndpoint.Path, "/") { - uploadEndpoint.Path += "/" - } - if !strings.HasSuffix(uploadEndpoint.Path, "/api/uploads/") && - !strings.HasPrefix(uploadEndpoint.Host, "api.") && - !strings.Contains(uploadEndpoint.Host, ".api.") { - uploadEndpoint.Path += "api/uploads/" - } - - c := NewClient(httpClient) - c.BaseURL = baseEndpoint - c.UploadURL = uploadEndpoint - return c, nil -} - -// RequestOption represents an option that can modify an http.Request. -type RequestOption func(req *http.Request) - -// WithVersion overrides the GitHub v3 API version for this individual request. -// For more information, see: -// https://github.blog/2022-11-28-to-infinity-and-beyond-enabling-the-future-of-githubs-rest-api-with-api-versioning/ -func WithVersion(version string) RequestOption { - return func(req *http.Request) { - req.Header.Set(headerAPIVersion, version) - } -} - -// NewRequest creates an API request. A relative URL can be provided in urlStr, -// in which case it is resolved relative to the BaseURL of the Client. -// Relative URLs should always be specified without a preceding slash. If -// specified, the value pointed to by body is JSON encoded and included as the -// request body. -func (c *Client) NewRequest(method, urlStr string, body interface{}, opts ...RequestOption) (*http.Request, error) { - if !strings.HasSuffix(c.BaseURL.Path, "/") { - return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) - } - - u, err := c.BaseURL.Parse(urlStr) - if err != nil { - return nil, err - } - - var buf io.ReadWriter - if body != nil { - buf = &bytes.Buffer{} - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err := enc.Encode(body) - if err != nil { - return nil, err - } - } - - req, err := http.NewRequest(method, u.String(), buf) - if err != nil { - return nil, err - } - - if body != nil { - req.Header.Set("Content-Type", "application/json") - } - req.Header.Set("Accept", mediaTypeV3) - if c.UserAgent != "" { - req.Header.Set("User-Agent", c.UserAgent) - } - req.Header.Set(headerAPIVersion, defaultAPIVersion) - - for _, opt := range opts { - opt(req) - } - - return req, nil -} - -// NewFormRequest creates an API request. A relative URL can be provided in urlStr, -// in which case it is resolved relative to the BaseURL of the Client. -// Relative URLs should always be specified without a preceding slash. -// Body is sent with Content-Type: application/x-www-form-urlencoded. 
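NewRequest plus a RequestOption covers one-off version overrides; a sketch using the zen endpoint, where Do (defined later in this file) streams the raw body because the target implements io.Writer:

```go
// Request-building sketch: WithVersion overrides the X-GitHub-Api-Version
// header for this call only; passing a bytes.Buffer to Do captures the
// raw response body instead of JSON-decoding it.
func zenSketch(ctx context.Context, client *Client) (string, error) {
	req, err := client.NewRequest("GET", "zen", nil, WithVersion("2022-11-28"))
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if _, err := client.Do(ctx, req, &buf); err != nil {
		return "", err
	}
	return buf.String(), nil
}
```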
-func (c *Client) NewFormRequest(urlStr string, body io.Reader, opts ...RequestOption) (*http.Request, error) { - if !strings.HasSuffix(c.BaseURL.Path, "/") { - return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) - } - - u, err := c.BaseURL.Parse(urlStr) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(http.MethodPost, u.String(), body) - if err != nil { - return nil, err - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.Header.Set("Accept", mediaTypeV3) - if c.UserAgent != "" { - req.Header.Set("User-Agent", c.UserAgent) - } - req.Header.Set(headerAPIVersion, defaultAPIVersion) - - for _, opt := range opts { - opt(req) - } - - return req, nil -} - -// NewUploadRequest creates an upload request. A relative URL can be provided in -// urlStr, in which case it is resolved relative to the UploadURL of the Client. -// Relative URLs should always be specified without a preceding slash. -func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string, opts ...RequestOption) (*http.Request, error) { - if !strings.HasSuffix(c.UploadURL.Path, "/") { - return nil, fmt.Errorf("UploadURL must have a trailing slash, but %q does not", c.UploadURL) - } - u, err := c.UploadURL.Parse(urlStr) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", u.String(), reader) - if err != nil { - return nil, err - } - - req.ContentLength = size - - if mediaType == "" { - mediaType = defaultMediaType - } - req.Header.Set("Content-Type", mediaType) - req.Header.Set("Accept", mediaTypeV3) - req.Header.Set("User-Agent", c.UserAgent) - req.Header.Set(headerAPIVersion, defaultAPIVersion) - - for _, opt := range opts { - opt(req) - } - - return req, nil -} - -// Response is a GitHub API response. This wraps the standard http.Response -// returned from GitHub and provides convenient access to things like -// pagination links. -type Response struct { - *http.Response - - // These fields provide the page values for paginating through a set of - // results. Any or all of these may be set to the zero value for - // responses that are not part of a paginated set, or for which there - // are no additional pages. - // - // These fields support what is called "offset pagination" and should - // be used with the ListOptions struct. - NextPage int - PrevPage int - FirstPage int - LastPage int - - // Additionally, some APIs support "cursor pagination" instead of offset. - // This means that a token points directly to the next record which - // can lead to O(1) performance compared to O(n) performance provided - // by offset pagination. - // - // For APIs that support cursor pagination (such as - // TeamsService.ListIDPGroupsInOrganization), the following field - // will be populated to point to the next page. - // - // To use this token, set ListCursorOptions.Page to this value before - // calling the endpoint again. - NextPageToken string - - // For APIs that support cursor pagination, such as RepositoriesService.ListHookDeliveries, - // the following field will be populated to point to the next page. - // Set ListCursorOptions.Cursor to this value when calling the endpoint again. - Cursor string - - // For APIs that support before/after pagination, such as OrganizationsService.AuditLog. - Before string - After string - - // Explicitly specify the Rate type so Rate's String() receiver doesn't - // propagate to Response. - Rate Rate - - // token's expiration date. 
Timestamp is 0001-01-01 when token doesn't expire.
-	// So it is valid for TokenExpiration.Equal(Timestamp{}) or TokenExpiration.Time.After(time.Now())
-	TokenExpiration Timestamp
-}
-
-// newResponse creates a new Response for the provided http.Response.
-// r must not be nil.
-func newResponse(r *http.Response) *Response {
-	response := &Response{Response: r}
-	response.populatePageValues()
-	response.Rate = parseRate(r)
-	response.TokenExpiration = parseTokenExpiration(r)
-	return response
-}
-
-// populatePageValues parses the HTTP Link response headers and populates the
-// various pagination link values in the Response.
-func (r *Response) populatePageValues() {
-	if links, ok := r.Response.Header["Link"]; ok && len(links) > 0 {
-		for _, link := range strings.Split(links[0], ",") {
-			segments := strings.Split(strings.TrimSpace(link), ";")
-
-			// link must at least have href and rel
-			if len(segments) < 2 {
-				continue
-			}
-
-			// ensure href is properly formatted
-			if !strings.HasPrefix(segments[0], "<") || !strings.HasSuffix(segments[0], ">") {
-				continue
-			}
-
-			// try to pull out page parameter
-			url, err := url.Parse(segments[0][1 : len(segments[0])-1])
-			if err != nil {
-				continue
-			}
-
-			q := url.Query()
-
-			if cursor := q.Get("cursor"); cursor != "" {
-				for _, segment := range segments[1:] {
-					switch strings.TrimSpace(segment) {
-					case `rel="next"`:
-						r.Cursor = cursor
-					}
-				}
-
-				continue
-			}
-
-			page := q.Get("page")
-			since := q.Get("since")
-			before := q.Get("before")
-			after := q.Get("after")
-
-			if page == "" && before == "" && after == "" && since == "" {
-				continue
-			}
-
-			if since != "" && page == "" {
-				page = since
-			}
-
-			for _, segment := range segments[1:] {
-				switch strings.TrimSpace(segment) {
-				case `rel="next"`:
-					if r.NextPage, err = strconv.Atoi(page); err != nil {
-						r.NextPageToken = page
-					}
-					r.After = after
-				case `rel="prev"`:
-					r.PrevPage, _ = strconv.Atoi(page)
-					r.Before = before
-				case `rel="first"`:
-					r.FirstPage, _ = strconv.Atoi(page)
-				case `rel="last"`:
-					r.LastPage, _ = strconv.Atoi(page)
-				}
-			}
-		}
-	}
-}
-
-// parseRate parses the rate related headers.
-func parseRate(r *http.Response) Rate {
-	var rate Rate
-	if limit := r.Header.Get(headerRateLimit); limit != "" {
-		rate.Limit, _ = strconv.Atoi(limit)
-	}
-	if remaining := r.Header.Get(headerRateRemaining); remaining != "" {
-		rate.Remaining, _ = strconv.Atoi(remaining)
-	}
-	if reset := r.Header.Get(headerRateReset); reset != "" {
-		if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 {
-			rate.Reset = Timestamp{time.Unix(v, 0)}
-		}
-	}
-	return rate
-}
-
-// parseSecondaryRate parses the secondary rate related headers,
-// and returns the time to retry after.
-func parseSecondaryRate(r *http.Response) *time.Duration {
-	// According to GitHub support, the "Retry-After" header value will be
-	// an integer which represents the number of seconds that one should
-	// wait before resuming making requests.
-	if v := r.Header.Get(headerRetryAfter); v != "" {
-		retryAfterSeconds, _ := strconv.ParseInt(v, 10, 64) // Error handling is noop.
-		retryAfter := time.Duration(retryAfterSeconds) * time.Second
-		return &retryAfter
-	}
-
-	// According to GitHub support, endpoints might return x-ratelimit-reset instead,
-	// as an integer which represents the number of seconds since epoch UTC,
-	// representing the time to resume making requests.
-	if v := r.Header.Get(headerRateReset); v != "" {
-		secondsSinceEpoch, _ := strconv.ParseInt(v, 10, 64) // Error handling is noop.
- retryAfter := time.Until(time.Unix(secondsSinceEpoch, 0)) - return &retryAfter - } - - return nil -} - -// parseTokenExpiration parses the TokenExpiration related headers. -// Returns 0001-01-01 if the header is not defined or could not be parsed. -func parseTokenExpiration(r *http.Response) Timestamp { - if v := r.Header.Get(headerTokenExpiration); v != "" { - if t, err := time.Parse("2006-01-02 15:04:05 MST", v); err == nil { - return Timestamp{t.Local()} - } - // Some tokens include the timezone offset instead of the timezone. - // https://github.com/google/go-github/issues/2649 - if t, err := time.Parse("2006-01-02 15:04:05 -0700", v); err == nil { - return Timestamp{t.Local()} - } - } - return Timestamp{} // 0001-01-01 00:00:00 -} - -type requestContext uint8 - -const ( - bypassRateLimitCheck requestContext = iota -) - -// BareDo sends an API request and lets you handle the api response. If an error -// or API Error occurs, the error will contain more information. Otherwise you -// are supposed to read and close the response's Body. If rate limit is exceeded -// and reset time is in the future, BareDo returns *RateLimitError immediately -// without making a network API call. -// -// The provided ctx must be non-nil, if it is nil an error is returned. If it is -// canceled or times out, ctx.Err() will be returned. -func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, error) { - if ctx == nil { - return nil, errNonNilContext - } - - req = withContext(ctx, req) - - rateLimitCategory := category(req.Method, req.URL.Path) - - if bypass := ctx.Value(bypassRateLimitCheck); bypass == nil { - // If we've hit rate limit, don't make further requests before Reset time. - if err := c.checkRateLimitBeforeDo(req, rateLimitCategory); err != nil { - return &Response{ - Response: err.Response, - Rate: err.Rate, - }, err - } - // If we've hit a secondary rate limit, don't make further requests before Retry After. - if err := c.checkSecondaryRateLimitBeforeDo(ctx, req); err != nil { - return &Response{ - Response: err.Response, - }, err - } - } - - resp, err := c.client.Do(req) - if err != nil { - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - // If the error type is *url.Error, sanitize its URL before returning. - if e, ok := err.(*url.Error); ok { - if url, err := url.Parse(e.URL); err == nil { - e.URL = sanitizeURL(url).String() - return nil, e - } - } - - return nil, err - } - - response := newResponse(resp) - - // Don't update the rate limits if this was a cached response. - // X-From-Cache is set by https://github.com/gregjones/httpcache - if response.Header.Get("X-From-Cache") == "" { - c.rateMu.Lock() - c.rateLimits[rateLimitCategory] = response.Rate - c.rateMu.Unlock() - } - - err = CheckResponse(resp) - if err != nil { - defer resp.Body.Close() - // Special case for AcceptedErrors. If an AcceptedError - // has been encountered, the response's payload will be - // added to the AcceptedError and returned. - // - // Issue #1022 - aerr, ok := err.(*AcceptedError) - if ok { - b, readErr := io.ReadAll(resp.Body) - if readErr != nil { - return response, readErr - } - - aerr.Raw = b - err = aerr - } - - // Update the secondary rate limit if we hit it. 
-		rerr, ok := err.(*AbuseRateLimitError)
-		if ok && rerr.RetryAfter != nil {
-			c.rateMu.Lock()
-			c.secondaryRateLimitReset = time.Now().Add(*rerr.RetryAfter)
-			c.rateMu.Unlock()
-		}
-	}
-	return response, err
-}
-
-// Do sends an API request and returns the API response. The API response is
-// JSON decoded and stored in the value pointed to by v, or returned as an
-// error if an API error has occurred. If v implements the io.Writer interface,
-// the raw response body will be written to v, without attempting to first
-// decode it. If v is nil, and no error happens, the response is returned as is.
-// If rate limit is exceeded and reset time is in the future, Do returns
-// *RateLimitError immediately without making a network API call.
-//
-// The provided ctx must be non-nil, if it is nil an error is returned. If it
-// is canceled or times out, ctx.Err() will be returned.
-func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) {
-	resp, err := c.BareDo(ctx, req)
-	if err != nil {
-		return resp, err
-	}
-	defer resp.Body.Close()
-
-	switch v := v.(type) {
-	case nil:
-	case io.Writer:
-		_, err = io.Copy(v, resp.Body)
-	default:
-		decErr := json.NewDecoder(resp.Body).Decode(v)
-		if decErr == io.EOF {
-			decErr = nil // ignore EOF errors caused by empty response body
-		}
-		if decErr != nil {
-			err = decErr
-		}
-	}
-	return resp, err
-}
-
-// checkRateLimitBeforeDo does not make any network calls, but uses existing knowledge from
-// current client state in order to quickly check if *RateLimitError can be immediately returned
-// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily.
-// Otherwise it returns nil, and Client.Do should proceed normally.
-func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) *RateLimitError {
-	c.rateMu.Lock()
-	rate := c.rateLimits[rateLimitCategory]
-	c.rateMu.Unlock()
-	if !rate.Reset.Time.IsZero() && rate.Remaining == 0 && time.Now().Before(rate.Reset.Time) {
-		// Create a fake response.
-		resp := &http.Response{
-			Status:     http.StatusText(http.StatusForbidden),
-			StatusCode: http.StatusForbidden,
-			Request:    req,
-			Header:     make(http.Header),
-			Body:       io.NopCloser(strings.NewReader("")),
-		}
-		return &RateLimitError{
-			Rate:     rate,
-			Response: resp,
-			Message:  fmt.Sprintf("API rate limit of %v still exceeded until %v, not making remote request.", rate.Limit, rate.Reset.Time),
-		}
-	}
-
-	return nil
-}
-
-// checkSecondaryRateLimitBeforeDo does not make any network calls, but uses existing knowledge from
-// current client state in order to quickly check if *AbuseRateLimitError can be immediately returned
-// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily.
-// Otherwise it returns nil, and Client.Do should proceed normally.
-func (c *Client) checkSecondaryRateLimitBeforeDo(ctx context.Context, req *http.Request) *AbuseRateLimitError {
-	c.rateMu.Lock()
-	secondary := c.secondaryRateLimitReset
-	c.rateMu.Unlock()
-	if !secondary.IsZero() && time.Now().Before(secondary) {
-		// Create a fake response.
- resp := &http.Response{ - Status: http.StatusText(http.StatusForbidden), - StatusCode: http.StatusForbidden, - Request: req, - Header: make(http.Header), - Body: io.NopCloser(strings.NewReader("")), - } - - retryAfter := time.Until(secondary) - return &AbuseRateLimitError{ - Response: resp, - Message: fmt.Sprintf("API secondary rate limit exceeded until %v, not making remote request.", secondary), - RetryAfter: &retryAfter, - } - } - - return nil -} - -// compareHTTPResponse returns whether two http.Response objects are equal or not. -// Currently, only StatusCode is checked. This function is used when implementing the -// Is(error) bool interface for the custom error types in this package. -func compareHTTPResponse(r1, r2 *http.Response) bool { - if r1 == nil && r2 == nil { - return true - } - - if r1 != nil && r2 != nil { - return r1.StatusCode == r2.StatusCode - } - return false -} - -/* -An ErrorResponse reports one or more errors caused by an API request. - -GitHub API docs: https://docs.github.com/en/rest/#client-errors -*/ -type ErrorResponse struct { - Response *http.Response `json:"-"` // HTTP response that caused this error - Message string `json:"message"` // error message - Errors []Error `json:"errors"` // more detail on individual errors - // Block is only populated on certain types of errors such as code 451. - Block *ErrorBlock `json:"block,omitempty"` - // Most errors will also include a documentation_url field pointing - // to some content that might help you resolve the error, see - // https://docs.github.com/en/rest/#client-errors - DocumentationURL string `json:"documentation_url,omitempty"` -} - -// ErrorBlock contains a further explanation for the reason of an error. -// See https://developer.github.com/changes/2016-03-17-the-451-status-code-is-now-supported/ -// for more information. -type ErrorBlock struct { - Reason string `json:"reason,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` -} - -func (r *ErrorResponse) Error() string { - return fmt.Sprintf("%v %v: %d %v %+v", - r.Response.Request.Method, sanitizeURL(r.Response.Request.URL), - r.Response.StatusCode, r.Message, r.Errors) -} - -// Is returns whether the provided error equals this error. -func (r *ErrorResponse) Is(target error) bool { - v, ok := target.(*ErrorResponse) - if !ok { - return false - } - - if r.Message != v.Message || (r.DocumentationURL != v.DocumentationURL) || - !compareHTTPResponse(r.Response, v.Response) { - return false - } - - // Compare Errors. - if len(r.Errors) != len(v.Errors) { - return false - } - for idx := range r.Errors { - if r.Errors[idx] != v.Errors[idx] { - return false - } - } - - // Compare Block. - if (r.Block != nil && v.Block == nil) || (r.Block == nil && v.Block != nil) { - return false - } - if r.Block != nil && v.Block != nil { - if r.Block.Reason != v.Block.Reason { - return false - } - if (r.Block.CreatedAt != nil && v.Block.CreatedAt == nil) || (r.Block.CreatedAt == - nil && v.Block.CreatedAt != nil) { - return false - } - if r.Block.CreatedAt != nil && v.Block.CreatedAt != nil { - if *(r.Block.CreatedAt) != *(v.Block.CreatedAt) { - return false - } - } - } - - return true -} - -// TwoFactorAuthError occurs when using HTTP Basic Authentication for a user -// that has two-factor authentication enabled. The request can be reattempted -// by providing a one-time password in the request. 
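Since these error types all implement error (and Is), callers can branch on them with errors.As; a sketch with placeholder repository coordinates, assuming the Repositories.Get method from this package:

```go
// Error-handling sketch: react differently to the primary and secondary
// rate-limit errors surfaced by CheckResponse (both defined just below).
func getWithBackoff(ctx context.Context, client *Client) error {
	_, _, err := client.Repositories.Get(ctx, "octocat", "hello-world")
	var rateErr *RateLimitError
	var abuseErr *AbuseRateLimitError
	switch {
	case errors.As(err, &rateErr):
		time.Sleep(time.Until(rateErr.Rate.Reset.Time)) // wait out the primary limit
	case errors.As(err, &abuseErr) && abuseErr.RetryAfter != nil:
		time.Sleep(*abuseErr.RetryAfter) // honor the secondary limit's Retry-After
	}
	return err
}
```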
-type TwoFactorAuthError ErrorResponse
-
-func (r *TwoFactorAuthError) Error() string { return (*ErrorResponse)(r).Error() }
-
-// RateLimitError occurs when GitHub returns a 403 Forbidden response with a rate limit
-// remaining value of 0.
-type RateLimitError struct {
-	Rate     Rate           // Rate specifies last known rate limit for the client
-	Response *http.Response // HTTP response that caused this error
-	Message  string         `json:"message"` // error message
-}
-
-func (r *RateLimitError) Error() string {
-	return fmt.Sprintf("%v %v: %d %v %v",
-		r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
-		r.Response.StatusCode, r.Message, formatRateReset(time.Until(r.Rate.Reset.Time)))
-}
-
-// Is returns whether the provided error equals this error.
-func (r *RateLimitError) Is(target error) bool {
-	v, ok := target.(*RateLimitError)
-	if !ok {
-		return false
-	}
-
-	return r.Rate == v.Rate &&
-		r.Message == v.Message &&
-		compareHTTPResponse(r.Response, v.Response)
-}
-
-// AcceptedError occurs when GitHub returns a 202 Accepted response with an
-// empty body, which means a job was scheduled on the GitHub side to process
-// the information needed and cache it.
-// Technically, 202 Accepted is not a real error; it's just used to
-// indicate that results are not ready yet, but should be available soon.
-// The request can be repeated after some time.
-type AcceptedError struct {
-	// Raw contains the response body.
-	Raw []byte
-}
-
-func (*AcceptedError) Error() string {
-	return "job scheduled on GitHub side; try again later"
-}
-
-// Is returns whether the provided error equals this error.
-func (ae *AcceptedError) Is(target error) bool {
-	v, ok := target.(*AcceptedError)
-	if !ok {
-		return false
-	}
-	return bytes.Equal(ae.Raw, v.Raw)
-}
-
-// AbuseRateLimitError occurs when GitHub returns a 403 Forbidden response with the
-// "documentation_url" field value equal to "https://docs.github.com/en/rest/overview/resources-in-the-rest-api#secondary-rate-limits".
-type AbuseRateLimitError struct {
-	Response *http.Response // HTTP response that caused this error
-	Message  string         `json:"message"` // error message
-
-	// RetryAfter is provided with some abuse rate limit errors. If present,
-	// it is the amount of time that the client should wait before retrying.
-	// Otherwise, the client should try again later (after an unspecified amount of time).
-	RetryAfter *time.Duration
-}
-
-func (r *AbuseRateLimitError) Error() string {
-	return fmt.Sprintf("%v %v: %d %v",
-		r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
-		r.Response.StatusCode, r.Message)
-}
-
-// Is returns whether the provided error equals this error.
-func (r *AbuseRateLimitError) Is(target error) bool {
-	v, ok := target.(*AbuseRateLimitError)
-	if !ok {
-		return false
-	}
-
-	return r.Message == v.Message &&
-		r.RetryAfter == v.RetryAfter &&
-		compareHTTPResponse(r.Response, v.Response)
-}
-
-// sanitizeURL redacts the client_secret parameter from the URL which may be
-// exposed to the user.
-func sanitizeURL(uri *url.URL) *url.URL {
-	if uri == nil {
-		return nil
-	}
-	params := uri.Query()
-	if len(params.Get("client_secret")) > 0 {
-		params.Set("client_secret", "REDACTED")
-		uri.RawQuery = params.Encode()
-	}
-	return uri
-}
-
-/*
-An Error reports more details on an individual error in an ErrorResponse.
-
-/*
-An Error reports more details on an individual error in an ErrorResponse.
-
-These are the possible validation error codes:
-
-	missing:
-	    resource does not exist
-	missing_field:
-	    a required field on a resource has not been set
-	invalid:
-	    the formatting of a field is invalid
-	already_exists:
-	    another resource has the same value as this field
-	custom:
-	    some resources return this (e.g. github.User.CreateKey()), additional
-	    information is set in the Message field of the Error
-
-The structure of GitHub error responses is often undocumented and inconsistent.
-Sometimes the error is just a simple string (Issue #540).
-In such cases, Message represents an error message as a workaround.
-
-GitHub API docs: https://docs.github.com/en/rest/#client-errors
-*/
-type Error struct {
-	Resource string `json:"resource"` // resource on which the error occurred
-	Field    string `json:"field"`    // field on which the error occurred
-	Code     string `json:"code"`     // validation error code
-	Message  string `json:"message"`  // Message describing the error. Errors with Code == "custom" will always have this set.
-}
-
-func (e *Error) Error() string {
-	return fmt.Sprintf("%v error caused by %v field on %v resource",
-		e.Code, e.Field, e.Resource)
-}
-
-func (e *Error) UnmarshalJSON(data []byte) error {
-	type aliasError Error // avoid infinite recursion by using type alias.
-	if err := json.Unmarshal(data, (*aliasError)(e)); err != nil {
-		return json.Unmarshal(data, &e.Message) // data can be json string.
-	}
-	return nil
-}
-
-// CheckResponse checks the API response for errors, and returns them if
-// present. A response is considered an error if it has a status code outside
-// the 200 range or equal to 202 Accepted.
-// API error responses are expected to have either no response body, or a
-// JSON response body that maps to ErrorResponse.
-//
-// The error type will be *RateLimitError for rate limit exceeded errors,
-// *AcceptedError for 202 Accepted status codes,
-// and *TwoFactorAuthError for two-factor authentication errors.
-func CheckResponse(r *http.Response) error {
-	if r.StatusCode == http.StatusAccepted {
-		return &AcceptedError{}
-	}
-	if c := r.StatusCode; 200 <= c && c <= 299 {
-		return nil
-	}
-
-	errorResponse := &ErrorResponse{Response: r}
-	data, err := io.ReadAll(r.Body)
-	if err == nil && data != nil {
-		json.Unmarshal(data, errorResponse)
-	}
-	// Re-populate error response body because GitHub error responses are often
-	// undocumented and inconsistent.
-	// Issue #1136, #540.
-	r.Body = io.NopCloser(bytes.NewBuffer(data))
-	switch {
-	case r.StatusCode == http.StatusUnauthorized && strings.HasPrefix(r.Header.Get(headerOTP), "required"):
-		return (*TwoFactorAuthError)(errorResponse)
-	case r.StatusCode == http.StatusForbidden && r.Header.Get(headerRateRemaining) == "0":
-		return &RateLimitError{
-			Rate:     parseRate(r),
-			Response: errorResponse.Response,
-			Message:  errorResponse.Message,
-		}
-	case r.StatusCode == http.StatusForbidden &&
-		(strings.HasSuffix(errorResponse.DocumentationURL, "#abuse-rate-limits") ||
-			strings.HasSuffix(errorResponse.DocumentationURL, "#secondary-rate-limits")):
-		abuseRateLimitError := &AbuseRateLimitError{
-			Response: errorResponse.Response,
-			Message:  errorResponse.Message,
-		}
-		if retryAfter := parseSecondaryRate(r); retryAfter != nil {
-			abuseRateLimitError.RetryAfter = retryAfter
-		}
-		return abuseRateLimitError
-	default:
-		return errorResponse
-	}
-}
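-
-// A minimal sketch of what CheckResponse returns for a couple of status
-// codes; the hand-built response here is purely illustrative:
-//
-//	resp := &http.Response{
-//		StatusCode: http.StatusAccepted,
-//		Header:     make(http.Header),
-//		Body:       io.NopCloser(strings.NewReader("")),
-//	}
-//	err := github.CheckResponse(resp) // *AcceptedError: result not ready yet
-//
-//	resp.StatusCode = http.StatusNotFound
-//	resp.Body = io.NopCloser(strings.NewReader(`{"message":"Not Found"}`))
-//	err = github.CheckResponse(resp) // *ErrorResponse with Message "Not Found"
-//	_ = err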
-
-// parseBoolResponse determines the boolean result from a GitHub API response.
-// Several GitHub API methods return boolean responses indicated by the HTTP
-// status code in the response (true indicated by a 204, false indicated by a
-// 404). This helper function will determine that result and hide the 404
-// error if present. Any other error will be returned through as-is.
-func parseBoolResponse(err error) (bool, error) {
-	if err == nil {
-		return true, nil
-	}
-
-	if err, ok := err.(*ErrorResponse); ok && err.Response.StatusCode == http.StatusNotFound {
-		// Simply false. In this one case, we do not pass the error through.
-		return false, nil
-	}
-
-	// some other real error occurred
-	return false, err
-}
-
-// Rate represents the rate limit for the current client.
-type Rate struct {
-	// The number of requests per hour the client is currently limited to.
-	Limit int `json:"limit"`
-
-	// The number of remaining requests the client can make this hour.
-	Remaining int `json:"remaining"`
-
-	// The time at which the current rate limit will reset.
-	Reset Timestamp `json:"reset"`
-}
-
-func (r Rate) String() string {
-	return Stringify(r)
-}
-
-// RateLimits represents the rate limits for the current client.
-type RateLimits struct {
-	// The rate limit for non-search API requests. Unauthenticated
-	// requests are limited to 60 per hour. Authenticated requests are
-	// limited to 5,000 per hour.
-	//
-	// GitHub API docs: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
-	Core *Rate `json:"core"`
-
-	// The rate limit for search API requests. Unauthenticated requests
-	// are limited to 10 requests per minute. Authenticated requests are
-	// limited to 30 per minute.
-	//
-	// GitHub API docs: https://docs.github.com/en/rest/search#rate-limit
-	Search *Rate `json:"search"`
-
-	// GitHub API docs: https://docs.github.com/en/graphql/overview/resource-limitations#rate-limit
-	GraphQL *Rate `json:"graphql"`
-
-	// GitHub API docs: https://docs.github.com/en/rest/rate-limit
-	IntegrationManifest *Rate `json:"integration_manifest"`
-
-	SourceImport              *Rate `json:"source_import"`
-	CodeScanningUpload        *Rate `json:"code_scanning_upload"`
-	ActionsRunnerRegistration *Rate `json:"actions_runner_registration"`
-	SCIM                      *Rate `json:"scim"`
-}
-
-func (r RateLimits) String() string {
-	return Stringify(r)
-}
-
-type rateLimitCategory uint8
-
-const (
-	coreCategory rateLimitCategory = iota
-	searchCategory
-	graphqlCategory
-	integrationManifestCategory
-	sourceImportCategory
-	codeScanningUploadCategory
-	actionsRunnerRegistrationCategory
-	scimCategory
-
-	categories // An array of this length will be able to contain all rate limit categories.
-)
-
-// category returns the rate limit category of the endpoint, determined by HTTP method and Request.URL.Path.
-func category(method, path string) rateLimitCategory {
-	switch {
-	// https://docs.github.com/en/rest/rate-limit#about-rate-limits
-	default:
-		// NOTE: coreCategory is returned for actionsRunnerRegistrationCategory too,
-		// because no API was found for this category.
- return coreCategory - case strings.HasPrefix(path, "/search/"): - return searchCategory - case path == "/graphql": - return graphqlCategory - case strings.HasPrefix(path, "/app-manifests/") && - strings.HasSuffix(path, "/conversions") && - method == http.MethodPost: - return integrationManifestCategory - - // https://docs.github.com/en/rest/migrations/source-imports#start-an-import - case strings.HasPrefix(path, "/repos/") && - strings.HasSuffix(path, "/import") && - method == http.MethodPut: - return sourceImportCategory - - // https://docs.github.com/en/rest/code-scanning#upload-an-analysis-as-sarif-data - case strings.HasSuffix(path, "/code-scanning/sarifs"): - return codeScanningUploadCategory - - // https://docs.github.com/en/enterprise-cloud@latest/rest/scim - case strings.HasPrefix(path, "/scim/"): - return scimCategory - } -} - -// RateLimits returns the rate limits for the current client. -func (c *Client) RateLimits(ctx context.Context) (*RateLimits, *Response, error) { - req, err := c.NewRequest("GET", "rate_limit", nil) - if err != nil { - return nil, nil, err - } - - response := new(struct { - Resources *RateLimits `json:"resources"` - }) - - // This resource is not subject to rate limits. - ctx = context.WithValue(ctx, bypassRateLimitCheck, true) - resp, err := c.Do(ctx, req, response) - if err != nil { - return nil, resp, err - } - - if response.Resources != nil { - c.rateMu.Lock() - if response.Resources.Core != nil { - c.rateLimits[coreCategory] = *response.Resources.Core - } - if response.Resources.Search != nil { - c.rateLimits[searchCategory] = *response.Resources.Search - } - if response.Resources.GraphQL != nil { - c.rateLimits[graphqlCategory] = *response.Resources.GraphQL - } - if response.Resources.IntegrationManifest != nil { - c.rateLimits[integrationManifestCategory] = *response.Resources.IntegrationManifest - } - if response.Resources.SourceImport != nil { - c.rateLimits[sourceImportCategory] = *response.Resources.SourceImport - } - if response.Resources.CodeScanningUpload != nil { - c.rateLimits[codeScanningUploadCategory] = *response.Resources.CodeScanningUpload - } - if response.Resources.ActionsRunnerRegistration != nil { - c.rateLimits[actionsRunnerRegistrationCategory] = *response.Resources.ActionsRunnerRegistration - } - if response.Resources.SCIM != nil { - c.rateLimits[scimCategory] = *response.Resources.SCIM - } - c.rateMu.Unlock() - } - - return response.Resources, resp, nil -} - -func setCredentialsAsHeaders(req *http.Request, id, secret string) *http.Request { - // To set extra headers, we must make a copy of the Request so - // that we don't modify the Request we were given. This is required by the - // specification of http.RoundTripper. - // - // Since we are going to modify only req.Header here, we only need a deep copy - // of req.Header. - convertedRequest := new(http.Request) - *convertedRequest = *req - convertedRequest.Header = make(http.Header, len(req.Header)) - - for k, s := range req.Header { - convertedRequest.Header[k] = append([]string(nil), s...) - } - convertedRequest.SetBasicAuth(id, secret) - return convertedRequest -} - -/* -UnauthenticatedRateLimitedTransport allows you to make unauthenticated calls -that need to use a higher rate limit associated with your OAuth application. 
-
-	t := &github.UnauthenticatedRateLimitedTransport{
-		ClientID:     "your app's client ID",
-		ClientSecret: "your app's client secret",
-	}
-	client := github.NewClient(t.Client())
-
-This will add the client ID and secret as a base64-encoded string in the format
-ClientID:ClientSecret and apply it as an "Authorization": "Basic" header.
-
-See https://docs.github.com/en/rest/#unauthenticated-rate-limited-requests for
-more information.
-*/
-type UnauthenticatedRateLimitedTransport struct {
-	// ClientID is the GitHub OAuth client ID of the current application, which
-	// can be found by selecting its entry in the list at
-	// https://github.com/settings/applications.
-	ClientID string
-
-	// ClientSecret is the GitHub OAuth client secret of the current
-	// application.
-	ClientSecret string
-
-	// Transport is the underlying HTTP transport to use when making requests.
-	// It will default to http.DefaultTransport if nil.
-	Transport http.RoundTripper
-}
-
-// RoundTrip implements the RoundTripper interface.
-func (t *UnauthenticatedRateLimitedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-	if t.ClientID == "" {
-		return nil, errors.New("t.ClientID is empty")
-	}
-	if t.ClientSecret == "" {
-		return nil, errors.New("t.ClientSecret is empty")
-	}
-
-	req2 := setCredentialsAsHeaders(req, t.ClientID, t.ClientSecret)
-	// Make the HTTP request.
-	return t.transport().RoundTrip(req2)
-}
-
-// Client returns an *http.Client that makes requests which are subject to the
-// rate limit of your OAuth application.
-func (t *UnauthenticatedRateLimitedTransport) Client() *http.Client {
-	return &http.Client{Transport: t}
-}
-
-func (t *UnauthenticatedRateLimitedTransport) transport() http.RoundTripper {
-	if t.Transport != nil {
-		return t.Transport
-	}
-	return http.DefaultTransport
-}
-
-// BasicAuthTransport is an http.RoundTripper that authenticates all requests
-// using HTTP Basic Authentication with the provided username and password. It
-// additionally supports users who have two-factor authentication enabled on
-// their GitHub account.
-type BasicAuthTransport struct {
-	Username string // GitHub username
-	Password string // GitHub password
-	OTP      string // one-time password for users with two-factor auth enabled
-
-	// Transport is the underlying HTTP transport to use when making requests.
-	// It will default to http.DefaultTransport if nil.
-	Transport http.RoundTripper
-}
-
-// RoundTrip implements the RoundTripper interface.
-func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-	req2 := setCredentialsAsHeaders(req, t.Username, t.Password)
-	if t.OTP != "" {
-		req2.Header.Set(headerOTP, t.OTP)
-	}
-	return t.transport().RoundTrip(req2)
-}
-
-// Client returns an *http.Client that makes requests that are authenticated
-// using HTTP Basic Authentication.
-func (t *BasicAuthTransport) Client() *http.Client {
-	return &http.Client{Transport: t}
-}
-
-func (t *BasicAuthTransport) transport() http.RoundTripper {
-	if t.Transport != nil {
-		return t.Transport
-	}
-	return http.DefaultTransport
-}
-
-// formatRateReset formats d to look like "[rate reset in 2s]" or
-// "[rate reset in 87m02s]" for positive durations, and like
-// "[rate limit was reset 87m02s ago]" for negative durations.
-func formatRateReset(d time.Duration) string { - isNegative := d < 0 - if isNegative { - d *= -1 - } - secondsTotal := int(0.5 + d.Seconds()) - minutes := secondsTotal / 60 - seconds := secondsTotal - minutes*60 - - var timeString string - if minutes > 0 { - timeString = fmt.Sprintf("%dm%02ds", minutes, seconds) - } else { - timeString = fmt.Sprintf("%ds", seconds) - } - - if isNegative { - return fmt.Sprintf("[rate limit was reset %v ago]", timeString) - } - return fmt.Sprintf("[rate reset in %v]", timeString) -} - -// When using roundTripWithOptionalFollowRedirect, note that it -// is the responsibility of the caller to close the response body. -func (c *Client) roundTripWithOptionalFollowRedirect(ctx context.Context, u string, followRedirects bool, opts ...RequestOption) (*http.Response, error) { - req, err := c.NewRequest("GET", u, nil, opts...) - if err != nil { - return nil, err - } - - var resp *http.Response - // Use http.DefaultTransport if no custom Transport is configured - req = withContext(ctx, req) - if c.client.Transport == nil { - resp, err = http.DefaultTransport.RoundTrip(req) - } else { - resp, err = c.client.Transport.RoundTrip(req) - } - if err != nil { - return nil, err - } - - // If redirect response is returned, follow it - if followRedirects && resp.StatusCode == http.StatusMovedPermanently { - resp.Body.Close() - u = resp.Header.Get("Location") - resp, err = c.roundTripWithOptionalFollowRedirect(ctx, u, false, opts...) - } - return resp, err -} - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int is a helper routine that allocates a new int value -// to store v and returns a pointer to it. -func Int(v int) *int { return &v } - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/gitignore.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/gitignore.go deleted file mode 100644 index a20a868b44..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/gitignore.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GitignoresService provides access to the gitignore related functions in the -// GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/gitignore/ -type GitignoresService service - -// Gitignore represents a .gitignore file as returned by the GitHub API. -type Gitignore struct { - Name *string `json:"name,omitempty"` - Source *string `json:"source,omitempty"` -} - -func (g Gitignore) String() string { - return Stringify(g) -} - -// List all available Gitignore templates. 
-// -// GitHub API docs: https://docs.github.com/en/rest/gitignore/#listing-available-templates -func (s *GitignoresService) List(ctx context.Context) ([]string, *Response, error) { - req, err := s.client.NewRequest("GET", "gitignore/templates", nil) - if err != nil { - return nil, nil, err - } - - var availableTemplates []string - resp, err := s.client.Do(ctx, req, &availableTemplates) - if err != nil { - return nil, resp, err - } - - return availableTemplates, resp, nil -} - -// Get a Gitignore by name. -// -// GitHub API docs: https://docs.github.com/en/rest/gitignore#get-a-gitignore-template -func (s *GitignoresService) Get(ctx context.Context, name string) (*Gitignore, *Response, error) { - u := fmt.Sprintf("gitignore/templates/%v", name) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - gitignore := new(Gitignore) - resp, err := s.client.Do(ctx, req, gitignore) - if err != nil { - return nil, resp, err - } - - return gitignore, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions.go deleted file mode 100644 index a690f61268..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// InteractionsService handles communication with the repository and organization related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/interactions/ -type InteractionsService service - -// InteractionRestriction represents the interaction restrictions for repository and organization. -type InteractionRestriction struct { - // Specifies the group of GitHub users who can - // comment, open issues, or create pull requests for the given repository. - // Possible values are: "existing_users", "contributors_only" and "collaborators_only". - Limit *string `json:"limit,omitempty"` - - // Origin specifies the type of the resource to interact with. - // Possible values are: "repository" and "organization". - Origin *string `json:"origin,omitempty"` - - // ExpiresAt specifies the time after which the interaction restrictions expire. - // The default expiry time is 24 hours from the time restriction is created. - ExpiresAt *Timestamp `json:"expires_at,omitempty"` -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions_orgs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions_orgs.go deleted file mode 100644 index 5c7663f583..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions_orgs.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GetRestrictionsForOrg fetches the interaction restrictions for an organization. 
-// -// GitHub API docs: https://docs.github.com/en/rest/interactions/orgs#get-interaction-restrictions-for-an-organization -func (s *InteractionsService) GetRestrictionsForOrg(ctx context.Context, organization string) (*InteractionRestriction, *Response, error) { - u := fmt.Sprintf("orgs/%v/interaction-limits", organization) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeInteractionRestrictionsPreview) - - organizationInteractions := new(InteractionRestriction) - - resp, err := s.client.Do(ctx, req, organizationInteractions) - if err != nil { - return nil, resp, err - } - - return organizationInteractions, resp, nil -} - -// UpdateRestrictionsForOrg adds or updates the interaction restrictions for an organization. -// -// limit specifies the group of GitHub users who can comment, open issues, or create pull requests -// in public repositories for the given organization. -// Possible values are: "existing_users", "contributors_only", "collaborators_only". -// -// GitHub API docs: https://docs.github.com/en/rest/interactions/orgs#set-interaction-restrictions-for-an-organization -func (s *InteractionsService) UpdateRestrictionsForOrg(ctx context.Context, organization, limit string) (*InteractionRestriction, *Response, error) { - u := fmt.Sprintf("orgs/%v/interaction-limits", organization) - - interaction := &InteractionRestriction{Limit: String(limit)} - - req, err := s.client.NewRequest("PUT", u, interaction) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeInteractionRestrictionsPreview) - - organizationInteractions := new(InteractionRestriction) - - resp, err := s.client.Do(ctx, req, organizationInteractions) - if err != nil { - return nil, resp, err - } - - return organizationInteractions, resp, nil -} - -// RemoveRestrictionsFromOrg removes the interaction restrictions for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/interactions/orgs#remove-interaction-restrictions-for-an-organization -func (s *InteractionsService) RemoveRestrictionsFromOrg(ctx context.Context, organization string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/interaction-limits", organization) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeInteractionRestrictionsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions_repos.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions_repos.go deleted file mode 100644 index 41e6c5319d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/interactions_repos.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GetRestrictionsForRepo fetches the interaction restrictions for a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/interactions/repos#get-interaction-restrictions-for-a-repository -func (s *InteractionsService) GetRestrictionsForRepo(ctx context.Context, owner, repo string) (*InteractionRestriction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/interaction-limits", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeInteractionRestrictionsPreview) - - repositoryInteractions := new(InteractionRestriction) - - resp, err := s.client.Do(ctx, req, repositoryInteractions) - if err != nil { - return nil, resp, err - } - - return repositoryInteractions, resp, nil -} - -// UpdateRestrictionsForRepo adds or updates the interaction restrictions for a repository. -// -// limit specifies the group of GitHub users who can comment, open issues, or create pull requests -// for the given repository. -// Possible values are: "existing_users", "contributors_only", "collaborators_only". -// -// GitHub API docs: https://docs.github.com/en/rest/interactions/repos#set-interaction-restrictions-for-a-repository -func (s *InteractionsService) UpdateRestrictionsForRepo(ctx context.Context, owner, repo, limit string) (*InteractionRestriction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/interaction-limits", owner, repo) - - interaction := &InteractionRestriction{Limit: String(limit)} - - req, err := s.client.NewRequest("PUT", u, interaction) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeInteractionRestrictionsPreview) - - repositoryInteractions := new(InteractionRestriction) - - resp, err := s.client.Do(ctx, req, repositoryInteractions) - if err != nil { - return nil, resp, err - } - - return repositoryInteractions, resp, nil -} - -// RemoveRestrictionsFromRepo removes the interaction restrictions for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/interactions/repos#remove-interaction-restrictions-for-a-repository -func (s *InteractionsService) RemoveRestrictionsFromRepo(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/interaction-limits", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeInteractionRestrictionsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issue_import.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issue_import.go deleted file mode 100644 index 4bc8d5f1d2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issue_import.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "encoding/json" - "fmt" -) - -// IssueImportService handles communication with the issue import related -// methods of the Issue Import GitHub API. -type IssueImportService service - -// IssueImportRequest represents a request to create an issue. 
-// -// https://gist.github.com/jonmagic/5282384165e0f86ef105#supported-issue-and-comment-fields -type IssueImportRequest struct { - IssueImport IssueImport `json:"issue"` - Comments []*Comment `json:"comments,omitempty"` -} - -// IssueImport represents body of issue to import. -type IssueImport struct { - Title string `json:"title"` - Body string `json:"body"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - ClosedAt *Timestamp `json:"closed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Assignee *string `json:"assignee,omitempty"` - Milestone *int `json:"milestone,omitempty"` - Closed *bool `json:"closed,omitempty"` - Labels []string `json:"labels,omitempty"` -} - -// Comment represents comments of issue to import. -type Comment struct { - CreatedAt *Timestamp `json:"created_at,omitempty"` - Body string `json:"body"` -} - -// IssueImportResponse represents the response of an issue import create request. -// -// https://gist.github.com/jonmagic/5282384165e0f86ef105#import-issue-response -type IssueImportResponse struct { - ID *int `json:"id,omitempty"` - Status *string `json:"status,omitempty"` - URL *string `json:"url,omitempty"` - ImportIssuesURL *string `json:"import_issues_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Message *string `json:"message,omitempty"` - DocumentationURL *string `json:"documentation_url,omitempty"` - Errors []*IssueImportError `json:"errors,omitempty"` -} - -// IssueImportError represents errors of an issue import create request. -type IssueImportError struct { - Location *string `json:"location,omitempty"` - Resource *string `json:"resource,omitempty"` - Field *string `json:"field,omitempty"` - Value *string `json:"value,omitempty"` - Code *string `json:"code,omitempty"` -} - -// Create a new imported issue on the specified repository. -// -// https://gist.github.com/jonmagic/5282384165e0f86ef105#start-an-issue-import -func (s *IssueImportService) Create(ctx context.Context, owner, repo string, issue *IssueImportRequest) (*IssueImportResponse, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/issues", owner, repo) - req, err := s.client.NewRequest("POST", u, issue) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeIssueImportAPI) - - i := new(IssueImportResponse) - resp, err := s.client.Do(ctx, req, i) - if err != nil { - aerr, ok := err.(*AcceptedError) - if ok { - decErr := json.Unmarshal(aerr.Raw, i) - if decErr != nil { - err = decErr - } - - return i, resp, nil - } - - return nil, resp, err - } - - return i, resp, nil -} - -// CheckStatus checks the status of an imported issue. -// -// https://gist.github.com/jonmagic/5282384165e0f86ef105#import-status-request -func (s *IssueImportService) CheckStatus(ctx context.Context, owner, repo string, issueID int64) (*IssueImportResponse, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/issues/%v", owner, repo, issueID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. 
- req.Header.Set("Accept", mediaTypeIssueImportAPI) - - i := new(IssueImportResponse) - resp, err := s.client.Do(ctx, req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// CheckStatusSince checks the status of multiple imported issues since a given date. -// -// https://gist.github.com/jonmagic/5282384165e0f86ef105#check-status-of-multiple-issues -func (s *IssueImportService) CheckStatusSince(ctx context.Context, owner, repo string, since Timestamp) ([]*IssueImportResponse, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/issues?since=%v", owner, repo, since.Format("2006-01-02")) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeIssueImportAPI) - - var b bytes.Buffer - resp, err := s.client.Do(ctx, req, &b) - if err != nil { - return nil, resp, err - } - - var i []*IssueImportResponse - err = json.Unmarshal(b.Bytes(), &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issues.go deleted file mode 100644 index 42e58a17a8..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// IssuesService handles communication with the issue related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/ -type IssuesService service - -// Issue represents a GitHub issue on a repository. -// -// Note: As far as the GitHub API is concerned, every pull request is an issue, -// but not every issue is a pull request. Some endpoints, events, and webhooks -// may also return pull requests via this struct. If PullRequestLinks is nil, -// this is an issue, and if PullRequestLinks is not nil, this is a pull request. -// The IsPullRequest helper method can be used to check that. -type Issue struct { - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - // StateReason can be one of: "completed", "not_planned", "reopened". 
- StateReason *string `json:"state_reason,omitempty"` - Locked *bool `json:"locked,omitempty"` - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - AuthorAssociation *string `json:"author_association,omitempty"` - User *User `json:"user,omitempty"` - Labels []*Label `json:"labels,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Comments *int `json:"comments,omitempty"` - ClosedAt *Timestamp `json:"closed_at,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - ClosedBy *User `json:"closed_by,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - LabelsURL *string `json:"labels_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - PullRequestLinks *PullRequestLinks `json:"pull_request,omitempty"` - Repository *Repository `json:"repository,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - Assignees []*User `json:"assignees,omitempty"` - NodeID *string `json:"node_id,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://docs.github.com/en/rest/search/#text-match-metadata - TextMatches []*TextMatch `json:"text_matches,omitempty"` - - // ActiveLockReason is populated only when LockReason is provided while locking the issue. - // Possible values are: "off-topic", "too heated", "resolved", and "spam". - ActiveLockReason *string `json:"active_lock_reason,omitempty"` -} - -func (i Issue) String() string { - return Stringify(i) -} - -// IsPullRequest reports whether the issue is also a pull request. It uses the -// method recommended by GitHub's API documentation, which is to check whether -// PullRequestLinks is non-nil. -func (i Issue) IsPullRequest() bool { - return i.PullRequestLinks != nil -} - -// IssueRequest represents a request to create/edit an issue. -// It is separate from Issue above because otherwise Labels -// and Assignee fail to serialize to the correct JSON. -type IssueRequest struct { - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - Labels *[]string `json:"labels,omitempty"` - Assignee *string `json:"assignee,omitempty"` - State *string `json:"state,omitempty"` - // StateReason can be 'completed' or 'not_planned'. - StateReason *string `json:"state_reason,omitempty"` - Milestone *int `json:"milestone,omitempty"` - Assignees *[]string `json:"assignees,omitempty"` -} - -// IssueListOptions specifies the optional parameters to the IssuesService.List -// and IssuesService.ListByOrg methods. -type IssueListOptions struct { - // Filter specifies which issues to list. Possible values are: assigned, - // created, mentioned, subscribed, all. Default is "assigned". - Filter string `url:"filter,omitempty"` - - // State filters issues based on their state. Possible values are: open, - // closed, all. Default is "open". - State string `url:"state,omitempty"` - - // Labels filters issues based on their label. - Labels []string `url:"labels,comma,omitempty"` - - // Sort specifies how to sort issues. Possible values are: created, updated, - // and comments. Default value is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort issues. Possible values are: asc, desc. - // Default is "desc". 
-	Direction string `url:"direction,omitempty"`
-
-	// Since filters issues by time.
-	Since time.Time `url:"since,omitempty"`
-
-	ListOptions
-}
-
-// PullRequestLinks object is added to the Issue object when it's an issue included
-// in the IssueCommentEvent webhook payload, if the webhook is fired by a comment on a PR.
-type PullRequestLinks struct {
-	URL      *string `json:"url,omitempty"`
-	HTMLURL  *string `json:"html_url,omitempty"`
-	DiffURL  *string `json:"diff_url,omitempty"`
-	PatchURL *string `json:"patch_url,omitempty"`
-}
-
-// List the issues for the authenticated user. If all is true, list issues
-// across all the user's visible repositories including owned, member, and
-// organization repositories; if false, list only owned and member
-// repositories.
-//
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-user-account-issues-assigned-to-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-issues-assigned-to-the-authenticated-user
-func (s *IssuesService) List(ctx context.Context, all bool, opts *IssueListOptions) ([]*Issue, *Response, error) {
-	var u string
-	if all {
-		u = "issues"
-	} else {
-		u = "user/issues"
-	}
-	return s.listIssues(ctx, u, opts)
-}
-
-// ListByOrg fetches the issues in the specified organization for the
-// authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-organization-issues-assigned-to-the-authenticated-user
-func (s *IssuesService) ListByOrg(ctx context.Context, org string, opts *IssueListOptions) ([]*Issue, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/issues", org)
-	return s.listIssues(ctx, u, opts)
-}
-
-func (s *IssuesService) listIssues(ctx context.Context, u string, opts *IssueListOptions) ([]*Issue, *Response, error) {
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeReactionsPreview)
-
-	var issues []*Issue
-	resp, err := s.client.Do(ctx, req, &issues)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return issues, resp, nil
-}
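-
-// A minimal sketch of paginating through the authenticated user's issues
-// with List (client and ctx are assumed to exist):
-//
-//	opts := &github.IssueListOptions{
-//		Filter:      "assigned",
-//		State:       "open",
-//		ListOptions: github.ListOptions{PerPage: 50},
-//	}
-//	for {
-//		issues, resp, err := client.Issues.List(ctx, true, opts)
-//		if err != nil {
-//			break
-//		}
-//		for _, issue := range issues {
-//			fmt.Println(issue.GetNumber(), issue.GetTitle())
-//		}
-//		if resp.NextPage == 0 {
-//			break
-//		}
-//		opts.Page = resp.NextPage
-//	}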
-
-// IssueListByRepoOptions specifies the optional parameters to the
-// IssuesService.ListByRepo method.
-type IssueListByRepoOptions struct {
-	// Milestone limits issues for the specified milestone. Possible values are
-	// a milestone number, "none" for issues with no milestone, "*" for issues
-	// with any milestone.
-	Milestone string `url:"milestone,omitempty"`
-
-	// State filters issues based on their state. Possible values are: open,
-	// closed, all. Default is "open".
-	State string `url:"state,omitempty"`
-
-	// Assignee filters issues based on their assignee. Possible values are a
-	// user name, "none" for issues that are not assigned, "*" for issues with
-	// any assigned user.
-	Assignee string `url:"assignee,omitempty"`
-
-	// Creator filters issues based on their creator.
-	Creator string `url:"creator,omitempty"`
-
-	// Mentioned filters issues to those that mention a specific user.
-	Mentioned string `url:"mentioned,omitempty"`
-
-	// Labels filters issues based on their label.
-	Labels []string `url:"labels,omitempty,comma"`
-
-	// Sort specifies how to sort issues. Possible values are: created, updated,
-	// and comments. Default value is "created".
-	Sort string `url:"sort,omitempty"`
-
-	// Direction in which to sort issues. Possible values are: asc, desc.
-	// Default is "desc".
-	Direction string `url:"direction,omitempty"`
-
-	// Since filters issues by time.
-	Since time.Time `url:"since,omitempty"`
-
-	ListOptions
-}
-
-// ListByRepo lists the issues for the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-repository-issues
-func (s *IssuesService) ListByRepo(ctx context.Context, owner string, repo string, opts *IssueListByRepoOptions) ([]*Issue, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeReactionsPreview)
-
-	var issues []*Issue
-	resp, err := s.client.Do(ctx, req, &issues)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return issues, resp, nil
-}
-
-// Get a single issue.
-//
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#get-an-issue
-func (s *IssuesService) Get(ctx context.Context, owner string, repo string, number int) (*Issue, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeReactionsPreview)
-
-	issue := new(Issue)
-	resp, err := s.client.Do(ctx, req, issue)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return issue, resp, nil
-}
-
-// Create a new issue on the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#create-an-issue
-func (s *IssuesService) Create(ctx context.Context, owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
-	req, err := s.client.NewRequest("POST", u, issue)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	i := new(Issue)
-	resp, err := s.client.Do(ctx, req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// Edit (update) an issue.
-//
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#update-an-issue
-func (s *IssuesService) Edit(ctx context.Context, owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
-	req, err := s.client.NewRequest("PATCH", u, issue)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	i := new(Issue)
-	resp, err := s.client.Do(ctx, req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// Remove a milestone from an issue.
-//
-// This is a helper method to explicitly update an issue with a `null` milestone, thereby removing it.
-//
-// GitHub API docs: https://docs.github.com/en/rest/issues/issues#update-an-issue
-func (s *IssuesService) RemoveMilestone(ctx context.Context, owner, repo string, issueNumber int) (*Issue, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/issues/%v", owner, repo, issueNumber)
-	req, err := s.client.NewRequest("PATCH", u, &struct {
-		Milestone *Milestone `json:"milestone"`
-	}{})
-	if err != nil {
-		return nil, nil, err
-	}
-
-	i := new(Issue)
-	resp, err := s.client.Do(ctx, req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
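-
-// A minimal sketch of creating an issue and then closing it via Edit
-// (client and ctx are assumed to exist):
-//
-//	issue, _, err := client.Issues.Create(ctx, "owner", "repo", &github.IssueRequest{
-//		Title:  github.String("Found a bug"),
-//		Body:   github.String("Steps to reproduce: ..."),
-//		Labels: &[]string{"bug"},
-//	})
-//	if err == nil {
-//		_, _, err = client.Issues.Edit(ctx, "owner", "repo", issue.GetNumber(),
-//			&github.IssueRequest{State: github.String("closed")})
-//	}
-
-// LockIssueOptions specifies the optional parameters to the
-// IssuesService.Lock method.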
-type LockIssueOptions struct { - // LockReason specifies the reason to lock this issue. - // Providing a lock reason can help make it clearer to contributors why an issue - // was locked. Possible values are: "off-topic", "too heated", "resolved", and "spam". - LockReason string `json:"lock_reason,omitempty"` -} - -// Lock an issue's conversation. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/issues#lock-an-issue -func (s *IssuesService) Lock(ctx context.Context, owner string, repo string, number int, opts *LockIssueOptions) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Unlock an issue's conversation. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/issues#unlock-an-issue -func (s *IssuesService) Unlock(ctx context.Context, owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_assignees.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_assignees.go deleted file mode 100644 index b7f2e80243..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_assignees.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListAssignees fetches all available assignees (owners and collaborators) to -// which issues may be assigned. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#list-assignees -func (s *IssuesService) ListAssignees(ctx context.Context, owner, repo string, opts *ListOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/assignees", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var assignees []*User - resp, err := s.client.Do(ctx, req, &assignees) - if err != nil { - return nil, resp, err - } - - return assignees, resp, nil -} - -// IsAssignee checks if a user is an assignee for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#check-if-a-user-can-be-assigned -func (s *IssuesService) IsAssignee(ctx context.Context, owner, repo, user string) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/assignees/%v", owner, repo, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - assignee, err := parseBoolResponse(err) - return assignee, resp, err -} - -// AddAssignees adds the provided GitHub users as assignees to the issue. 
-// -// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#add-assignees-to-an-issue -func (s *IssuesService) AddAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) { - users := &struct { - Assignees []string `json:"assignees,omitempty"` - }{Assignees: assignees} - u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number) - req, err := s.client.NewRequest("POST", u, users) - if err != nil { - return nil, nil, err - } - - issue := &Issue{} - resp, err := s.client.Do(ctx, req, issue) - if err != nil { - return nil, resp, err - } - - return issue, resp, nil -} - -// RemoveAssignees removes the provided GitHub users as assignees from the issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#remove-assignees-from-an-issue -func (s *IssuesService) RemoveAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) { - users := &struct { - Assignees []string `json:"assignees,omitempty"` - }{Assignees: assignees} - u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, users) - if err != nil { - return nil, nil, err - } - - issue := &Issue{} - resp, err := s.client.Do(ctx, req, issue) - if err != nil { - return nil, resp, err - } - - return issue, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_comments.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_comments.go deleted file mode 100644 index 17881c093d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_comments.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// IssueComment represents a comment left on an issue. -type IssueComment struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - // AuthorAssociation is the comment author's relationship to the issue's repository. - // Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE". - AuthorAssociation *string `json:"author_association,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - IssueURL *string `json:"issue_url,omitempty"` -} - -func (i IssueComment) String() string { - return Stringify(i) -} - -// IssueListCommentsOptions specifies the optional parameters to the -// IssuesService.ListComments method. -type IssueListCommentsOptions struct { - // Sort specifies how to sort comments. Possible values are: created, updated. - Sort *string `url:"sort,omitempty"` - - // Direction in which to sort comments. Possible values are: asc, desc. - Direction *string `url:"direction,omitempty"` - - // Since filters comments by time. - Since *time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListComments lists all comments on the specified issue. Specifying an issue -// number of 0 will return all comments on all issues for the repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/issues/comments#list-issue-comments -// GitHub API docs: https://docs.github.com/en/rest/issues/comments#list-issue-comments-for-a-repository -func (s *IssuesService) ListComments(ctx context.Context, owner string, repo string, number int, opts *IssueListCommentsOptions) ([]*IssueComment, *Response, error) { - var u string - if number == 0 { - u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo) - } else { - u = fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number) - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var comments []*IssueComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetComment fetches the specified issue comment. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/comments#get-an-issue-comment -func (s *IssuesService) GetComment(ctx context.Context, owner string, repo string, commentID int64) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - comment := new(IssueComment) - resp, err := s.client.Do(ctx, req, comment) - if err != nil { - return nil, resp, err - } - - return comment, resp, nil -} - -// CreateComment creates a new comment on the specified issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/comments#create-an-issue-comment -func (s *IssuesService) CreateComment(ctx context.Context, owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - c := new(IssueComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// EditComment updates an issue comment. -// A non-nil comment.Body must be provided. Other comment fields should be left nil. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/comments#update-an-issue-comment -func (s *IssuesService) EditComment(ctx context.Context, owner string, repo string, commentID int64, comment *IssueComment) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - c := new(IssueComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes an issue comment. 
-// -// GitHub API docs: https://docs.github.com/en/rest/issues/comments#delete-an-issue-comment -func (s *IssuesService) DeleteComment(ctx context.Context, owner string, repo string, commentID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_events.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_events.go deleted file mode 100644 index ed07659170..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_events.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// IssueEvent represents an event that occurred around an Issue or Pull Request. -type IssueEvent struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - - // The User that generated this event. - Actor *User `json:"actor,omitempty"` - - // Event identifies the actual type of Event that occurred. Possible - // values are: - // - // closed - // The Actor closed the issue. - // If the issue was closed by commit message, CommitID holds the SHA1 hash of the commit. - // - // merged - // The Actor merged into master a branch containing a commit mentioning the issue. - // CommitID holds the SHA1 of the merge commit. - // - // referenced - // The Actor committed to master a commit mentioning the issue in its commit message. - // CommitID holds the SHA1 of the commit. - // - // reopened, unlocked - // The Actor did that to the issue. - // - // locked - // The Actor locked the issue. - // LockReason holds the reason of locking the issue (if provided while locking). - // - // renamed - // The Actor changed the issue title from Rename.From to Rename.To. - // - // mentioned - // Someone unspecified @mentioned the Actor [sic] in an issue comment body. - // - // assigned, unassigned - // The Assigner assigned the issue to or removed the assignment from the Assignee. - // - // labeled, unlabeled - // The Actor added or removed the Label from the issue. - // - // milestoned, demilestoned - // The Actor added or removed the issue from the Milestone. - // - // subscribed, unsubscribed - // The Actor subscribed to or unsubscribed from notifications for an issue. - // - // head_ref_deleted, head_ref_restored - // The pull request’s branch was deleted or restored. - // - // review_dismissed - // The review was dismissed and `DismissedReview` will be populated below. - // - // review_requested, review_request_removed - // The Actor requested or removed the request for a review. - // RequestedReviewer and ReviewRequester will be populated below. - // - Event *string `json:"event,omitempty"` - - CreatedAt *Timestamp `json:"created_at,omitempty"` - Issue *Issue `json:"issue,omitempty"` - - // Only present on certain events; see above. 
- Assignee *User `json:"assignee,omitempty"` - Assigner *User `json:"assigner,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - Label *Label `json:"label,omitempty"` - Rename *Rename `json:"rename,omitempty"` - LockReason *string `json:"lock_reason,omitempty"` - ProjectCard *ProjectCard `json:"project_card,omitempty"` - DismissedReview *DismissedReview `json:"dismissed_review,omitempty"` - RequestedReviewer *User `json:"requested_reviewer,omitempty"` - ReviewRequester *User `json:"review_requester,omitempty"` -} - -// DismissedReview represents details for 'dismissed_review' events. -type DismissedReview struct { - // State represents the state of the dismissed review. - // Possible values are: "commented", "approved", and "changes_requested". - State *string `json:"state,omitempty"` - ReviewID *int64 `json:"review_id,omitempty"` - DismissalMessage *string `json:"dismissal_message,omitempty"` - DismissalCommitID *string `json:"dismissal_commit_id,omitempty"` -} - -// ListIssueEvents lists events for the specified issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/events#list-issue-events -func (s *IssuesService) ListIssueEvents(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/events", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeProjectCardDetailsPreview) - - var events []*IssueEvent - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// ListRepositoryEvents lists events for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/events#list-issue-events-for-a-repository -func (s *IssuesService) ListRepositoryEvents(ctx context.Context, owner, repo string, opts *ListOptions) ([]*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []*IssueEvent - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} - -// GetEvent returns the specified issue event. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/events#get-an-issue-event -func (s *IssuesService) GetEvent(ctx context.Context, owner, repo string, id int64) (*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - event := new(IssueEvent) - resp, err := s.client.Do(ctx, req, event) - if err != nil { - return nil, resp, err - } - - return event, resp, nil -} - -// Rename contains details for 'renamed' events. 
-type Rename struct { - From *string `json:"from,omitempty"` - To *string `json:"to,omitempty"` -} - -func (r Rename) String() string { - return Stringify(r) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_labels.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_labels.go deleted file mode 100644 index d0f865c03f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_labels.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Label represents a GitHub label on an Issue -type Label struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Color *string `json:"color,omitempty"` - Description *string `json:"description,omitempty"` - Default *bool `json:"default,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -func (l Label) String() string { - return Stringify(l) -} - -// ListLabels lists all labels for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#list-labels-for-a-repository -func (s *IssuesService) ListLabels(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var labels []*Label - resp, err := s.client.Do(ctx, req, &labels) - if err != nil { - return nil, resp, err - } - - return labels, resp, nil -} - -// GetLabel gets a single label. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#get-a-label -func (s *IssuesService) GetLabel(ctx context.Context, owner string, repo string, name string) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - label := new(Label) - resp, err := s.client.Do(ctx, req, label) - if err != nil { - return nil, resp, err - } - - return label, resp, nil -} - -// CreateLabel creates a new label on the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#create-a-label -func (s *IssuesService) CreateLabel(ctx context.Context, owner string, repo string, label *Label) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) - req, err := s.client.NewRequest("POST", u, label) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(ctx, req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// EditLabel edits a label. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#update-a-label -func (s *IssuesService) EditLabel(ctx context.Context, owner string, repo string, name string, label *Label) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("PATCH", u, label) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(ctx, req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// DeleteLabel deletes a label. 
-// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#delete-a-label -func (s *IssuesService) DeleteLabel(ctx context.Context, owner string, repo string, name string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// ListLabelsByIssue lists all labels for an issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#list-labels-for-an-issue -func (s *IssuesService) ListLabelsByIssue(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var labels []*Label - resp, err := s.client.Do(ctx, req, &labels) - if err != nil { - return nil, resp, err - } - - return labels, resp, nil -} - -// AddLabelsToIssue adds labels to an issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#add-labels-to-an-issue -func (s *IssuesService) AddLabelsToIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("POST", u, labels) - if err != nil { - return nil, nil, err - } - - var l []*Label - resp, err := s.client.Do(ctx, req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// RemoveLabelForIssue removes a label for an issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#remove-a-label-from-an-issue -func (s *IssuesService) RemoveLabelForIssue(ctx context.Context, owner string, repo string, number int, label string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels/%v", owner, repo, number, label) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ReplaceLabelsForIssue replaces all labels for an issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#set-labels-for-an-issue -func (s *IssuesService) ReplaceLabelsForIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("PUT", u, labels) - if err != nil { - return nil, nil, err - } - - var l []*Label - resp, err := s.client.Do(ctx, req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// RemoveLabelsForIssue removes all labels for an issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#remove-all-labels-from-an-issue -func (s *IssuesService) RemoveLabelsForIssue(ctx context.Context, owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListLabelsForMilestone lists labels for every issue in a milestone. 
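// A short sketch of combining the label calls above, assuming a
// *github.Client named client; owner, repo, issue number, and label
// names are placeholders.
package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

func relabelIssue(ctx context.Context, client *github.Client) error {
	// Replace whatever is on the issue with exactly these labels.
	if _, _, err := client.Issues.ReplaceLabelsForIssue(ctx, "octocat", "hello-world", 1, []string{"bug", "triage"}); err != nil {
		return err
	}
	// Then drop a single label without touching the rest.
	_, err := client.Issues.RemoveLabelForIssue(ctx, "octocat", "hello-world", 1, "triage")
	return err
}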
-// -// GitHub API docs: https://docs.github.com/en/rest/issues/labels#list-labels-for-issues-in-a-milestone -func (s *IssuesService) ListLabelsForMilestone(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d/labels", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var labels []*Label - resp, err := s.client.Do(ctx, req, &labels) - if err != nil { - return nil, resp, err - } - - return labels, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_milestones.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_milestones.go deleted file mode 100644 index 897c7c0b6d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_milestones.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Milestone represents a GitHub repository milestone. -type Milestone struct { - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - LabelsURL *string `json:"labels_url,omitempty"` - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Title *string `json:"title,omitempty"` - Description *string `json:"description,omitempty"` - Creator *User `json:"creator,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` - ClosedIssues *int `json:"closed_issues,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - ClosedAt *Timestamp `json:"closed_at,omitempty"` - DueOn *Timestamp `json:"due_on,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -func (m Milestone) String() string { - return Stringify(m) -} - -// MilestoneListOptions specifies the optional parameters to the -// IssuesService.ListMilestones method. -type MilestoneListOptions struct { - // State filters milestones based on their state. Possible values are: - // open, closed, all. Default is "open". - State string `url:"state,omitempty"` - - // Sort specifies how to sort milestones. Possible values are: due_on, completeness. - // Default value is "due_on". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort milestones. Possible values are: asc, desc. - // Default is "asc". - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// ListMilestones lists all milestones for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#list-milestones -func (s *IssuesService) ListMilestones(ctx context.Context, owner string, repo string, opts *MilestoneListOptions) ([]*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var milestones []*Milestone - resp, err := s.client.Do(ctx, req, &milestones) - if err != nil { - return nil, resp, err - } - - return milestones, resp, nil -} - -// GetMilestone gets a single milestone. 
-// -// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#get-a-milestone -func (s *IssuesService) GetMilestone(ctx context.Context, owner string, repo string, number int) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - milestone := new(Milestone) - resp, err := s.client.Do(ctx, req, milestone) - if err != nil { - return nil, resp, err - } - - return milestone, resp, nil -} - -// CreateMilestone creates a new milestone on the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#create-a-milestone -func (s *IssuesService) CreateMilestone(ctx context.Context, owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) - req, err := s.client.NewRequest("POST", u, milestone) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// EditMilestone edits a milestone. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#update-a-milestone -func (s *IssuesService) EditMilestone(ctx context.Context, owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("PATCH", u, milestone) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteMilestone deletes a milestone. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#delete-a-milestone -func (s *IssuesService) DeleteMilestone(ctx context.Context, owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_timeline.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_timeline.go deleted file mode 100644 index 9c73e6176d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/issues_timeline.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" -) - -// Timeline represents an event that occurred around an Issue or Pull Request. -// -// It is similar to an IssueEvent but may contain more information. -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/events/issue-event-types -type Timeline struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - CommitURL *string `json:"commit_url,omitempty"` - - // The User object that generated the event. - Actor *User `json:"actor,omitempty"` - - // The person who commented on the issue. - User *User `json:"user,omitempty"` - - // The person who authored the commit. - Author *CommitAuthor `json:"author,omitempty"` - // The person who committed the commit on behalf of the author. 
- Committer *CommitAuthor `json:"committer,omitempty"` - // The SHA of the commit in the pull request. - SHA *string `json:"sha,omitempty"` - // The commit message. - Message *string `json:"message,omitempty"` - // A list of parent commits. - Parents []*Commit `json:"parents,omitempty"` - - // Event identifies the actual type of Event that occurred. Possible values - // are: - // - // assigned - // The issue was assigned to the assignee. - // - // closed - // The issue was closed by the actor. When the commit_id is present, it - // identifies the commit that closed the issue using "closes / fixes #NN" - // syntax. - // - // commented - // A comment was added to the issue. - // - // committed - // A commit was added to the pull request's 'HEAD' branch. Only provided - // for pull requests. - // - // cross-referenced - // The issue was referenced from another issue. The 'source' attribute - // contains the 'id', 'actor', and 'url' of the reference's source. - // - // demilestoned - // The issue was removed from a milestone. - // - // head_ref_deleted - // The pull request's branch was deleted. - // - // head_ref_restored - // The pull request's branch was restored. - // - // labeled - // A label was added to the issue. - // - // locked - // The issue was locked by the actor. - // - // mentioned - // The actor was @mentioned in an issue body. - // - // merged - // The issue was merged by the actor. The 'commit_id' attribute is the - // SHA1 of the HEAD commit that was merged. - // - // milestoned - // The issue was added to a milestone. - // - // referenced - // The issue was referenced from a commit message. The 'commit_id' - // attribute is the commit SHA1 of where that happened. - // - // renamed - // The issue title was changed. - // - // reopened - // The issue was reopened by the actor. - // - // reviewed - // The pull request was reviewed. - // - // subscribed - // The actor subscribed to receive notifications for an issue. - // - // unassigned - // The assignee was unassigned from the issue. - // - // unlabeled - // A label was removed from the issue. - // - // unlocked - // The issue was unlocked by the actor. - // - // unsubscribed - // The actor unsubscribed to stop receiving notifications for an issue. - // - Event *string `json:"event,omitempty"` - - // The string SHA of a commit that referenced this Issue or Pull Request. - CommitID *string `json:"commit_id,omitempty"` - // The timestamp indicating when the event occurred. - CreatedAt *Timestamp `json:"created_at,omitempty"` - // The Label object including `name` and `color` attributes. Only provided for - // 'labeled' and 'unlabeled' events. - Label *Label `json:"label,omitempty"` - // The User object which was assigned to (or unassigned from) this Issue or - // Pull Request. Only provided for 'assigned' and 'unassigned' events. - Assignee *User `json:"assignee,omitempty"` - Assigner *User `json:"assigner,omitempty"` - - // The Milestone object including a 'title' attribute. - // Only provided for 'milestoned' and 'demilestoned' events. - Milestone *Milestone `json:"milestone,omitempty"` - // The 'id', 'actor', and 'url' for the source of a reference from another issue. - // Only provided for 'cross-referenced' events. - Source *Source `json:"source,omitempty"` - // An object containing rename details including 'from' and 'to' attributes. - // Only provided for 'renamed' events. - Rename *Rename `json:"rename,omitempty"` - ProjectCard *ProjectCard `json:"project_card,omitempty"` - // The state of a submitted review. 
Can be one of: 'commented', - // 'changes_requested' or 'approved'. - // Only provided for 'reviewed' events. - State *string `json:"state,omitempty"` - - // The person requested to review the pull request. - Reviewer *User `json:"requested_reviewer,omitempty"` - // RequestedTeam contains the team requested to review the pull request. - RequestedTeam *Team `json:"requested_team,omitempty"` - // The person who requested a review. - Requester *User `json:"review_requester,omitempty"` - - // The review summary text. - Body *string `json:"body,omitempty"` - SubmittedAt *Timestamp `json:"submitted_at,omitempty"` -} - -// Source represents a reference's source. -type Source struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Actor *User `json:"actor,omitempty"` - Type *string `json:"type,omitempty"` - Issue *Issue `json:"issue,omitempty"` -} - -// ListIssueTimeline lists events for the specified issue. -// -// GitHub API docs: https://docs.github.com/en/rest/issues/timeline#list-timeline-events-for-an-issue -func (s *IssuesService) ListIssueTimeline(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*Timeline, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/timeline", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeTimelinePreview, mediaTypeProjectCardDetailsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var events []*Timeline - resp, err := s.client.Do(ctx, req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/licenses.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/licenses.go deleted file mode 100644 index 0877b6d183..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/licenses.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// LicensesService handles communication with the license related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/licenses/ -type LicensesService service - -// RepositoryLicense represents the license for a repository. -type RepositoryLicense struct { - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - - SHA *string `json:"sha,omitempty"` - Size *int `json:"size,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - DownloadURL *string `json:"download_url,omitempty"` - Type *string `json:"type,omitempty"` - Content *string `json:"content,omitempty"` - Encoding *string `json:"encoding,omitempty"` - License *License `json:"license,omitempty"` -} - -func (l RepositoryLicense) String() string { - return Stringify(l) -} - -// License represents an open source license. 
-type License struct { - Key *string `json:"key,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - - SPDXID *string `json:"spdx_id,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Featured *bool `json:"featured,omitempty"` - Description *string `json:"description,omitempty"` - Implementation *string `json:"implementation,omitempty"` - Permissions *[]string `json:"permissions,omitempty"` - Conditions *[]string `json:"conditions,omitempty"` - Limitations *[]string `json:"limitations,omitempty"` - Body *string `json:"body,omitempty"` -} - -func (l License) String() string { - return Stringify(l) -} - -// List popular open source licenses. -// -// GitHub API docs: https://docs.github.com/en/rest/licenses/#list-all-licenses -func (s *LicensesService) List(ctx context.Context) ([]*License, *Response, error) { - req, err := s.client.NewRequest("GET", "licenses", nil) - if err != nil { - return nil, nil, err - } - - var licenses []*License - resp, err := s.client.Do(ctx, req, &licenses) - if err != nil { - return nil, resp, err - } - - return licenses, resp, nil -} - -// Get extended metadata for one license. -// -// GitHub API docs: https://docs.github.com/en/rest/licenses#get-a-license -func (s *LicensesService) Get(ctx context.Context, licenseName string) (*License, *Response, error) { - u := fmt.Sprintf("licenses/%s", licenseName) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - license := new(License) - resp, err := s.client.Do(ctx, req, license) - if err != nil { - return nil, resp, err - } - - return license, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/messages.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/messages.go deleted file mode 100644 index bb5ae3f389..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/messages.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides functions for validating payloads from GitHub Webhooks. -// GitHub API docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github - -package github - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "hash" - "io" - "mime" - "net/http" - "net/url" - "strings" -) - -const ( - // sha1Prefix is the prefix used by GitHub before the HMAC hexdigest. - sha1Prefix = "sha1" - // sha256Prefix and sha512Prefix are provided for future compatibility. - sha256Prefix = "sha256" - sha512Prefix = "sha512" - // SHA1SignatureHeader is the GitHub header key used to pass the HMAC-SHA1 hexdigest. - SHA1SignatureHeader = "X-Hub-Signature" - // SHA256SignatureHeader is the GitHub header key used to pass the HMAC-SHA256 hexdigest. - SHA256SignatureHeader = "X-Hub-Signature-256" - // EventTypeHeader is the GitHub header key used to pass the event type. - EventTypeHeader = "X-Github-Event" - // DeliveryIDHeader is the GitHub header key used to pass the unique ID for the webhook event. - DeliveryIDHeader = "X-Github-Delivery" -) - -var ( - // eventTypeMapping maps webhooks types to their corresponding go-github struct types. 
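// A minimal sketch of the two license calls above, assuming a
// *github.Client named client; "mit" stands in for any key returned
// by LicensesService.List.
package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func showLicense(ctx context.Context, client *github.Client) error {
	license, _, err := client.Licenses.Get(ctx, "mit")
	if err != nil {
		return err
	}
	fmt.Println(license.GetName(), license.GetSPDXID())
	return nil
}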
- eventTypeMapping = map[string]string{ - "branch_protection_rule": "BranchProtectionRuleEvent", - "check_run": "CheckRunEvent", - "check_suite": "CheckSuiteEvent", - "code_scanning_alert": "CodeScanningAlertEvent", - "commit_comment": "CommitCommentEvent", - "content_reference": "ContentReferenceEvent", - "create": "CreateEvent", - "delete": "DeleteEvent", - "deploy_key": "DeployKeyEvent", - "deployment": "DeploymentEvent", - "deployment_status": "DeploymentStatusEvent", - "deployment_protection_rule": "DeploymentProtectionRuleEvent", - "discussion": "DiscussionEvent", - "discussion_comment": "DiscussionCommentEvent", - "fork": "ForkEvent", - "github_app_authorization": "GitHubAppAuthorizationEvent", - "gollum": "GollumEvent", - "installation": "InstallationEvent", - "installation_repositories": "InstallationRepositoriesEvent", - "issue_comment": "IssueCommentEvent", - "issues": "IssuesEvent", - "label": "LabelEvent", - "marketplace_purchase": "MarketplacePurchaseEvent", - "member": "MemberEvent", - "membership": "MembershipEvent", - "merge_group": "MergeGroupEvent", - "meta": "MetaEvent", - "milestone": "MilestoneEvent", - "organization": "OrganizationEvent", - "org_block": "OrgBlockEvent", - "package": "PackageEvent", - "page_build": "PageBuildEvent", - "ping": "PingEvent", - "project": "ProjectEvent", - "project_card": "ProjectCardEvent", - "project_column": "ProjectColumnEvent", - "public": "PublicEvent", - "pull_request": "PullRequestEvent", - "pull_request_review": "PullRequestReviewEvent", - "pull_request_review_comment": "PullRequestReviewCommentEvent", - "pull_request_review_thread": "PullRequestReviewThreadEvent", - "pull_request_target": "PullRequestTargetEvent", - "push": "PushEvent", - "repository": "RepositoryEvent", - "repository_dispatch": "RepositoryDispatchEvent", - "repository_import": "RepositoryImportEvent", - "repository_vulnerability_alert": "RepositoryVulnerabilityAlertEvent", - "release": "ReleaseEvent", - "secret_scanning_alert": "SecretScanningAlertEvent", - "security_advisory": "SecurityAdvisoryEvent", - "star": "StarEvent", - "status": "StatusEvent", - "team": "TeamEvent", - "team_add": "TeamAddEvent", - "user": "UserEvent", - "watch": "WatchEvent", - "workflow_dispatch": "WorkflowDispatchEvent", - "workflow_job": "WorkflowJobEvent", - "workflow_run": "WorkflowRunEvent", - } -) - -// genMAC generates the HMAC signature for a message provided the secret key -// and hashFunc. -func genMAC(message, key []byte, hashFunc func() hash.Hash) []byte { - mac := hmac.New(hashFunc, key) - mac.Write(message) - return mac.Sum(nil) -} - -// checkMAC reports whether messageMAC is a valid HMAC tag for message. -func checkMAC(message, messageMAC, key []byte, hashFunc func() hash.Hash) bool { - expectedMAC := genMAC(message, key, hashFunc) - return hmac.Equal(messageMAC, expectedMAC) -} - -// messageMAC returns the hex-decoded HMAC tag from the signature and its -// corresponding hash function. 
-func messageMAC(signature string) ([]byte, func() hash.Hash, error) { - if signature == "" { - return nil, nil, errors.New("missing signature") - } - sigParts := strings.SplitN(signature, "=", 2) - if len(sigParts) != 2 { - return nil, nil, fmt.Errorf("error parsing signature %q", signature) - } - - var hashFunc func() hash.Hash - switch sigParts[0] { - case sha1Prefix: - hashFunc = sha1.New - case sha256Prefix: - hashFunc = sha256.New - case sha512Prefix: - hashFunc = sha512.New - default: - return nil, nil, fmt.Errorf("unknown hash type prefix: %q", sigParts[0]) - } - - buf, err := hex.DecodeString(sigParts[1]) - if err != nil { - return nil, nil, fmt.Errorf("error decoding signature %q: %v", signature, err) - } - return buf, hashFunc, nil -} - -// ValidatePayloadFromBody validates an incoming GitHub Webhook event request body -// and returns the (JSON) payload. -// The Content-Type header of the payload can be "application/json" or "application/x-www-form-urlencoded". -// If the Content-Type is neither then an error is returned. -// secretToken is the GitHub Webhook secret token. -// If your webhook does not contain a secret token, you can pass an empty secretToken. -// Webhooks without a secret token are not secure and should be avoided. -// -// Example usage: -// -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// // read signature from request -// signature := "" -// payload, err := github.ValidatePayloadFromBody(r.Header.Get("Content-Type"), r.Body, signature, s.webhookSecretKey) -// if err != nil { ... } -// // Process payload... -// } -func ValidatePayloadFromBody(contentType string, readable io.Reader, signature string, secretToken []byte) (payload []byte, err error) { - var body []byte // Raw body that GitHub uses to calculate the signature. - - switch contentType { - case "application/json": - var err error - if body, err = io.ReadAll(readable); err != nil { - return nil, err - } - - // If the content type is application/json, - // the JSON payload is just the original body. - payload = body - - case "application/x-www-form-urlencoded": - // payloadFormParam is the name of the form parameter that the JSON payload - // will be in if a webhook has its content type set to application/x-www-form-urlencoded. - const payloadFormParam = "payload" - - var err error - if body, err = io.ReadAll(readable); err != nil { - return nil, err - } - - // If the content type is application/x-www-form-urlencoded, - // the JSON payload will be under the "payload" form param. - form, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - payload = []byte(form.Get(payloadFormParam)) - - default: - return nil, fmt.Errorf("webhook request has unsupported Content-Type %q", contentType) - } - - // Validate the signature if present or if one is expected (secretToken is non-empty). - if len(secretToken) > 0 || len(signature) > 0 { - if err := ValidateSignature(signature, body, secretToken); err != nil { - return nil, err - } - } - - return payload, nil -} - -// ValidatePayload validates an incoming GitHub Webhook event request -// and returns the (JSON) payload. -// The Content-Type header of the payload can be "application/json" or "application/x-www-form-urlencoded". -// If the Content-Type is neither then an error is returned. -// secretToken is the GitHub Webhook secret token. -// If your webhook does not contain a secret token, you can pass nil or an empty slice. 
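// ValidateSignature above boils down to a constant-time HMAC comparison of
// the header digest against one recomputed from the raw body. A standalone
// sketch of the same check for an X-Hub-Signature-256 header, using only the
// standard library; the header, body, and secret are caller-supplied.
package example

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"strings"
)

func signatureOK(header string, body, secret []byte) bool {
	const prefix = "sha256="
	if !strings.HasPrefix(header, prefix) {
		return false
	}
	sent, err := hex.DecodeString(strings.TrimPrefix(header, prefix))
	if err != nil {
		return false
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(body)
	// hmac.Equal compares in constant time to avoid timing side channels.
	return hmac.Equal(sent, mac.Sum(nil))
}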
-// This is intended for local development purposes only and all webhooks should ideally set up a secret token. -// -// Example usage: -// -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := github.ValidatePayload(r, s.webhookSecretKey) -// if err != nil { ... } -// // Process payload... -// } -func ValidatePayload(r *http.Request, secretToken []byte) (payload []byte, err error) { - signature := r.Header.Get(SHA256SignatureHeader) - if signature == "" { - signature = r.Header.Get(SHA1SignatureHeader) - } - - contentType, _, err := mime.ParseMediaType(r.Header.Get("Content-Type")) - if err != nil { - return nil, err - } - - return ValidatePayloadFromBody(contentType, r.Body, signature, secretToken) -} - -// ValidateSignature validates the signature for the given payload. -// signature is the GitHub hash signature delivered in the X-Hub-Signature header. -// payload is the JSON payload sent by GitHub Webhooks. -// secretToken is the GitHub Webhook secret token. -// -// GitHub API docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github -func ValidateSignature(signature string, payload, secretToken []byte) error { - messageMAC, hashFunc, err := messageMAC(signature) - if err != nil { - return err - } - if !checkMAC(payload, messageMAC, secretToken, hashFunc) { - return errors.New("payload signature check failed") - } - return nil -} - -// WebHookType returns the event type of webhook request r. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/events/github-event-types -func WebHookType(r *http.Request) string { - return r.Header.Get(EventTypeHeader) -} - -// DeliveryID returns the unique delivery ID of webhook request r. -// -// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/events/github-event-types -func DeliveryID(r *http.Request) string { - return r.Header.Get(DeliveryIDHeader) -} - -// ParseWebHook parses the event payload. For recognized event types, a -// value of the corresponding struct type will be returned (as returned -// by Event.ParsePayload()). An error will be returned for unrecognized event -// types. -// -// Example usage: -// -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := github.ValidatePayload(r, s.webhookSecretKey) -// if err != nil { ... } -// event, err := github.ParseWebHook(github.WebHookType(r), payload) -// if err != nil { ... } -// switch event := event.(type) { -// case *github.CommitCommentEvent: -// processCommitCommentEvent(event) -// case *github.CreateEvent: -// processCreateEvent(event) -// ... -// } -// } -func ParseWebHook(messageType string, payload []byte) (interface{}, error) { - eventType, ok := eventTypeMapping[messageType] - if !ok { - return nil, fmt.Errorf("unknown X-Github-Event in message: %v", messageType) - } - - event := Event{ - Type: &eventType, - RawPayload: (*json.RawMessage)(&payload), - } - return event.ParsePayload() -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations.go deleted file mode 100644 index 67989c0789..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package github - -import ( - "context" - "errors" - "fmt" - "net/http" - "strings" -) - -// MigrationService provides access to the migration related functions -// in the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/migration/ -type MigrationService service - -// Migration represents a GitHub migration (archival). -type Migration struct { - ID *int64 `json:"id,omitempty"` - GUID *string `json:"guid,omitempty"` - // State is the current state of a migration. - // Possible values are: - // "pending" which means the migration hasn't started yet, - // "exporting" which means the migration is in progress, - // "exported" which means the migration finished successfully, or - // "failed" which means the migration failed. - State *string `json:"state,omitempty"` - // LockRepositories indicates whether repositories are locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` - URL *string `json:"url,omitempty"` - CreatedAt *string `json:"created_at,omitempty"` - UpdatedAt *string `json:"updated_at,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` -} - -func (m Migration) String() string { - return Stringify(m) -} - -// MigrationOptions specifies the optional parameters to Migration methods. -type MigrationOptions struct { - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories bool - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments bool -} - -// startMigration represents the body of a StartMigration request. -type startMigration struct { - // Repositories is a slice of repository names to migrate. - Repositories []string `json:"repositories,omitempty"` - - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` -} - -// StartMigration starts the generation of a migration archive. -// repos is a slice of repository names to migrate. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#start-an-organization-migration -func (s *MigrationService) StartMigration(ctx context.Context, org string, repos []string, opts *MigrationOptions) (*Migration, *Response, error) { - u := fmt.Sprintf("orgs/%v/migrations", org) - - body := &startMigration{Repositories: repos} - if opts != nil { - body.LockRepositories = Bool(opts.LockRepositories) - body.ExcludeAttachments = Bool(opts.ExcludeAttachments) - } - - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &Migration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListMigrations lists the most recent migrations. 
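// How StartMigration might be invoked with the options above, assuming a
// *github.Client named client; the org and repository names are placeholders.
package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func archiveRepos(ctx context.Context, client *github.Client) error {
	opts := &github.MigrationOptions{LockRepositories: true, ExcludeAttachments: true}
	m, _, err := client.Migrations.StartMigration(ctx, "my-org", []string{"repo-one", "repo-two"}, opts)
	if err != nil {
		return err
	}
	// State starts at "pending" and moves through "exporting" to "exported".
	fmt.Println("migration state:", m.GetState())
	return nil
}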
-// -// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#list-organization-migrations -func (s *MigrationService) ListMigrations(ctx context.Context, org string, opts *ListOptions) ([]*Migration, *Response, error) { - u := fmt.Sprintf("orgs/%v/migrations", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - var m []*Migration - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// MigrationStatus gets the status of a specific migration archive. -// id is the migration ID. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#get-an-organization-migration-status -func (s *MigrationService) MigrationStatus(ctx context.Context, org string, id int64) (*Migration, *Response, error) { - u := fmt.Sprintf("orgs/%v/migrations/%v", org, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &Migration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// MigrationArchiveURL fetches a migration archive URL. -// id is the migration ID. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#download-an-organization-migration-archive -func (s *MigrationService) MigrationArchiveURL(ctx context.Context, org string, id int64) (url string, err error) { - u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return "", err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - s.client.clientMu.Lock() - defer s.client.clientMu.Unlock() - - // Disable the redirect mechanism because AWS fails if the GitHub auth token is provided. - var loc string - saveRedirect := s.client.client.CheckRedirect - s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - loc = req.URL.String() - return errors.New("disable redirect") - } - defer func() { s.client.client.CheckRedirect = saveRedirect }() - - _, err = s.client.Do(ctx, req, nil) // expect error from disable redirect - if err == nil { - return "", errors.New("expected redirect, none provided") - } - if !strings.Contains(err.Error(), "disable redirect") { - return "", err - } - return loc, nil -} - -// DeleteMigration deletes a previous migration archive. -// id is the migration ID. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#delete-an-organization-migration-archive -func (s *MigrationService) DeleteMigration(ctx context.Context, org string, id int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnlockRepo unlocks a repository that was locked for migration. -// id is the migration ID. 
-// You should unlock each migrated repository and delete them when the migration -// is complete and you no longer need the source data. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#unlock-an-organization-repository -func (s *MigrationService) UnlockRepo(ctx context.Context, org string, id int64, repo string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/migrations/%v/repos/%v/lock", org, id, repo) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations_source_import.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations_source_import.go deleted file mode 100644 index 74a04b22a4..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations_source_import.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Import represents a repository import request. -type Import struct { - // The URL of the originating repository. - VCSURL *string `json:"vcs_url,omitempty"` - // The originating VCS type. Can be one of 'subversion', 'git', - // 'mercurial', or 'tfvc'. Without this parameter, the import job will - // take additional time to detect the VCS type before beginning the - // import. This detection step will be reflected in the response. - VCS *string `json:"vcs,omitempty"` - // VCSUsername and VCSPassword are only used for StartImport calls that - // are importing a password-protected repository. - VCSUsername *string `json:"vcs_username,omitempty"` - VCSPassword *string `json:"vcs_password,omitempty"` - // For a tfvc import, the name of the project that is being imported. - TFVCProject *string `json:"tfvc_project,omitempty"` - - // LFS related fields that may be preset in the Import Progress response - - // Describes whether the import has been opted in or out of using Git - // LFS. The value can be 'opt_in', 'opt_out', or 'undecided' if no - // action has been taken. - UseLFS *string `json:"use_lfs,omitempty"` - // Describes whether files larger than 100MB were found during the - // importing step. - HasLargeFiles *bool `json:"has_large_files,omitempty"` - // The total size in gigabytes of files larger than 100MB found in the - // originating repository. - LargeFilesSize *int `json:"large_files_size,omitempty"` - // The total number of files larger than 100MB found in the originating - // repository. To see a list of these files, call LargeFiles. - LargeFilesCount *int `json:"large_files_count,omitempty"` - - // Identifies the current status of an import. An import that does not - // have errors will progress through these steps: - // - // detecting - the "detection" step of the import is in progress - // because the request did not include a VCS parameter. The - // import is identifying the type of source control present at - // the URL. - // importing - the "raw" step of the import is in progress. This is - // where commit data is fetched from the original repository. 
- // The import progress response will include CommitCount (the - // total number of raw commits that will be imported) and - // Percent (0 - 100, the current progress through the import). - // mapping - the "rewrite" step of the import is in progress. This - // is where SVN branches are converted to Git branches, and - // where author updates are applied. The import progress - // response does not include progress information. - // pushing - the "push" step of the import is in progress. This is - // where the importer updates the repository on GitHub. The - // import progress response will include PushPercent, which is - // the percent value reported by git push when it is "Writing - // objects". - // complete - the import is complete, and the repository is ready - // on GitHub. - // - // If there are problems, you will see one of these in the status field: - // - // auth_failed - the import requires authentication in order to - // connect to the original repository. Make an UpdateImport - // request, and include VCSUsername and VCSPassword. - // error - the import encountered an error. The import progress - // response will include the FailedStep and an error message. - // Contact GitHub support for more information. - // detection_needs_auth - the importer requires authentication for - // the originating repository to continue detection. Make an - // UpdatImport request, and include VCSUsername and - // VCSPassword. - // detection_found_nothing - the importer didn't recognize any - // source control at the URL. - // detection_found_multiple - the importer found several projects - // or repositories at the provided URL. When this is the case, - // the Import Progress response will also include a - // ProjectChoices field with the possible project choices as - // values. Make an UpdateImport request, and include VCS and - // (if applicable) TFVCProject. - Status *string `json:"status,omitempty"` - CommitCount *int `json:"commit_count,omitempty"` - StatusText *string `json:"status_text,omitempty"` - AuthorsCount *int `json:"authors_count,omitempty"` - Percent *int `json:"percent,omitempty"` - PushPercent *int `json:"push_percent,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - AuthorsURL *string `json:"authors_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - Message *string `json:"message,omitempty"` - FailedStep *string `json:"failed_step,omitempty"` - - // Human readable display name, provided when the Import appears as - // part of ProjectChoices. - HumanName *string `json:"human_name,omitempty"` - - // When the importer finds several projects or repositories at the - // provided URLs, this will identify the available choices. Call - // UpdateImport with the selected Import value. - ProjectChoices []*Import `json:"project_choices,omitempty"` -} - -func (i Import) String() string { - return Stringify(i) -} - -// SourceImportAuthor identifies an author imported from a source repository. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/migration/source_imports/#get-commit-authors
-type SourceImportAuthor struct {
-	ID         *int64  `json:"id,omitempty"`
-	RemoteID   *string `json:"remote_id,omitempty"`
-	RemoteName *string `json:"remote_name,omitempty"`
-	Email      *string `json:"email,omitempty"`
-	Name       *string `json:"name,omitempty"`
-	URL        *string `json:"url,omitempty"`
-	ImportURL  *string `json:"import_url,omitempty"`
-}
-
-func (a SourceImportAuthor) String() string {
-	return Stringify(a)
-}
-
-// LargeFile identifies a file larger than 100MB found during a repository import.
-//
-// GitHub API docs: https://docs.github.com/en/rest/migration/source_imports/#get-large-files
-type LargeFile struct {
-	RefName *string `json:"ref_name,omitempty"`
-	Path    *string `json:"path,omitempty"`
-	OID     *string `json:"oid,omitempty"`
-	Size    *int    `json:"size,omitempty"`
-}
-
-func (f LargeFile) String() string {
-	return Stringify(f)
-}
-
-// StartImport initiates a repository import.
-//
-// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#start-an-import
-func (s *MigrationService) StartImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
-	req, err := s.client.NewRequest("PUT", u, in)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	out := new(Import)
-	resp, err := s.client.Do(ctx, req, out)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return out, resp, nil
-}
-
-// ImportProgress queries for the status and progress of an ongoing repository import.
-//
-// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#get-an-import-status
-func (s *MigrationService) ImportProgress(ctx context.Context, owner, repo string) (*Import, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	out := new(Import)
-	resp, err := s.client.Do(ctx, req, out)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return out, resp, nil
-}
-
-// UpdateImport initiates a repository import.
-//
-// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#update-an-import
-func (s *MigrationService) UpdateImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
-	req, err := s.client.NewRequest("PATCH", u, in)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	out := new(Import)
-	resp, err := s.client.Do(ctx, req, out)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return out, resp, nil
-}
-
-// CommitAuthors gets the authors mapped from the original repository.
-//
-// Each type of source control system represents authors in a different way.
-// For example, a Git commit author has a display name and an email address,
-// but a Subversion commit author just has a username. The GitHub Importer will
-// make the author information valid, but the author might not be correct. For
-// example, it will change the bare Subversion username "hubot" into something
-// like "hubot <hubot@12341234-abab-fefe-8787-fedcba987654>".
-//
-// This method and MapCommitAuthor allow you to provide correct Git author
-// information.
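// A sketch of the author-mapping flow just described: list the imported
// authors, then correct one with MapCommitAuthor. The client, owner, repo,
// and corrected identity are placeholder assumptions.
package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

func fixAuthors(ctx context.Context, client *github.Client) error {
	authors, _, err := client.Migrations.CommitAuthors(ctx, "octocat", "imported-repo")
	if err != nil {
		return err
	}
	for _, a := range authors {
		if a.GetRemoteName() == "hubot" {
			update := &github.SourceImportAuthor{
				Name:  github.String("Hubot"),
				Email: github.String("hubot@example.com"),
			}
			if _, _, err := client.Migrations.MapCommitAuthor(ctx, "octocat", "imported-repo", a.GetID(), update); err != nil {
				return err
			}
		}
	}
	return nil
}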
-// -// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#get-commit-authors -func (s *MigrationService) CommitAuthors(ctx context.Context, owner, repo string) ([]*SourceImportAuthor, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/authors", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var authors []*SourceImportAuthor - resp, err := s.client.Do(ctx, req, &authors) - if err != nil { - return nil, resp, err - } - - return authors, resp, nil -} - -// MapCommitAuthor updates an author's identity for the import. Your -// application can continue updating authors any time before you push new -// commits to the repository. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#map-a-commit-author -func (s *MigrationService) MapCommitAuthor(ctx context.Context, owner, repo string, id int64, author *SourceImportAuthor) (*SourceImportAuthor, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/authors/%v", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, author) - if err != nil { - return nil, nil, err - } - - out := new(SourceImportAuthor) - resp, err := s.client.Do(ctx, req, out) - if err != nil { - return nil, resp, err - } - - return out, resp, nil -} - -// SetLFSPreference sets whether imported repositories should use Git LFS for -// files larger than 100MB. Only the UseLFS field on the provided Import is -// used. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#update-git-lfs-preference -func (s *MigrationService) SetLFSPreference(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/lfs", owner, repo) - req, err := s.client.NewRequest("PATCH", u, in) - if err != nil { - return nil, nil, err - } - - out := new(Import) - resp, err := s.client.Do(ctx, req, out) - if err != nil { - return nil, resp, err - } - - return out, resp, nil -} - -// LargeFiles lists files larger than 100MB found during the import. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#get-large-files -func (s *MigrationService) LargeFiles(ctx context.Context, owner, repo string) ([]*LargeFile, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/import/large_files", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var files []*LargeFile - resp, err := s.client.Do(ctx, req, &files) - if err != nil { - return nil, resp, err - } - - return files, resp, nil -} - -// CancelImport stops an import for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#cancel-an-import -func (s *MigrationService) CancelImport(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/import", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations_user.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations_user.go deleted file mode 100644 index 6586fdb2d3..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/migrations_user.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "errors" - "fmt" - "net/http" -) - -// UserMigration represents a GitHub migration (archival). -type UserMigration struct { - ID *int64 `json:"id,omitempty"` - GUID *string `json:"guid,omitempty"` - // State is the current state of a migration. - // Possible values are: - // "pending" which means the migration hasn't started yet, - // "exporting" which means the migration is in progress, - // "exported" which means the migration finished successfully, or - // "failed" which means the migration failed. - State *string `json:"state,omitempty"` - // LockRepositories indicates whether repositories are locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` - URL *string `json:"url,omitempty"` - CreatedAt *string `json:"created_at,omitempty"` - UpdatedAt *string `json:"updated_at,omitempty"` - Repositories []*Repository `json:"repositories,omitempty"` -} - -func (m UserMigration) String() string { - return Stringify(m) -} - -// UserMigrationOptions specifies the optional parameters to Migration methods. -type UserMigrationOptions struct { - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories bool - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments bool -} - -// startUserMigration represents the body of a StartMigration request. -type startUserMigration struct { - // Repositories is a slice of repository names to migrate. - Repositories []string `json:"repositories,omitempty"` - - // LockRepositories indicates whether repositories should be locked (to prevent - // manipulation) while migrating data. - LockRepositories *bool `json:"lock_repositories,omitempty"` - - // ExcludeAttachments indicates whether attachments should be excluded from - // the migration (to reduce migration archive file size). - ExcludeAttachments *bool `json:"exclude_attachments,omitempty"` -} - -// StartUserMigration starts the generation of a migration archive. -// repos is a slice of repository names to migrate. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/users#start-a-user-migration -func (s *MigrationService) StartUserMigration(ctx context.Context, repos []string, opts *UserMigrationOptions) (*UserMigration, *Response, error) { - u := "user/migrations" - - body := &startUserMigration{Repositories: repos} - if opts != nil { - body.LockRepositories = Bool(opts.LockRepositories) - body.ExcludeAttachments = Bool(opts.ExcludeAttachments) - } - - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &UserMigration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListUserMigrations lists the most recent migrations. 
-// -// GitHub API docs: https://docs.github.com/en/rest/migrations/users#list-user-migrations -func (s *MigrationService) ListUserMigrations(ctx context.Context, opts *ListOptions) ([]*UserMigration, *Response, error) { - u := "user/migrations" - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - var m []*UserMigration - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UserMigrationStatus gets the status of a specific migration archive. -// id is the migration ID. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/users#get-a-user-migration-status -func (s *MigrationService) UserMigrationStatus(ctx context.Context, id int64) (*UserMigration, *Response, error) { - u := fmt.Sprintf("user/migrations/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &UserMigration{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UserMigrationArchiveURL gets the URL for a specific migration archive. -// id is the migration ID. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/users#download-a-user-migration-archive -func (s *MigrationService) UserMigrationArchiveURL(ctx context.Context, id int64) (string, error) { - url := fmt.Sprintf("user/migrations/%v/archive", id) - - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return "", err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - m := &UserMigration{} - - var loc string - originalRedirect := s.client.client.CheckRedirect - s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - loc = req.URL.String() - return http.ErrUseLastResponse - } - defer func() { - s.client.client.CheckRedirect = originalRedirect - }() - resp, err := s.client.Do(ctx, req, m) - if err == nil { - return "", errors.New("expected redirect, none provided") - } - loc = resp.Header.Get("Location") - return loc, nil -} - -// DeleteUserMigration will delete a previous migration archive. -// id is the migration ID. -// -// GitHub API docs: https://docs.github.com/en/rest/migrations/users#delete-a-user-migration-archive -func (s *MigrationService) DeleteUserMigration(ctx context.Context, id int64) (*Response, error) { - url := fmt.Sprintf("user/migrations/%v/archive", id) - - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnlockUserRepo will unlock a repo that was locked for migration. -// id is migration ID. -// You should unlock each migrated repository and delete them when the migration -// is complete and you no longer need the source data. 
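For reviewers of this vendor drop: a minimal sketch of the user-migration lifecycle that the methods above (and UnlockUserRepo just below) supported in the removed go-github v53 client. The repository name is a placeholder, and the unauthenticated client is for illustration only; real calls need a token with the appropriate scopes.

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // placeholder; use an authenticated client in practice

	// Start an archive of one repository, locking it during the export.
	m, _, err := client.Migrations.StartUserMigration(ctx, []string{"octocat/hello-world"},
		&github.UserMigrationOptions{LockRepositories: true})
	if err != nil {
		fmt.Println("start:", err)
		return
	}

	// Check the export state; once "exported", resolve the archive download URL.
	status, _, err := client.Migrations.UserMigrationStatus(ctx, m.GetID())
	if err == nil && status.GetState() == "exported" {
		archiveURL, err := client.Migrations.UserMigrationArchiveURL(ctx, m.GetID())
		if err == nil {
			fmt.Println("archive:", archiveURL)
		}
		// After downloading: delete the archive and unlock the migrated repository.
		_, _ = client.Migrations.DeleteUserMigration(ctx, m.GetID())
		_, _ = client.Migrations.UnlockUserRepo(ctx, m.GetID(), "hello-world")
	}
}
```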
-// -// GitHub API docs: https://docs.github.com/en/rest/migrations/users#unlock-a-user-repository -func (s *MigrationService) UnlockUserRepo(ctx context.Context, id int64, repo string) (*Response, error) { - url := fmt.Sprintf("user/migrations/%v/repos/%v/lock", id, repo) - - req, err := s.client.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMigrationsPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/misc.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/misc.go deleted file mode 100644 index 8961524157..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/misc.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" - "net/url" -) - -// MarkdownOptions specifies optional parameters to the Markdown method. -type MarkdownOptions struct { - // Mode identifies the rendering mode. Possible values are: - // markdown - render a document as plain Markdown, just like - // README files are rendered. - // - // gfm - to render a document as user-content, e.g. like user - // comments or issues are rendered. In GFM mode, hard line breaks are - // always taken into account, and issue and user mentions are linked - // accordingly. - // - // Default is "markdown". - Mode string - - // Context identifies the repository context. Only taken into account - // when rendering as "gfm". - Context string -} - -type markdownRequest struct { - Text *string `json:"text,omitempty"` - Mode *string `json:"mode,omitempty"` - Context *string `json:"context,omitempty"` -} - -// Markdown renders an arbitrary Markdown document. -// -// GitHub API docs: https://docs.github.com/en/rest/markdown/ -func (c *Client) Markdown(ctx context.Context, text string, opts *MarkdownOptions) (string, *Response, error) { - request := &markdownRequest{Text: String(text)} - if opts != nil { - if opts.Mode != "" { - request.Mode = String(opts.Mode) - } - if opts.Context != "" { - request.Context = String(opts.Context) - } - } - - req, err := c.NewRequest("POST", "markdown", request) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(ctx, req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// ListEmojis returns the emojis available to use on GitHub. -// -// GitHub API docs: https://docs.github.com/en/rest/emojis/ -func (c *Client) ListEmojis(ctx context.Context) (map[string]string, *Response, error) { - req, err := c.NewRequest("GET", "emojis", nil) - if err != nil { - return nil, nil, err - } - - var emoji map[string]string - resp, err := c.Do(ctx, req, &emoji) - if err != nil { - return nil, resp, err - } - - return emoji, resp, nil -} - -// CodeOfConduct represents a code of conduct. -type CodeOfConduct struct { - Name *string `json:"name,omitempty"` - Key *string `json:"key,omitempty"` - URL *string `json:"url,omitempty"` - Body *string `json:"body,omitempty"` -} - -func (c *CodeOfConduct) String() string { - return Stringify(c) -} - -// ListCodesOfConduct returns all codes of conduct. 
-// -// GitHub API docs: https://docs.github.com/en/rest/codes_of_conduct/#list-all-codes-of-conduct -func (c *Client) ListCodesOfConduct(ctx context.Context) ([]*CodeOfConduct, *Response, error) { - req, err := c.NewRequest("GET", "codes_of_conduct", nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeCodesOfConductPreview) - - var cs []*CodeOfConduct - resp, err := c.Do(ctx, req, &cs) - if err != nil { - return nil, resp, err - } - - return cs, resp, nil -} - -// GetCodeOfConduct returns an individual code of conduct. -// -// https://docs.github.com/en/rest/codes_of_conduct/#get-an-individual-code-of-conduct -func (c *Client) GetCodeOfConduct(ctx context.Context, key string) (*CodeOfConduct, *Response, error) { - u := fmt.Sprintf("codes_of_conduct/%s", key) - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeCodesOfConductPreview) - - coc := new(CodeOfConduct) - resp, err := c.Do(ctx, req, coc) - if err != nil { - return nil, resp, err - } - - return coc, resp, nil -} - -// APIMeta represents metadata about the GitHub API. -type APIMeta struct { - // An Array of IP addresses in CIDR format specifying the addresses - // that incoming service hooks will originate from on GitHub.com. - Hooks []string `json:"hooks,omitempty"` - - // An Array of IP addresses in CIDR format specifying the Git servers - // for GitHub.com. - Git []string `json:"git,omitempty"` - - // Whether authentication with username and password is supported. - // (GitHub Enterprise instances using CAS or OAuth for authentication - // will return false. Features like Basic Authentication with a - // username and password, sudo mode, and two-factor authentication are - // not supported on these servers.) - VerifiablePasswordAuthentication *bool `json:"verifiable_password_authentication,omitempty"` - - // An array of IP addresses in CIDR format specifying the addresses - // which serve GitHub Pages websites. - Pages []string `json:"pages,omitempty"` - - // An Array of IP addresses specifying the addresses that source imports - // will originate from on GitHub.com. - Importer []string `json:"importer,omitempty"` - - // An array of IP addresses in CIDR format specifying the IP addresses - // GitHub Actions will originate from. - Actions []string `json:"actions,omitempty"` - - // An array of IP addresses in CIDR format specifying the IP addresses - // Dependabot will originate from. - Dependabot []string `json:"dependabot,omitempty"` - - // A map of algorithms to SSH key fingerprints. - SSHKeyFingerprints map[string]string `json:"ssh_key_fingerprints,omitempty"` - - // An array of SSH keys. - SSHKeys []string `json:"ssh_keys,omitempty"` - - // An array of IP addresses in CIDR format specifying the addresses - // which serve GitHub websites. - Web []string `json:"web,omitempty"` - - // An array of IP addresses in CIDR format specifying the addresses - // which serve GitHub APIs. - API []string `json:"api,omitempty"` -} - -// APIMeta returns information about GitHub.com, the service. Or, if you access -// this endpoint on your organization’s GitHub Enterprise installation, this -// endpoint provides information about that installation. 
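The misc.go helpers being deleted here hang directly off the root client rather than a service struct. A short sketch under the same caveats (placeholder repository as the GFM rendering context, unauthenticated client for illustration):

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Render user-content Markdown; in "gfm" mode, #42 links into the given repo.
	html, _, err := client.Markdown(ctx, "Fixes #42 :tada:", &github.MarkdownOptions{
		Mode:    "gfm",
		Context: "octocat/hello-world", // placeholder repository
	})
	if err != nil {
		fmt.Println("markdown:", err)
		return
	}
	fmt.Println(html)

	// Look up a code of conduct by its key.
	coc, _, err := client.GetCodeOfConduct(ctx, "contributor_covenant")
	if err == nil {
		fmt.Println(coc.GetName())
	}
}
```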
-// -// GitHub API docs: https://docs.github.com/en/rest/meta#get-github-meta-information -func (c *Client) APIMeta(ctx context.Context) (*APIMeta, *Response, error) { - req, err := c.NewRequest("GET", "meta", nil) - if err != nil { - return nil, nil, err - } - - meta := new(APIMeta) - resp, err := c.Do(ctx, req, meta) - if err != nil { - return nil, resp, err - } - - return meta, resp, nil -} - -// Octocat returns an ASCII art octocat with the specified message in a speech -// bubble. If message is empty, a random zen phrase is used. -func (c *Client) Octocat(ctx context.Context, message string) (string, *Response, error) { - u := "octocat" - if message != "" { - u = fmt.Sprintf("%s?s=%s", u, url.QueryEscape(message)) - } - - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(ctx, req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// Zen returns a random line from The Zen of GitHub. -// -// see also: http://warpspire.com/posts/taste/ -func (c *Client) Zen(ctx context.Context) (string, *Response, error) { - req, err := c.NewRequest("GET", "zen", nil) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(ctx, req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// ServiceHook represents a hook that has configuration settings, a list of -// available events, and default events. -type ServiceHook struct { - Name *string `json:"name,omitempty"` - Events []string `json:"events,omitempty"` - SupportedEvents []string `json:"supported_events,omitempty"` - Schema [][]string `json:"schema,omitempty"` -} - -func (s *ServiceHook) String() string { - return Stringify(s) -} - -// ListServiceHooks lists all of the available service hooks. -// -// GitHub API docs: https://developer.github.com/webhooks/#services -func (c *Client) ListServiceHooks(ctx context.Context) ([]*ServiceHook, *Response, error) { - u := "hooks" - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var hooks []*ServiceHook - resp, err := c.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs.go deleted file mode 100644 index 0c7e361b3f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// OrganizationsService provides access to the organization related functions -// in the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/ -type OrganizationsService service - -// Organization represents a GitHub organization account. 
-type Organization struct {
- Login *string `json:"login,omitempty"`
- ID *int64 `json:"id,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- AvatarURL *string `json:"avatar_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- Name *string `json:"name,omitempty"`
- Company *string `json:"company,omitempty"`
- Blog *string `json:"blog,omitempty"`
- Location *string `json:"location,omitempty"`
- Email *string `json:"email,omitempty"`
- TwitterUsername *string `json:"twitter_username,omitempty"`
- Description *string `json:"description,omitempty"`
- PublicRepos *int `json:"public_repos,omitempty"`
- PublicGists *int `json:"public_gists,omitempty"`
- Followers *int `json:"followers,omitempty"`
- Following *int `json:"following,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- TotalPrivateRepos *int64 `json:"total_private_repos,omitempty"`
- OwnedPrivateRepos *int64 `json:"owned_private_repos,omitempty"`
- PrivateGists *int `json:"private_gists,omitempty"`
- DiskUsage *int `json:"disk_usage,omitempty"`
- Collaborators *int `json:"collaborators,omitempty"`
- BillingEmail *string `json:"billing_email,omitempty"`
- Type *string `json:"type,omitempty"`
- Plan *Plan `json:"plan,omitempty"`
- TwoFactorRequirementEnabled *bool `json:"two_factor_requirement_enabled,omitempty"`
- IsVerified *bool `json:"is_verified,omitempty"`
- HasOrganizationProjects *bool `json:"has_organization_projects,omitempty"`
- HasRepositoryProjects *bool `json:"has_repository_projects,omitempty"`
-
- // DefaultRepoPermission can be one of: "read", "write", "admin", or "none". (Default: "read").
- // It is only used in OrganizationsService.Edit.
- DefaultRepoPermission *string `json:"default_repository_permission,omitempty"`
- // DefaultRepoSettings can be one of: "read", "write", "admin", or "none". (Default: "read").
- // It is only used in OrganizationsService.Get.
- DefaultRepoSettings *string `json:"default_repository_settings,omitempty"`
-
- // MembersCanCreateRepos default value is true and is only used in Organizations.Edit.
- MembersCanCreateRepos *bool `json:"members_can_create_repositories,omitempty"`
-
- // https://developer.github.com/changes/2019-12-03-internal-visibility-changes/#rest-v3-api
- MembersCanCreatePublicRepos *bool `json:"members_can_create_public_repositories,omitempty"`
- MembersCanCreatePrivateRepos *bool `json:"members_can_create_private_repositories,omitempty"`
- MembersCanCreateInternalRepos *bool `json:"members_can_create_internal_repositories,omitempty"`
-
- // MembersCanForkPrivateRepos toggles whether organization members can fork private organization repositories.
- MembersCanForkPrivateRepos *bool `json:"members_can_fork_private_repositories,omitempty"`
-
- // MembersAllowedRepositoryCreationType denotes whether organization members can create repositories
- // and the type of repositories they can create. Possible values are: "all", "private", or "none".
- //
- // Deprecated: Use MembersCanCreatePublicRepos, MembersCanCreatePrivateRepos, MembersCanCreateInternalRepos
- // instead. The new fields override the existing MembersAllowedRepositoryCreationType during the 'edit'
- // operation and do not consider 'internal' repositories during the 'get' operation.
- MembersAllowedRepositoryCreationType *string `json:"members_allowed_repository_creation_type,omitempty"`
-
- // MembersCanCreatePages toggles whether organization members can create GitHub Pages sites.
- MembersCanCreatePages *bool `json:"members_can_create_pages,omitempty"`
- // MembersCanCreatePublicPages toggles whether organization members can create public GitHub Pages sites.
- MembersCanCreatePublicPages *bool `json:"members_can_create_public_pages,omitempty"`
- // MembersCanCreatePrivatePages toggles whether organization members can create private GitHub Pages sites.
- MembersCanCreatePrivatePages *bool `json:"members_can_create_private_pages,omitempty"`
- // WebCommitSignoffRequired toggles whether organization members must sign off on web-based commits.
- WebCommitSignoffRequired *bool `json:"web_commit_signoff_required,omitempty"`
- // AdvancedSecurityEnabledForNewRepos toggles whether GitHub Advanced Security is enabled for new repositories.
- AdvancedSecurityEnabledForNewRepos *bool `json:"advanced_security_enabled_for_new_repositories,omitempty"`
- // DependabotAlertsEnabledForNewRepos toggles whether Dependabot alerts are enabled for new repositories.
- DependabotAlertsEnabledForNewRepos *bool `json:"dependabot_alerts_enabled_for_new_repositories,omitempty"`
- // DependabotSecurityUpdatesEnabledForNewRepos toggles whether Dependabot security updates are enabled for new repositories.
- DependabotSecurityUpdatesEnabledForNewRepos *bool `json:"dependabot_security_updates_enabled_for_new_repositories,omitempty"`
- // DependencyGraphEnabledForNewRepos toggles whether the dependency graph is enabled for new repositories.
- DependencyGraphEnabledForNewRepos *bool `json:"dependency_graph_enabled_for_new_repositories,omitempty"`
- // SecretScanningEnabledForNewRepos toggles whether secret scanning is enabled for new repositories.
- SecretScanningEnabledForNewRepos *bool `json:"secret_scanning_enabled_for_new_repositories,omitempty"`
- // SecretScanningPushProtectionEnabledForNewRepos toggles whether secret scanning push protection is enabled on new repositories.
- SecretScanningPushProtectionEnabledForNewRepos *bool `json:"secret_scanning_push_protection_enabled_for_new_repositories,omitempty"`
-
- // API URLs
- URL *string `json:"url,omitempty"`
- EventsURL *string `json:"events_url,omitempty"`
- HooksURL *string `json:"hooks_url,omitempty"`
- IssuesURL *string `json:"issues_url,omitempty"`
- MembersURL *string `json:"members_url,omitempty"`
- PublicMembersURL *string `json:"public_members_url,omitempty"`
- ReposURL *string `json:"repos_url,omitempty"`
-}
-
-// OrganizationInstallations represents GitHub app installations for an organization.
-type OrganizationInstallations struct {
- TotalCount *int `json:"total_count,omitempty"`
- Installations []*Installation `json:"installations,omitempty"`
-}
-
-func (o Organization) String() string {
- return Stringify(o)
-}
-
-// Plan represents the payment plan for an account. See plans at https://github.com/plans.
-type Plan struct {
- Name *string `json:"name,omitempty"`
- Space *int `json:"space,omitempty"`
- Collaborators *int `json:"collaborators,omitempty"`
- PrivateRepos *int64 `json:"private_repos,omitempty"`
- FilledSeats *int `json:"filled_seats,omitempty"`
- Seats *int `json:"seats,omitempty"`
-}
-
-func (p Plan) String() string {
- return Stringify(p)
-}
-
-// OrganizationsListOptions specifies the optional parameters to the
-// OrganizationsService.ListAll method.
-type OrganizationsListOptions struct {
- // Since filters Organizations by ID.
- Since int64 `url:"since,omitempty"`
-
- // Note: Pagination is powered exclusively by the Since parameter;
- // ListOptions.Page has no effect.
- // ListOptions.PerPage controls an undocumented GitHub API parameter.
- ListOptions
-}
-
-// ListAll lists all organizations, in the order that they were created on GitHub.
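The pagination note in OrganizationsListOptions matters in practice: ListAll pages only via the Since cursor. A sketch of that documented pattern, feeding the last-returned organization ID back in as Since (unauthenticated client shown for illustration):

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Page through all organizations using the since cursor: pass the ID of
	// the last organization seen as the next call's Since value.
	opts := &github.OrganizationsListOptions{
		ListOptions: github.ListOptions{PerPage: 100},
	}
	for {
		orgs, _, err := client.Organizations.ListAll(ctx, opts)
		if err != nil || len(orgs) == 0 {
			break
		}
		for _, o := range orgs {
			fmt.Println(o.GetLogin())
		}
		opts.Since = orgs[len(orgs)-1].GetID()
	}
}
```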
-// -// Note: Pagination is powered exclusively by the since parameter. To continue -// listing the next set of organizations, use the ID of the last-returned organization -// as the opts.Since parameter for the next call. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-organizations -func (s *OrganizationsService) ListAll(ctx context.Context, opts *OrganizationsListOptions) ([]*Organization, *Response, error) { - u, err := addOptions("organizations", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - orgs := []*Organization{} - resp, err := s.client.Do(ctx, req, &orgs) - if err != nil { - return nil, resp, err - } - return orgs, resp, nil -} - -// List the organizations for a user. Passing the empty string will list -// organizations for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-organizations-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-organizations-for-a-user -func (s *OrganizationsService) List(ctx context.Context, user string, opts *ListOptions) ([]*Organization, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/orgs", user) - } else { - u = "user/orgs" - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var orgs []*Organization - resp, err := s.client.Do(ctx, req, &orgs) - if err != nil { - return nil, resp, err - } - - return orgs, resp, nil -} - -// Get fetches an organization by name. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-an-organization -func (s *OrganizationsService) Get(ctx context.Context, org string) (*Organization, *Response, error) { - u := fmt.Sprintf("orgs/%v", org) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeMemberAllowedRepoCreationTypePreview) - - organization := new(Organization) - resp, err := s.client.Do(ctx, req, organization) - if err != nil { - return nil, resp, err - } - - return organization, resp, nil -} - -// GetByID fetches an organization. -// -// Note: GetByID uses the undocumented GitHub API endpoint /organizations/:id. -func (s *OrganizationsService) GetByID(ctx context.Context, id int64) (*Organization, *Response, error) { - u := fmt.Sprintf("organizations/%d", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - organization := new(Organization) - resp, err := s.client.Do(ctx, req, organization) - if err != nil { - return nil, resp, err - } - - return organization, resp, nil -} - -// Edit an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#update-an-organization -func (s *OrganizationsService) Edit(ctx context.Context, name string, org *Organization) (*Organization, *Response, error) { - u := fmt.Sprintf("orgs/%v", name) - req, err := s.client.NewRequest("PATCH", u, org) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeMemberAllowedRepoCreationTypePreview) - - o := new(Organization) - resp, err := s.client.Do(ctx, req, o) - if err != nil { - return nil, resp, err - } - - return o, resp, nil -} - -// Delete an organization by name. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#delete-an-organization -func (s *OrganizationsService) Delete(ctx context.Context, org string) (*Response, error) { - u := fmt.Sprintf("orgs/%v", org) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListInstallations lists installations for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-app-installations-for-an-organization -func (s *OrganizationsService) ListInstallations(ctx context.Context, org string, opts *ListOptions) (*OrganizationInstallations, *Response, error) { - u := fmt.Sprintf("orgs/%v/installations", org) - - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - result := new(OrganizationInstallations) - resp, err := s.client.Do(ctx, req, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go deleted file mode 100644 index e3b35b1df1..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ActionsAllowed represents selected actions that are allowed. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions -type ActionsAllowed struct { - GithubOwnedAllowed *bool `json:"github_owned_allowed,omitempty"` - VerifiedAllowed *bool `json:"verified_allowed,omitempty"` - PatternsAllowed []string `json:"patterns_allowed,omitempty"` -} - -func (a ActionsAllowed) String() string { - return Stringify(a) -} - -// GetActionsAllowed gets the actions that are allowed in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-allowed-actions-and-reusable-workflows-for-an-organization -func (s *OrganizationsService) GetActionsAllowed(ctx context.Context, org string) (*ActionsAllowed, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/permissions/selected-actions", org) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - actionsAllowed := new(ActionsAllowed) - resp, err := s.client.Do(ctx, req, actionsAllowed) - if err != nil { - return nil, resp, err - } - - return actionsAllowed, resp, nil -} - -// EditActionsAllowed sets the actions that are allowed in an organization. 
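Get and Edit pair naturally: read the current settings, then PATCH only the fields you set. A sketch against a placeholder organization, assuming owner-scoped credentials:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // placeholder; needs an owner-scoped token in practice

	org, _, err := client.Organizations.Get(ctx, "example-org") // placeholder org
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	fmt.Println("repo creation allowed:", org.GetMembersCanCreateRepos())

	// Edit sends a PATCH carrying only the fields that are set; here,
	// member repository creation is switched off.
	_, _, err = client.Organizations.Edit(ctx, "example-org", &github.Organization{
		MembersCanCreateRepos: github.Bool(false),
	})
	if err != nil {
		fmt.Println("edit:", err)
	}
}
```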
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-allowed-actions-and-reusable-workflows-for-an-organization -func (s *OrganizationsService) EditActionsAllowed(ctx context.Context, org string, actionsAllowed ActionsAllowed) (*ActionsAllowed, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/permissions/selected-actions", org) - req, err := s.client.NewRequest("PUT", u, actionsAllowed) - if err != nil { - return nil, nil, err - } - - p := new(ActionsAllowed) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go deleted file mode 100644 index 6d1db2ee0a..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ActionsPermissions represents a policy for repositories and allowed actions in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions -type ActionsPermissions struct { - EnabledRepositories *string `json:"enabled_repositories,omitempty"` - AllowedActions *string `json:"allowed_actions,omitempty"` - SelectedActionsURL *string `json:"selected_actions_url,omitempty"` -} - -func (a ActionsPermissions) String() string { - return Stringify(a) -} - -// GetActionsPermissions gets the GitHub Actions permissions policy for repositories and allowed actions in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-github-actions-permissions-for-an-organization -func (s *OrganizationsService) GetActionsPermissions(ctx context.Context, org string) (*ActionsPermissions, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/permissions", org) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - permissions := new(ActionsPermissions) - resp, err := s.client.Do(ctx, req, permissions) - if err != nil { - return nil, resp, err - } - - return permissions, resp, nil -} - -// EditActionsPermissions sets the permissions policy for repositories and allowed actions in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-github-actions-permissions-for-an-organization -func (s *OrganizationsService) EditActionsPermissions(ctx context.Context, org string, actionsPermissions ActionsPermissions) (*ActionsPermissions, *Response, error) { - u := fmt.Sprintf("orgs/%v/actions/permissions", org) - req, err := s.client.NewRequest("PUT", u, actionsPermissions) - if err != nil { - return nil, nil, err - } - - p := new(ActionsPermissions) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go deleted file mode 100644 index e2e4692e57..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GetAuditLogOptions sets up optional parameters to query audit-log endpoint. -type GetAuditLogOptions struct { - Phrase *string `url:"phrase,omitempty"` // A search phrase. (Optional.) - Include *string `url:"include,omitempty"` // Event type includes. Can be one of "web", "git", "all". Default: "web". (Optional.) - Order *string `url:"order,omitempty"` // The order of audit log events. Can be one of "asc" or "desc". Default: "desc". (Optional.) - - ListCursorOptions -} - -// HookConfig describes metadata about a webhook configuration. -type HookConfig struct { - ContentType *string `json:"content_type,omitempty"` - InsecureSSL *string `json:"insecure_ssl,omitempty"` - URL *string `json:"url,omitempty"` - - // Secret is returned obfuscated by GitHub, but it can be set for outgoing requests. - Secret *string `json:"secret,omitempty"` -} - -// ActorLocation contains information about reported location for an actor. -type ActorLocation struct { - CountryCode *string `json:"country_code,omitempty"` -} - -// PolicyOverrideReason contains user-supplied information about why a policy was overridden. -type PolicyOverrideReason struct { - Code *string `json:"code,omitempty"` - Message *string `json:"message,omitempty"` -} - -// AuditEntry describes the fields that may be represented by various audit-log "action" entries. -// For a list of actions see - https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/reviewing-the-audit-log-for-your-organization#audit-log-actions -type AuditEntry struct { - ActorIP *string `json:"actor_ip,omitempty"` - Action *string `json:"action,omitempty"` // The name of the action that was performed, for example `user.login` or `repo.create`. - Active *bool `json:"active,omitempty"` - ActiveWas *bool `json:"active_was,omitempty"` - Actor *string `json:"actor,omitempty"` // The actor who performed the action. 
- ActorLocation *ActorLocation `json:"actor_location,omitempty"` - BlockedUser *string `json:"blocked_user,omitempty"` - Business *string `json:"business,omitempty"` - CancelledAt *Timestamp `json:"cancelled_at,omitempty"` - CompletedAt *Timestamp `json:"completed_at,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - Config *HookConfig `json:"config,omitempty"` - ConfigWas *HookConfig `json:"config_was,omitempty"` - ContentType *string `json:"content_type,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - DeployKeyFingerprint *string `json:"deploy_key_fingerprint,omitempty"` - DocumentID *string `json:"_document_id,omitempty"` - Emoji *string `json:"emoji,omitempty"` - EnvironmentName *string `json:"environment_name,omitempty"` - Event *string `json:"event,omitempty"` - Events []string `json:"events,omitempty"` - EventsWere []string `json:"events_were,omitempty"` - Explanation *string `json:"explanation,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` - HashedToken *string `json:"hashed_token,omitempty"` - HeadBranch *string `json:"head_branch,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - HookID *int64 `json:"hook_id,omitempty"` - IsHostedRunner *bool `json:"is_hosted_runner,omitempty"` - JobName *string `json:"job_name,omitempty"` - JobWorkflowRef *string `json:"job_workflow_ref,omitempty"` - LimitedAvailability *bool `json:"limited_availability,omitempty"` - Message *string `json:"message,omitempty"` - Name *string `json:"name,omitempty"` - OAuthApplicationID *int64 `json:"oauth_application_id,omitempty"` - OldUser *string `json:"old_user,omitempty"` - OldPermission *string `json:"old_permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`. - OpenSSHPublicKey *string `json:"openssh_public_key,omitempty"` - OperationType *string `json:"operation_type,omitempty"` - Org *string `json:"org,omitempty"` - OrgID *int64 `json:"org_id,omitempty"` - OverriddenCodes []string `json:"overridden_codes,omitempty"` - Permission *string `json:"permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`. 
- PreviousVisibility *string `json:"previous_visibility,omitempty"` - ProgrammaticAccessType *string `json:"programmatic_access_type,omitempty"` - PullRequestID *int64 `json:"pull_request_id,omitempty"` - PullRequestTitle *string `json:"pull_request_title,omitempty"` - PullRequestURL *string `json:"pull_request_url,omitempty"` - ReadOnly *string `json:"read_only,omitempty"` - Reasons []*PolicyOverrideReason `json:"reasons,omitempty"` - Repo *string `json:"repo,omitempty"` - Repository *string `json:"repository,omitempty"` - RepositoryPublic *bool `json:"repository_public,omitempty"` - RunAttempt *int64 `json:"run_attempt,omitempty"` - RunnerGroupID *int64 `json:"runner_group_id,omitempty"` - RunnerGroupName *string `json:"runner_group_name,omitempty"` - RunnerID *int64 `json:"runner_id,omitempty"` - RunnerLabels []string `json:"runner_labels,omitempty"` - RunnerName *string `json:"runner_name,omitempty"` - RunNumber *int64 `json:"run_number,omitempty"` - SecretsPassed []string `json:"secrets_passed,omitempty"` - SourceVersion *string `json:"source_version,omitempty"` - StartedAt *Timestamp `json:"started_at,omitempty"` - TargetLogin *string `json:"target_login,omitempty"` - TargetVersion *string `json:"target_version,omitempty"` - Team *string `json:"team,omitempty"` - Timestamp *Timestamp `json:"@timestamp,omitempty"` // The time the audit log event occurred, given as a [Unix timestamp](http://en.wikipedia.org/wiki/Unix_time). - TokenID *int64 `json:"token_id,omitempty"` - TokenScopes *string `json:"token_scopes,omitempty"` - Topic *string `json:"topic,omitempty"` - TransportProtocolName *string `json:"transport_protocol_name,omitempty"` // A human readable name for the protocol (for example, HTTP or SSH) used to transfer Git data. - TransportProtocol *int `json:"transport_protocol,omitempty"` // The type of protocol (for example, HTTP=1 or SSH=2) used to transfer Git data. - TriggerID *int64 `json:"trigger_id,omitempty"` - User *string `json:"user,omitempty"` // The user that was affected by the action performed (if available). - UserAgent *string `json:"user_agent,omitempty"` - Visibility *string `json:"visibility,omitempty"` // The repository visibility, for example `public` or `private`. - WorkflowID *int64 `json:"workflow_id,omitempty"` - WorkflowRunID *int64 `json:"workflow_run_id,omitempty"` -} - -// GetAuditLog gets the audit-log entries for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-the-audit-log-for-an-organization -func (s *OrganizationsService) GetAuditLog(ctx context.Context, org string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) { - u := fmt.Sprintf("orgs/%v/audit-log", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var auditEntries []*AuditEntry - resp, err := s.client.Do(ctx, req, &auditEntries) - if err != nil { - return nil, resp, err - } - - return auditEntries, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go deleted file mode 100644 index 7c1b2d6292..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// OrganizationCustomRepoRoles represents custom repository roles available in specified organization. -type OrganizationCustomRepoRoles struct { - TotalCount *int `json:"total_count,omitempty"` - CustomRepoRoles []*CustomRepoRoles `json:"custom_roles,omitempty"` -} - -// CustomRepoRoles represents custom repository roles for an organization. -// See https://docs.github.com/en/enterprise-cloud@latest/organizations/managing-peoples-access-to-your-organization-with-roles/managing-custom-repository-roles-for-an-organization -// for more information. -type CustomRepoRoles struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - BaseRole *string `json:"base_role,omitempty"` - Permissions []string `json:"permissions,omitempty"` -} - -// ListCustomRepoRoles lists the custom repository roles available in this organization. -// In order to see custom repository roles in an organization, the authenticated user must be an organization owner. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#list-custom-repository-roles-in-an-organization -func (s *OrganizationsService) ListCustomRepoRoles(ctx context.Context, org string) (*OrganizationCustomRepoRoles, *Response, error) { - u := fmt.Sprintf("orgs/%v/custom-repository-roles", org) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - customRepoRoles := new(OrganizationCustomRepoRoles) - resp, err := s.client.Do(ctx, req, customRepoRoles) - if err != nil { - return nil, resp, err - } - - return customRepoRoles, resp, nil -} - -// CreateOrUpdateCustomRoleOptions represents options required to create or update a custom repository role. -type CreateOrUpdateCustomRoleOptions struct { - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - BaseRole *string `json:"base_role,omitempty"` - Permissions []string `json:"permissions,omitempty"` -} - -// CreateCustomRepoRole creates a custom repository role in this organization. -// In order to create custom repository roles in an organization, the authenticated user must be an organization owner. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#create-a-custom-repository-role -func (s *OrganizationsService) CreateCustomRepoRole(ctx context.Context, org string, opts *CreateOrUpdateCustomRoleOptions) (*CustomRepoRoles, *Response, error) { - u := fmt.Sprintf("orgs/%v/custom-repository-roles", org) - - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - resultingRole := new(CustomRepoRoles) - resp, err := s.client.Do(ctx, req, resultingRole) - if err != nil { - return nil, resp, err - } - - return resultingRole, resp, err -} - -// UpdateCustomRepoRole updates a custom repository role in this organization. -// In order to update custom repository roles in an organization, the authenticated user must be an organization owner. 
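A sketch of creating a custom repository role with the options type above; the organization name, role name, and permission strings are placeholders, and the caller must be an organization owner:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // placeholder; org-owner credentials required

	// Create a custom role layered on the built-in "read" base role.
	role, _, err := client.Organizations.CreateCustomRepoRole(ctx, "example-org",
		&github.CreateOrUpdateCustomRoleOptions{
			Name:        github.String("triager-plus"),               // placeholder name
			Description: github.String("Read access plus issue management"),
			BaseRole:    github.String("read"),
			Permissions: []string{"reopen_issue", "close_issue"}, // placeholder permissions
		})
	if err != nil {
		fmt.Println("create:", err)
		return
	}
	fmt.Println("created role", role.GetID())
}
```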
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#update-a-custom-repository-role -func (s *OrganizationsService) UpdateCustomRepoRole(ctx context.Context, org, roleID string, opts *CreateOrUpdateCustomRoleOptions) (*CustomRepoRoles, *Response, error) { - u := fmt.Sprintf("orgs/%v/custom-repository-roles/%v", org, roleID) - - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - resultingRole := new(CustomRepoRoles) - resp, err := s.client.Do(ctx, req, resultingRole) - if err != nil { - return nil, resp, err - } - - return resultingRole, resp, err -} - -// DeleteCustomRepoRole deletes an existing custom repository role in this organization. -// In order to delete custom repository roles in an organization, the authenticated user must be an organization owner. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#delete-a-custom-repository-role -func (s *OrganizationsService) DeleteCustomRepoRole(ctx context.Context, org, roleID string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/custom-repository-roles/%v", org, roleID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - resultingRole := new(CustomRepoRoles) - resp, err := s.client.Do(ctx, req, resultingRole) - if err != nil { - return resp, err - } - - return resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_hooks.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_hooks.go deleted file mode 100644 index c0dd51e24e..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_hooks.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2015 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListHooks lists all Hooks for the specified organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#list-organization-webhooks -func (s *OrganizationsService) ListHooks(ctx context.Context, org string, opts *ListOptions) ([]*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var hooks []*Hook - resp, err := s.client.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} - -// GetHook returns a single specified Hook. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#get-an-organization-webhook -func (s *OrganizationsService) GetHook(ctx context.Context, org string, id int64) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - hook := new(Hook) - resp, err := s.client.Do(ctx, req, hook) - if err != nil { - return nil, resp, err - } - - return hook, resp, nil -} - -// CreateHook creates a Hook for the specified org. -// Config is a required field. -// -// Note that only a subset of the hook fields are used and hook must -// not be nil. 
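A sketch of driving CreateHook and PingHook from this file. The Hook type itself lives elsewhere in the package; its Config is assumed to take the v53 map form, and the org name and endpoint URL are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // placeholder; admin:org_hook scope required in practice

	hook := &github.Hook{
		Events: []string{"push", "pull_request"},
		Active: github.Bool(true),
		Config: map[string]interface{}{ // assumed v53 Config shape (map, not a struct)
			"url":          "https://example.com/webhook", // placeholder endpoint
			"content_type": "json",
		},
	}
	created, _, err := client.Organizations.CreateHook(ctx, "example-org", hook)
	if err != nil {
		fmt.Println("create hook:", err)
		return
	}
	// Send a ping event to verify the new hook delivers.
	_, _ = client.Organizations.PingHook(ctx, "example-org", created.GetID())
}
```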
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#create-an-organization-webhook -func (s *OrganizationsService) CreateHook(ctx context.Context, org string, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks", org) - - hookReq := &createHookRequest{ - Name: "web", - Events: hook.Events, - Active: hook.Active, - Config: hook.Config, - } - - req, err := s.client.NewRequest("POST", u, hookReq) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// EditHook updates a specified Hook. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#update-an-organization-webhook -func (s *OrganizationsService) EditHook(ctx context.Context, org string, id int64, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// PingHook triggers a 'ping' event to be sent to the Hook. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#ping-an-organization-webhook -func (s *OrganizationsService) PingHook(ctx context.Context, org string, id int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d/pings", org, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeleteHook deletes a specified Hook. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#delete-an-organization-webhook -func (s *OrganizationsService) DeleteHook(ctx context.Context, org string, id int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_hooks_deliveries.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_hooks_deliveries.go deleted file mode 100644 index 1bfad409ea..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_hooks_deliveries.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListHookDeliveries lists webhook deliveries for a webhook configured in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#list-deliveries-for-an-organization-webhook -func (s *OrganizationsService) ListHookDeliveries(ctx context.Context, org string, id int64, opts *ListCursorOptions) ([]*HookDelivery, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%v/deliveries", org, id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - deliveries := []*HookDelivery{} - resp, err := s.client.Do(ctx, req, &deliveries) - if err != nil { - return nil, resp, err - } - - return deliveries, resp, nil -} - -// GetHookDelivery returns a delivery for a webhook configured in an organization. 
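Deliveries pair with the hooks API above: list recent deliveries for a hook and redeliver any that failed. The org name and hook ID are placeholders, and GetStatusCode comes from the HookDelivery type defined elsewhere in this package:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// List recent deliveries for hook 12345 (placeholder ID) in example-org.
	deliveries, _, err := client.Organizations.ListHookDeliveries(ctx, "example-org", 12345,
		&github.ListCursorOptions{PerPage: 50})
	if err != nil {
		fmt.Println("list:", err)
		return
	}
	for _, d := range deliveries {
		// Redeliver anything that did not get a 2xx/3xx response.
		if d.GetStatusCode() >= 400 {
			_, _, _ = client.Organizations.RedeliverHookDelivery(ctx, "example-org", 12345, d.GetID())
		}
	}
}
```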
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#get-a-webhook-delivery-for-an-organization-webhook -func (s *OrganizationsService) GetHookDelivery(ctx context.Context, owner string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%v/deliveries/%v", owner, hookID, deliveryID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - h := new(HookDelivery) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// RedeliverHookDelivery redelivers a delivery for a webhook configured in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#redeliver-a-delivery-for-an-organization-webhook -func (s *OrganizationsService) RedeliverHookDelivery(ctx context.Context, owner string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%v/deliveries/%v/attempts", owner, hookID, deliveryID) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - h := new(HookDelivery) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_members.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_members.go deleted file mode 100644 index 79f8a65333..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_members.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Membership represents the status of a user's membership in an organization or team. -type Membership struct { - URL *string `json:"url,omitempty"` - - // State is the user's status within the organization or team. - // Possible values are: "active", "pending" - State *string `json:"state,omitempty"` - - // Role identifies the user's role within the organization or team. - // Possible values for organization membership: - // member - non-owner organization member - // admin - organization owner - // - // Possible values for team membership are: - // member - a normal member of the team - // maintainer - a team maintainer. Able to add/remove other team - // members, promote other team members to team - // maintainer, and edit the team’s name and description - Role *string `json:"role,omitempty"` - - // For organization membership, the API URL of the organization. - OrganizationURL *string `json:"organization_url,omitempty"` - - // For organization membership, the organization the membership is for. - Organization *Organization `json:"organization,omitempty"` - - // For organization membership, the user the membership is for. - User *User `json:"user,omitempty"` -} - -func (m Membership) String() string { - return Stringify(m) -} - -// ListMembersOptions specifies optional parameters to the -// OrganizationsService.ListMembers method. -type ListMembersOptions struct { - // If true (or if the authenticated user is not an owner of the - // organization), list only publicly visible members. - PublicOnly bool `url:"-"` - - // Filter members returned in the list. Possible values are: - // 2fa_disabled, all. Default is "all". 
- Filter string `url:"filter,omitempty"` - - // Role filters members returned by their role in the organization. - // Possible values are: - // all - all members of the organization, regardless of role - // admin - organization owners - // member - non-owner organization members - // - // Default is "all". - Role string `url:"role,omitempty"` - - ListOptions -} - -// ListMembers lists the members for an organization. If the authenticated -// user is an owner of the organization, this will return both concealed and -// public members, otherwise it will only return public members. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-organization-members -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-public-organization-members -func (s *OrganizationsService) ListMembers(ctx context.Context, org string, opts *ListMembersOptions) ([]*User, *Response, error) { - var u string - if opts != nil && opts.PublicOnly { - u = fmt.Sprintf("orgs/%v/public_members", org) - } else { - u = fmt.Sprintf("orgs/%v/members", org) - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var members []*User - resp, err := s.client.Do(ctx, req, &members) - if err != nil { - return nil, resp, err - } - - return members, resp, nil -} - -// IsMember checks if a user is a member of an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#check-organization-membership-for-a-user -func (s *OrganizationsService) IsMember(ctx context.Context, org, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/members/%v", org, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// IsPublicMember checks if a user is a public member of an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#check-public-organization-membership-for-a-user -func (s *OrganizationsService) IsPublicMember(ctx context.Context, org, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// RemoveMember removes a user from all teams of an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#remove-an-organization-member -func (s *OrganizationsService) RemoveMember(ctx context.Context, org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/members/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// PublicizeMembership publicizes a user's membership in an organization. (A -// user cannot publicize the membership for another user.) 
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#set-public-organization-membership-for-the-authenticated-user -func (s *OrganizationsService) PublicizeMembership(ctx context.Context, org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ConcealMembership conceals a user's membership in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#remove-public-organization-membership-for-the-authenticated-user -func (s *OrganizationsService) ConcealMembership(ctx context.Context, org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ListOrgMembershipsOptions specifies optional parameters to the -// OrganizationsService.ListOrgMemberships method. -type ListOrgMembershipsOptions struct { - // Filter memberships to include only those with the specified state. - // Possible values are: "active", "pending". - State string `url:"state,omitempty"` - - ListOptions -} - -// ListOrgMemberships lists the organization memberships for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-organization-memberships-for-the-authenticated-user -func (s *OrganizationsService) ListOrgMemberships(ctx context.Context, opts *ListOrgMembershipsOptions) ([]*Membership, *Response, error) { - u := "user/memberships/orgs" - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var memberships []*Membership - resp, err := s.client.Do(ctx, req, &memberships) - if err != nil { - return nil, resp, err - } - - return memberships, resp, nil -} - -// GetOrgMembership gets the membership for a user in a specified organization. -// Passing an empty string for user will get the membership for the -// authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#get-an-organization-membership-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/orgs/members#get-organization-membership-for-a-user -func (s *OrganizationsService) GetOrgMembership(ctx context.Context, user, org string) (*Membership, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("orgs/%v/memberships/%v", org, user) - } else { - u = fmt.Sprintf("user/memberships/orgs/%v", org) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - membership := new(Membership) - resp, err := s.client.Do(ctx, req, membership) - if err != nil { - return nil, resp, err - } - - return membership, resp, nil -} - -// EditOrgMembership edits the membership for user in specified organization. -// Passing an empty string for user will edit the membership for the -// authenticated user. 
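A sketch combining the membership helpers above: check membership, then publicize the authenticated user's own membership. The user and org names are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // placeholder; authenticate as the user in question

	// Membership checks use HTTP status codes, surfaced here as a bool.
	isMember, _, err := client.Organizations.IsMember(ctx, "example-org", "octocat")
	if err != nil {
		fmt.Println("check:", err)
		return
	}
	if isMember {
		// Only works for the authenticated user's own membership.
		_, _ = client.Organizations.PublicizeMembership(ctx, "example-org", "octocat")
	}
}
```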
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/members#update-an-organization-membership-for-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/rest/orgs/members#set-organization-membership-for-a-user
-func (s *OrganizationsService) EditOrgMembership(ctx context.Context, user, org string, membership *Membership) (*Membership, *Response, error) {
-	var u, method string
-	if user != "" {
-		u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
-		method = "PUT"
-	} else {
-		u = fmt.Sprintf("user/memberships/orgs/%v", org)
-		method = "PATCH"
-	}
-
-	req, err := s.client.NewRequest(method, u, membership)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	m := new(Membership)
-	resp, err := s.client.Do(ctx, req, m)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return m, resp, nil
-}
-
-// RemoveOrgMembership removes user from the specified organization. If the
-// user has been invited to the organization, this will cancel their invitation.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/members#remove-organization-membership-for-a-user
-func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, org string) (*Response, error) {
-	u := fmt.Sprintf("orgs/%v/memberships/%v", org, user)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ListPendingOrgInvitations returns a list of pending invitations.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-pending-organization-invitations
-func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org string, opts *ListOptions) ([]*Invitation, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/invitations", org)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var pendingInvitations []*Invitation
-	resp, err := s.client.Do(ctx, req, &pendingInvitations)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pendingInvitations, resp, nil
-}
-
-// CreateOrgInvitationOptions specifies the parameters to the
-// OrganizationsService.CreateOrgInvitation method.
-type CreateOrgInvitationOptions struct {
-	// GitHub user ID for the person you are inviting. Not required if you provide Email.
-	InviteeID *int64 `json:"invitee_id,omitempty"`
-	// Email address of the person you are inviting, which can be an existing GitHub user.
-	// Not required if you provide InviteeID.
-	Email *string `json:"email,omitempty"`
-	// Specify role for new member. Can be one of:
-	// * admin - Organization owners with full administrative rights to the
-	//   organization and complete access to all repositories and teams.
-	// * direct_member - Non-owner organization members with ability to see
-	//   other members and join teams by invitation.
-	// * billing_manager - Non-owner organization members with ability to
-	//   manage the billing settings of your organization.
-	// Default is "direct_member".
-	Role   *string `json:"role,omitempty"`
-	TeamID []int64 `json:"team_ids,omitempty"`
-}
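A hedged sketch of how this options struct feeds CreateOrgInvitation, which is deleted just below; the org name, email, and team ID are invented placeholders, and the client must be authenticated as an organization owner.

    package example

    import (
        "context"

        "github.com/google/go-github/v53/github"
    )

    // inviteByEmail invites a new direct member and adds them to one team.
    func inviteByEmail(ctx context.Context, gh *github.Client) (*github.Invitation, error) {
        opts := &github.CreateOrgInvitationOptions{
            Email:  github.String("new.dev@example.com"), // placeholder address
            Role:   github.String("direct_member"),
            TeamID: []int64{123456}, // placeholder team ID; serialized as "team_ids"
        }
        inv, _, err := gh.Organizations.CreateOrgInvitation(ctx, "my-org", opts)
        return inv, err
    }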
-
-// CreateOrgInvitation invites people to an organization by using their GitHub user ID or their email address.
-// In order to create invitations in an organization,
-// the authenticated user must be an organization owner.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/members#create-an-organization-invitation
-func (s *OrganizationsService) CreateOrgInvitation(ctx context.Context, org string, opts *CreateOrgInvitationOptions) (*Invitation, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/invitations", org)
-
-	req, err := s.client.NewRequest("POST", u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var invitation *Invitation
-	resp, err := s.client.Do(ctx, req, &invitation)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return invitation, resp, nil
-}
-
-// ListOrgInvitationTeams lists all teams associated with an invitation. In order to see invitations in an organization,
-// the authenticated user must be an organization owner.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-organization-invitation-teams
-func (s *OrganizationsService) ListOrgInvitationTeams(ctx context.Context, org, invitationID string, opts *ListOptions) ([]*Team, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/invitations/%v/teams", org, invitationID)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var orgInvitationTeams []*Team
-	resp, err := s.client.Do(ctx, req, &orgInvitationTeams)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return orgInvitationTeams, resp, nil
-}
-
-// ListFailedOrgInvitations returns a list of failed invitations.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-failed-organization-invitations
-func (s *OrganizationsService) ListFailedOrgInvitations(ctx context.Context, org string, opts *ListOptions) ([]*Invitation, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/failed_invitations", org)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var failedInvitations []*Invitation
-	resp, err := s.client.Do(ctx, req, &failedInvitations)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return failedInvitations, resp, nil
-}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_outside_collaborators.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_outside_collaborators.go
deleted file mode 100644
index 506a494603..0000000000
--- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_outside_collaborators.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2017 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// ListOutsideCollaboratorsOptions specifies optional parameters to the
-// OrganizationsService.ListOutsideCollaborators method.
-type ListOutsideCollaboratorsOptions struct {
-	// Filter outside collaborators returned in the list. Possible values are:
-	// 2fa_disabled, all. Default is "all".
-	Filter string `url:"filter,omitempty"`
-
-	ListOptions
-}
-
-// ListOutsideCollaborators lists outside collaborators of organization's repositories.
-// This will only work if the authenticated
-// user is an owner of the organization.
-//
-// Warning: The API may change without advance notice during the preview period.
-// Preview features are not supported for production use.
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/outside-collaborators#list-outside-collaborators-for-an-organization -func (s *OrganizationsService) ListOutsideCollaborators(ctx context.Context, org string, opts *ListOutsideCollaboratorsOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("orgs/%v/outside_collaborators", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var members []*User - resp, err := s.client.Do(ctx, req, &members) - if err != nil { - return nil, resp, err - } - - return members, resp, nil -} - -// RemoveOutsideCollaborator removes a user from the list of outside collaborators; -// consequently, removing them from all the organization's repositories. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/outside-collaborators#remove-outside-collaborator-from-an-organization -func (s *OrganizationsService) RemoveOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/outside_collaborators/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// ConvertMemberToOutsideCollaborator reduces the permission level of a member of the -// organization to that of an outside collaborator. Therefore, they will only -// have access to the repositories that their current team membership allows. -// Responses for converting a non-member or the last owner to an outside collaborator -// are listed in GitHub API docs. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/outside-collaborators#convert-an-organization-member-to-outside-collaborator -func (s *OrganizationsService) ConvertMemberToOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/outside_collaborators/%v", org, user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_packages.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_packages.go deleted file mode 100644 index 0ae68aaa36..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_packages.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// List the packages for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#list-packages-for-an-organization -func (s *OrganizationsService) ListPackages(ctx context.Context, org string, opts *PackageListOptions) ([]*Package, *Response, error) { - u := fmt.Sprintf("orgs/%v/packages", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var packages []*Package - resp, err := s.client.Do(ctx, req, &packages) - if err != nil { - return nil, resp, err - } - - return packages, resp, nil -} - -// Get a package by name from an organization. 
-// -// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-for-an-organization -func (s *OrganizationsService) GetPackage(ctx context.Context, org, packageType, packageName string) (*Package, *Response, error) { - u := fmt.Sprintf("orgs/%v/packages/%v/%v", org, packageType, packageName) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pack *Package - resp, err := s.client.Do(ctx, req, &pack) - if err != nil { - return nil, resp, err - } - - return pack, resp, nil -} - -// Delete a package from an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-for-an-organization -func (s *OrganizationsService) DeletePackage(ctx context.Context, org, packageType, packageName string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/packages/%v/%v", org, packageType, packageName) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Restore a package to an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-for-an-organization -func (s *OrganizationsService) RestorePackage(ctx context.Context, org, packageType, packageName string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/packages/%v/%v/restore", org, packageType, packageName) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Get all versions of a package in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#list-package-versions-for-a-package-owned-by-an-organization -func (s *OrganizationsService) PackageGetAllVersions(ctx context.Context, org, packageType, packageName string, opts *PackageListOptions) ([]*PackageVersion, *Response, error) { - u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions", org, packageType, packageName) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var versions []*PackageVersion - resp, err := s.client.Do(ctx, req, &versions) - if err != nil { - return nil, resp, err - } - - return versions, resp, nil -} - -// Get a specific version of a package in an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-version-for-an-organization -func (s *OrganizationsService) PackageGetVersion(ctx context.Context, org, packageType, packageName string, packageVersionID int64) (*PackageVersion, *Response, error) { - u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions/%v", org, packageType, packageName, packageVersionID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var version *PackageVersion - resp, err := s.client.Do(ctx, req, &version) - if err != nil { - return nil, resp, err - } - - return version, resp, nil -} - -// Delete a package version from an organization. 
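For orientation, a minimal sketch of listing a package's versions with the methods deleted above; "my-org" and "my-image" are placeholders, and the client is assumed to have the packages scopes.

    package example

    import (
        "context"
        "fmt"

        "github.com/google/go-github/v53/github"
    )

    // activeImageVersions lists the active versions of one container package.
    func activeImageVersions(ctx context.Context, gh *github.Client) error {
        opts := &github.PackageListOptions{
            PackageType: github.String("container"),
            State:       github.String("active"),
        }
        versions, _, err := gh.Organizations.PackageGetAllVersions(ctx, "my-org", "container", "my-image", opts)
        if err != nil {
            return err
        }
        for _, v := range versions {
            fmt.Println(v.GetID(), v.GetName())
        }
        return nil
    }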
-// -// GitHub API docs: https://docs.github.com/en/rest/packages#delete-package-version-for-an-organization -func (s *OrganizationsService) PackageDeleteVersion(ctx context.Context, org, packageType, packageName string, packageVersionID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions/%v", org, packageType, packageName, packageVersionID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Restore a package version to an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#restore-package-version-for-an-organization -func (s *OrganizationsService) PackageRestoreVersion(ctx context.Context, org, packageType, packageName string, packageVersionID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions/%v/restore", org, packageType, packageName, packageVersionID) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_projects.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_projects.go deleted file mode 100644 index d49eae54dc..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_projects.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListProjects lists the projects for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-organization-projects -func (s *OrganizationsService) ListProjects(ctx context.Context, org string, opts *ProjectListOptions) ([]*Project, *Response, error) { - u := fmt.Sprintf("orgs/%v/projects", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - var projects []*Project - resp, err := s.client.Do(ctx, req, &projects) - if err != nil { - return nil, resp, err - } - - return projects, resp, nil -} - -// CreateProject creates a GitHub Project for the specified organization. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-an-organization-project -func (s *OrganizationsService) CreateProject(ctx context.Context, org string, opts *ProjectOptions) (*Project, *Response, error) { - u := fmt.Sprintf("orgs/%v/projects", org) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_rules.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_rules.go deleted file mode 100644 index a3905af8fb..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_rules.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. 
All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GetAllOrganizationRulesets gets all the rulesets for the specified organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-all-organization-repository-rulesets -func (s *OrganizationsService) GetAllOrganizationRulesets(ctx context.Context, org string) ([]*Ruleset, *Response, error) { - u := fmt.Sprintf("orgs/%v/rulesets", org) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rulesets []*Ruleset - resp, err := s.client.Do(ctx, req, &rulesets) - if err != nil { - return nil, resp, err - } - - return rulesets, resp, nil -} - -// CreateOrganizationRuleset creates a ruleset for the specified organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#create-an-organization-repository-ruleset -func (s *OrganizationsService) CreateOrganizationRuleset(ctx context.Context, org string, rs *Ruleset) (*Ruleset, *Response, error) { - u := fmt.Sprintf("orgs/%v/rulesets", org) - - req, err := s.client.NewRequest("POST", u, rs) - if err != nil { - return nil, nil, err - } - - var ruleset *Ruleset - resp, err := s.client.Do(ctx, req, &ruleset) - if err != nil { - return nil, resp, err - } - - return ruleset, resp, nil -} - -// GetOrganizationRuleset gets a ruleset from the specified organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-an-organization-repository-ruleset -func (s *OrganizationsService) GetOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Ruleset, *Response, error) { - u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var ruleset *Ruleset - resp, err := s.client.Do(ctx, req, &ruleset) - if err != nil { - return nil, resp, err - } - - return ruleset, resp, nil -} - -// UpdateOrganizationRuleset updates a ruleset from the specified organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#update-an-organization-repository-ruleset -func (s *OrganizationsService) UpdateOrganizationRuleset(ctx context.Context, org string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) { - u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID) - - req, err := s.client.NewRequest("PUT", u, rs) - if err != nil { - return nil, nil, err - } - - var ruleset *Ruleset - resp, err := s.client.Do(ctx, req, &ruleset) - if err != nil { - return nil, resp, err - } - - return ruleset, resp, nil -} - -// DeleteOrganizationRuleset deletes a ruleset from the specified organization. 
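A small sketch of the ruleset listing call deleted above, assuming an authenticated client and a placeholder org name; it only counts the results, since the Ruleset fields are defined elsewhere in the library.

    package example

    import (
        "context"
        "fmt"

        "github.com/google/go-github/v53/github"
    )

    // countRulesets reports how many repository rulesets the org defines.
    func countRulesets(ctx context.Context, gh *github.Client) error {
        rulesets, _, err := gh.Organizations.GetAllOrganizationRulesets(ctx, "my-org")
        if err != nil {
            return err
        }
        fmt.Printf("my-org defines %d repository rulesets\n", len(rulesets))
        return nil
    }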
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#delete-an-organization-repository-ruleset -func (s *OrganizationsService) DeleteOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_security_managers.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_security_managers.go deleted file mode 100644 index a3f002e0e1..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_security_managers.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListSecurityManagerTeams lists all security manager teams for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/security-managers#list-security-manager-teams -func (s *OrganizationsService) ListSecurityManagerTeams(ctx context.Context, org string) ([]*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/security-managers", org) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} - -// AddSecurityManagerTeam adds a team to the list of security managers for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/security-managers#add-a-security-manager-team -func (s *OrganizationsService) AddSecurityManagerTeam(ctx context.Context, org, team string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/security-managers/teams/%v", org, team) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveSecurityManagerTeam removes a team from the list of security managers for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/security-managers#remove-a-security-manager-team -func (s *OrganizationsService) RemoveSecurityManagerTeam(ctx context.Context, org, team string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/security-managers/teams/%v", org, team) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_users_blocking.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_users_blocking.go deleted file mode 100644 index 9c6cf60269..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/orgs_users_blocking.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListBlockedUsers lists all the users blocked by an organization. 
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#list-users-blocked-by-an-organization -func (s *OrganizationsService) ListBlockedUsers(ctx context.Context, org string, opts *ListOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("orgs/%v/blocks", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - var blockedUsers []*User - resp, err := s.client.Do(ctx, req, &blockedUsers) - if err != nil { - return nil, resp, err - } - - return blockedUsers, resp, nil -} - -// IsBlocked reports whether specified user is blocked from an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#check-if-a-user-is-blocked-by-an-organization -func (s *OrganizationsService) IsBlocked(ctx context.Context, org string, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - resp, err := s.client.Do(ctx, req, nil) - isBlocked, err := parseBoolResponse(err) - return isBlocked, resp, err -} - -// BlockUser blocks specified user from an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#block-a-user-from-an-organization -func (s *OrganizationsService) BlockUser(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnblockUser unblocks specified user from an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#unblock-a-user-from-an-organization -func (s *OrganizationsService) UnblockUser(ctx context.Context, org string, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/packages.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/packages.go deleted file mode 100644 index ef7df07405..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/packages.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2020 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// Package represents a GitHub package. 
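The blocking endpoints above compose naturally into an idempotent helper; a hedged sketch, with "my-org" as a placeholder and an authenticated owner-level client assumed.

    package example

    import (
        "context"

        "github.com/google/go-github/v53/github"
    )

    // ensureBlocked blocks a user unless the org has already blocked them.
    func ensureBlocked(ctx context.Context, gh *github.Client, login string) error {
        blocked, _, err := gh.Organizations.IsBlocked(ctx, "my-org", login)
        if err != nil || blocked {
            return err
        }
        _, err = gh.Organizations.BlockUser(ctx, "my-org", login)
        return err
    }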
-type Package struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - PackageType *string `json:"package_type,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Owner *User `json:"owner,omitempty"` - PackageVersion *PackageVersion `json:"package_version,omitempty"` - Registry *PackageRegistry `json:"registry,omitempty"` - URL *string `json:"url,omitempty"` - VersionCount *int64 `json:"version_count,omitempty"` - Visibility *string `json:"visibility,omitempty"` - Repository *Repository `json:"repository,omitempty"` -} - -func (p Package) String() string { - return Stringify(p) -} - -// PackageVersion represents a GitHub package version. -type PackageVersion struct { - ID *int64 `json:"id,omitempty"` - Version *string `json:"version,omitempty"` - Summary *string `json:"summary,omitempty"` - Body *string `json:"body,omitempty"` - BodyHTML *string `json:"body_html,omitempty"` - Release *PackageRelease `json:"release,omitempty"` - Manifest *string `json:"manifest,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - TargetOID *string `json:"target_oid,omitempty"` - Draft *bool `json:"draft,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - PackageFiles []*PackageFile `json:"package_files,omitempty"` - Author *User `json:"author,omitempty"` - InstallationCommand *string `json:"installation_command,omitempty"` - Metadata *PackageMetadata `json:"metadata,omitempty"` - PackageHTMLURL *string `json:"package_html_url,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` -} - -func (pv PackageVersion) String() string { - return Stringify(pv) -} - -// PackageRelease represents a GitHub package version release. -type PackageRelease struct { - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - ID *int64 `json:"id,omitempty"` - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - Name *string `json:"name,omitempty"` - Draft *bool `json:"draft,omitempty"` - Author *User `json:"author,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PublishedAt *Timestamp `json:"published_at,omitempty"` -} - -func (r PackageRelease) String() string { - return Stringify(r) -} - -// PackageFile represents a GitHub package version release file. -type PackageFile struct { - DownloadURL *string `json:"download_url,omitempty"` - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - SHA256 *string `json:"sha256,omitempty"` - SHA1 *string `json:"sha1,omitempty"` - MD5 *string `json:"md5,omitempty"` - ContentType *string `json:"content_type,omitempty"` - State *string `json:"state,omitempty"` - Author *User `json:"author,omitempty"` - Size *int64 `json:"size,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -func (pf PackageFile) String() string { - return Stringify(pf) -} - -// PackageRegistry represents a GitHub package registry. 
-type PackageRegistry struct { - AboutURL *string `json:"about_url,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - URL *string `json:"url,omitempty"` - Vendor *string `json:"vendor,omitempty"` -} - -func (r PackageRegistry) String() string { - return Stringify(r) -} - -// PackageListOptions represents the optional list options for a package. -type PackageListOptions struct { - // Visibility of packages "public", "internal" or "private". - Visibility *string `url:"visibility,omitempty"` - - // PackageType represents the type of package. - // It can be one of "npm", "maven", "rubygems", "nuget", "docker", or "container". - PackageType *string `url:"package_type,omitempty"` - - // State of package either "active" or "deleted". - State *string `url:"state,omitempty"` - - ListOptions -} - -// PackageMetadata represents metadata from a package. -type PackageMetadata struct { - PackageType *string `json:"package_type,omitempty"` - Container *PackageContainerMetadata `json:"container,omitempty"` -} - -func (r PackageMetadata) String() string { - return Stringify(r) -} - -// PackageContainerMetadata represents container metadata for docker container packages. -type PackageContainerMetadata struct { - Tags []string `json:"tags,omitempty"` -} - -func (r PackageContainerMetadata) String() string { - return Stringify(r) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/projects.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/projects.go deleted file mode 100644 index df7ad6cd97..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/projects.go +++ /dev/null @@ -1,596 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ProjectsService provides access to the projects functions in the -// GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/projects -type ProjectsService service - -// Project represents a GitHub Project. -type Project struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - ColumnsURL *string `json:"columns_url,omitempty"` - OwnerURL *string `json:"owner_url,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - NodeID *string `json:"node_id,omitempty"` - OrganizationPermission *string `json:"organization_permission,omitempty"` - Private *bool `json:"private,omitempty"` - - // The User object that generated the project. - Creator *User `json:"creator,omitempty"` -} - -func (p Project) String() string { - return Stringify(p) -} - -// GetProject gets a GitHub Project for a repo. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#get-a-project -func (s *ProjectsService) GetProject(ctx context.Context, id int64) (*Project, *Response, error) { - u := fmt.Sprintf("projects/%v", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. 
- req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} - -// ProjectOptions specifies the parameters to the -// RepositoriesService.CreateProject and -// ProjectsService.UpdateProject methods. -type ProjectOptions struct { - // The name of the project. (Required for creation; optional for update.) - Name *string `json:"name,omitempty"` - // The body of the project. (Optional.) - Body *string `json:"body,omitempty"` - - // The following field(s) are only applicable for update. - // They should be left with zero values for creation. - - // State of the project. Either "open" or "closed". (Optional.) - State *string `json:"state,omitempty"` - // The permission level that all members of the project's organization - // will have on this project. - // Setting the organization permission is only available - // for organization projects. (Optional.) - OrganizationPermission *string `json:"organization_permission,omitempty"` - // Sets visibility of the project within the organization. - // Setting visibility is only available - // for organization projects.(Optional.) - Private *bool `json:"private,omitempty"` -} - -// UpdateProject updates a repository project. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#update-a-project -func (s *ProjectsService) UpdateProject(ctx context.Context, id int64, opts *ProjectOptions) (*Project, *Response, error) { - u := fmt.Sprintf("projects/%v", id) - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} - -// DeleteProject deletes a GitHub Project from a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#delete-a-project -func (s *ProjectsService) DeleteProject(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("projects/%v", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ProjectColumn represents a column of a GitHub Project. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/projects/ -type ProjectColumn struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - ProjectURL *string `json:"project_url,omitempty"` - CardsURL *string `json:"cards_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// ListProjectColumns lists the columns of a GitHub Project for a repo. 
-// -// GitHub API docs: https://docs.github.com/en/rest/projects/columns#list-project-columns -func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int64, opts *ListOptions) ([]*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/%v/columns", projectID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - columns := []*ProjectColumn{} - resp, err := s.client.Do(ctx, req, &columns) - if err != nil { - return nil, resp, err - } - - return columns, resp, nil -} - -// GetProjectColumn gets a column of a GitHub Project for a repo. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/columns#get-a-project-column -func (s *ProjectsService) GetProjectColumn(ctx context.Context, id int64) (*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/columns/%v", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - column := &ProjectColumn{} - resp, err := s.client.Do(ctx, req, column) - if err != nil { - return nil, resp, err - } - - return column, resp, nil -} - -// ProjectColumnOptions specifies the parameters to the -// ProjectsService.CreateProjectColumn and -// ProjectsService.UpdateProjectColumn methods. -type ProjectColumnOptions struct { - // The name of the project column. (Required for creation and update.) - Name string `json:"name"` -} - -// CreateProjectColumn creates a column for the specified (by number) project. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/columns#create-a-project-column -func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int64, opts *ProjectColumnOptions) (*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/%v/columns", projectID) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - column := &ProjectColumn{} - resp, err := s.client.Do(ctx, req, column) - if err != nil { - return nil, resp, err - } - - return column, resp, nil -} - -// UpdateProjectColumn updates a column of a GitHub Project. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/columns#update-an-existing-project-column -func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int64, opts *ProjectColumnOptions) (*ProjectColumn, *Response, error) { - u := fmt.Sprintf("projects/columns/%v", columnID) - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - column := &ProjectColumn{} - resp, err := s.client.Do(ctx, req, column) - if err != nil { - return nil, resp, err - } - - return column, resp, nil -} - -// DeleteProjectColumn deletes a column from a GitHub Project. 
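A sketch of the column APIs above in use, assuming an authenticated client and an existing classic-project ID; the column name is a placeholder, and MoveProjectColumn is deleted just below.

    package example

    import (
        "context"

        "github.com/google/go-github/v53/github"
    )

    // addReviewColumn creates a column and moves it to the front of the board.
    func addReviewColumn(ctx context.Context, gh *github.Client, projectID int64) error {
        col, _, err := gh.Projects.CreateProjectColumn(ctx, projectID, &github.ProjectColumnOptions{Name: "In Review"})
        if err != nil {
            return err
        }
        // "first", "last", or "after:<column-id>" are the accepted positions.
        _, err = gh.Projects.MoveProjectColumn(ctx, col.GetID(), &github.ProjectColumnMoveOptions{Position: "first"})
        return err
    }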
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#delete-a-project-column
-func (s *ProjectsService) DeleteProjectColumn(ctx context.Context, columnID int64) (*Response, error) {
-	u := fmt.Sprintf("projects/columns/%v", columnID)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeProjectsPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ProjectColumnMoveOptions specifies the parameters to the
-// ProjectsService.MoveProjectColumn method.
-type ProjectColumnMoveOptions struct {
-	// Position can be one of "first", "last", or "after:<column-id>", where
-	// <column-id> is the ID of a column in the same project. (Required.)
-	Position string `json:"position"`
-}
-
-// MoveProjectColumn moves a column within a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#move-a-project-column
-func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int64, opts *ProjectColumnMoveOptions) (*Response, error) {
-	u := fmt.Sprintf("projects/columns/%v/moves", columnID)
-	req, err := s.client.NewRequest("POST", u, opts)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeProjectsPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ProjectCard represents a card in a column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards/#get-a-project-card
-type ProjectCard struct {
-	URL        *string    `json:"url,omitempty"`
-	ColumnURL  *string    `json:"column_url,omitempty"`
-	ContentURL *string    `json:"content_url,omitempty"`
-	ID         *int64     `json:"id,omitempty"`
-	Note       *string    `json:"note,omitempty"`
-	Creator    *User      `json:"creator,omitempty"`
-	CreatedAt  *Timestamp `json:"created_at,omitempty"`
-	UpdatedAt  *Timestamp `json:"updated_at,omitempty"`
-	NodeID     *string    `json:"node_id,omitempty"`
-	Archived   *bool      `json:"archived,omitempty"`
-
-	// The following fields are only populated by Webhook events.
-	ColumnID *int64 `json:"column_id,omitempty"`
-
-	// The following fields are only populated by Events API.
-	ProjectID          *int64  `json:"project_id,omitempty"`
-	ProjectURL         *string `json:"project_url,omitempty"`
-	ColumnName         *string `json:"column_name,omitempty"`
-	PreviousColumnName *string `json:"previous_column_name,omitempty"` // Populated in "moved_columns_in_project" event deliveries.
-}
-
-// ProjectCardListOptions specifies the optional parameters to the
-// ProjectsService.ListProjectCards method.
-type ProjectCardListOptions struct {
-	// ArchivedState is used to list all, archived, or not_archived project cards.
-	// Defaults to not_archived when you omit this parameter.
-	ArchivedState *string `url:"archived_state,omitempty"`
-
-	ListOptions
-}
-
-// ListProjectCards lists the cards in a column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#list-project-cards
-func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int64, opts *ProjectCardListOptions) ([]*ProjectCard, *Response, error) {
-	u := fmt.Sprintf("projects/columns/%v/cards", columnID)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview) - - cards := []*ProjectCard{} - resp, err := s.client.Do(ctx, req, &cards) - if err != nil { - return nil, resp, err - } - - return cards, resp, nil -} - -// GetProjectCard gets a card in a column of a GitHub Project. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/cards#get-a-project-card -func (s *ProjectsService) GetProjectCard(ctx context.Context, cardID int64) (*ProjectCard, *Response, error) { - u := fmt.Sprintf("projects/columns/cards/%v", cardID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - card := &ProjectCard{} - resp, err := s.client.Do(ctx, req, card) - if err != nil { - return nil, resp, err - } - - return card, resp, nil -} - -// ProjectCardOptions specifies the parameters to the -// ProjectsService.CreateProjectCard and -// ProjectsService.UpdateProjectCard methods. -type ProjectCardOptions struct { - // The note of the card. Note and ContentID are mutually exclusive. - Note string `json:"note,omitempty"` - // The ID (not Number) of the Issue to associate with this card. - // Note and ContentID are mutually exclusive. - ContentID int64 `json:"content_id,omitempty"` - // The type of content to associate with this card. Possible values are: "Issue" and "PullRequest". - ContentType string `json:"content_type,omitempty"` - // Use true to archive a project card. - // Specify false if you need to restore a previously archived project card. - Archived *bool `json:"archived,omitempty"` -} - -// CreateProjectCard creates a card in the specified column of a GitHub Project. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/cards#create-a-project-card -func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int64, opts *ProjectCardOptions) (*ProjectCard, *Response, error) { - u := fmt.Sprintf("projects/columns/%v/cards", columnID) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - card := &ProjectCard{} - resp, err := s.client.Do(ctx, req, card) - if err != nil { - return nil, resp, err - } - - return card, resp, nil -} - -// UpdateProjectCard updates a card of a GitHub Project. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/cards#update-an-existing-project-card -func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int64, opts *ProjectCardOptions) (*ProjectCard, *Response, error) { - u := fmt.Sprintf("projects/columns/cards/%v", cardID) - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - card := &ProjectCard{} - resp, err := s.client.Do(ctx, req, card) - if err != nil { - return nil, resp, err - } - - return card, resp, nil -} - -// DeleteProjectCard deletes a card from a GitHub Project. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#delete-a-project-card
-func (s *ProjectsService) DeleteProjectCard(ctx context.Context, cardID int64) (*Response, error) {
-	u := fmt.Sprintf("projects/columns/cards/%v", cardID)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeProjectsPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ProjectCardMoveOptions specifies the parameters to the
-// ProjectsService.MoveProjectCard method.
-type ProjectCardMoveOptions struct {
-	// Position can be one of "top", "bottom", or "after:<card-id>", where
-	// <card-id> is the ID of a card in the same project.
-	Position string `json:"position"`
-	// ColumnID is the ID of a column in the same project. Note that ColumnID
-	// is required when using Position "after:<card-id>" when that card is in
-	// another column; otherwise it is optional.
-	ColumnID int64 `json:"column_id,omitempty"`
-}
-
-// MoveProjectCard moves a card within a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#move-a-project-card
-func (s *ProjectsService) MoveProjectCard(ctx context.Context, cardID int64, opts *ProjectCardMoveOptions) (*Response, error) {
-	u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID)
-	req, err := s.client.NewRequest("POST", u, opts)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeProjectsPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ProjectCollaboratorOptions specifies the optional parameters to the
-// ProjectsService.AddProjectCollaborator method.
-type ProjectCollaboratorOptions struct {
-	// Permission specifies the permission to grant to the collaborator.
-	// Possible values are:
-	//     "read" - can read, but not write to or administer this project.
-	//     "write" - can read and write, but not administer this project.
-	//     "admin" - can read, write and administer this project.
-	//
-	// Default value is "write".
-	Permission *string `json:"permission,omitempty"`
-}
-
-// AddProjectCollaborator adds a collaborator to an organization project and sets
-// their permission level. You must be an organization owner or a project admin to add a collaborator.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#add-project-collaborator
-func (s *ProjectsService) AddProjectCollaborator(ctx context.Context, id int64, username string, opts *ProjectCollaboratorOptions) (*Response, error) {
-	u := fmt.Sprintf("projects/%v/collaborators/%v", id, username)
-	req, err := s.client.NewRequest("PUT", u, opts)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeProjectsPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// RemoveProjectCollaborator removes a collaborator from an organization project.
-// You must be an organization owner or a project admin to remove a collaborator.
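A sketch of the card-move options in use, assuming existing card and column IDs and an authenticated client; the "top" position is just one of the accepted values documented above.

    package example

    import (
        "context"

        "github.com/google/go-github/v53/github"
    )

    // moveCardToColumn sends a card to the top of another column.
    func moveCardToColumn(ctx context.Context, gh *github.Client, cardID, columnID int64) error {
        opts := &github.ProjectCardMoveOptions{
            Position: "top",
            ColumnID: columnID, // only strictly required for cross-column "after:<card-id>" moves
        }
        _, err := gh.Projects.MoveProjectCard(ctx, cardID, opts)
        return err
    }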
-// -// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#remove-user-as-a-collaborator -func (s *ProjectsService) RemoveProjectCollaborator(ctx context.Context, id int64, username string) (*Response, error) { - u := fmt.Sprintf("projects/%v/collaborators/%v", id, username) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - return s.client.Do(ctx, req, nil) -} - -// ListCollaboratorOptions specifies the optional parameters to the -// ProjectsService.ListProjectCollaborators method. -type ListCollaboratorOptions struct { - // Affiliation specifies how collaborators should be filtered by their affiliation. - // Possible values are: - // "outside" - All outside collaborators of an organization-owned repository - // "direct" - All collaborators with permissions to an organization-owned repository, - // regardless of organization membership status - // "all" - All collaborators the authenticated user can see - // - // Default value is "all". - Affiliation *string `url:"affiliation,omitempty"` - - ListOptions -} - -// ListProjectCollaborators lists the collaborators for an organization project. For a project, -// the list of collaborators includes outside collaborators, organization members that are direct -// collaborators, organization members with access through team memberships, organization members -// with access through default organization permissions, and organization owners. You must be an -// organization owner or a project admin to list collaborators. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#list-project-collaborators -func (s *ProjectsService) ListProjectCollaborators(ctx context.Context, id int64, opts *ListCollaboratorOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("projects/%v/collaborators", id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} - -// ProjectPermissionLevel represents the permission level an organization -// member has for a given project. -type ProjectPermissionLevel struct { - // Possible values: "admin", "write", "read", "none" - Permission *string `json:"permission,omitempty"` - - User *User `json:"user,omitempty"` -} - -// ReviewProjectCollaboratorPermission returns the collaborator's permission level for an organization -// project. Possible values for the permission key: "admin", "write", "read", "none". -// You must be an organization owner or a project admin to review a user's permission level. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#get-project-permission-for-a-user -func (s *ProjectsService) ReviewProjectCollaboratorPermission(ctx context.Context, id int64, username string) (*ProjectPermissionLevel, *Response, error) { - u := fmt.Sprintf("projects/%v/collaborators/%v/permission", id, username) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeProjectsPreview) - - ppl := new(ProjectPermissionLevel) - resp, err := s.client.Do(ctx, req, ppl) - if err != nil { - return nil, resp, err - } - return ppl, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls.go deleted file mode 100644 index 6e49eba2f9..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls.go +++ /dev/null @@ -1,485 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" -) - -// PullRequestsService handles communication with the pull request related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/ -type PullRequestsService service - -// PullRequestAutoMerge represents the "auto_merge" response for a PullRequest. -type PullRequestAutoMerge struct { - EnabledBy *User `json:"enabled_by,omitempty"` - MergeMethod *string `json:"merge_method,omitempty"` - CommitTitle *string `json:"commit_title,omitempty"` - CommitMessage *string `json:"commit_message,omitempty"` -} - -// PullRequest represents a GitHub pull request on a repository. -type PullRequest struct { - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Locked *bool `json:"locked,omitempty"` - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - ClosedAt *Timestamp `json:"closed_at,omitempty"` - MergedAt *Timestamp `json:"merged_at,omitempty"` - Labels []*Label `json:"labels,omitempty"` - User *User `json:"user,omitempty"` - Draft *bool `json:"draft,omitempty"` - Merged *bool `json:"merged,omitempty"` - Mergeable *bool `json:"mergeable,omitempty"` - MergeableState *string `json:"mergeable_state,omitempty"` - MergedBy *User `json:"merged_by,omitempty"` - MergeCommitSHA *string `json:"merge_commit_sha,omitempty"` - Rebaseable *bool `json:"rebaseable,omitempty"` - Comments *int `json:"comments,omitempty"` - Commits *int `json:"commits,omitempty"` - Additions *int `json:"additions,omitempty"` - Deletions *int `json:"deletions,omitempty"` - ChangedFiles *int `json:"changed_files,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - IssueURL *string `json:"issue_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - DiffURL *string `json:"diff_url,omitempty"` - PatchURL *string `json:"patch_url,omitempty"` - CommitsURL *string `json:"commits_url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - ReviewCommentsURL *string `json:"review_comments_url,omitempty"` - ReviewCommentURL *string `json:"review_comment_url,omitempty"` - ReviewComments *int `json:"review_comments,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Assignees []*User `json:"assignees,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"` - AuthorAssociation *string `json:"author_association,omitempty"` - NodeID *string `json:"node_id,omitempty"` - RequestedReviewers []*User `json:"requested_reviewers,omitempty"` - AutoMerge *PullRequestAutoMerge `json:"auto_merge,omitempty"` - - // RequestedTeams 
is populated as part of the PullRequestEvent. - // See, https://docs.github.com/en/developers/webhooks-and-events/github-event-types#pullrequestevent for an example. - RequestedTeams []*Team `json:"requested_teams,omitempty"` - - Links *PRLinks `json:"_links,omitempty"` - Head *PullRequestBranch `json:"head,omitempty"` - Base *PullRequestBranch `json:"base,omitempty"` - - // ActiveLockReason is populated only when LockReason is provided while locking the pull request. - // Possible values are: "off-topic", "too heated", "resolved", and "spam". - ActiveLockReason *string `json:"active_lock_reason,omitempty"` -} - -func (p PullRequest) String() string { - return Stringify(p) -} - -// PRLink represents a single link object from GitHub pull request _links. -type PRLink struct { - HRef *string `json:"href,omitempty"` -} - -// PRLinks represents the "_links" object in a GitHub pull request. -type PRLinks struct { - Self *PRLink `json:"self,omitempty"` - HTML *PRLink `json:"html,omitempty"` - Issue *PRLink `json:"issue,omitempty"` - Comments *PRLink `json:"comments,omitempty"` - ReviewComments *PRLink `json:"review_comments,omitempty"` - ReviewComment *PRLink `json:"review_comment,omitempty"` - Commits *PRLink `json:"commits,omitempty"` - Statuses *PRLink `json:"statuses,omitempty"` -} - -// PullRequestBranch represents a base or head branch in a GitHub pull request. -type PullRequestBranch struct { - Label *string `json:"label,omitempty"` - Ref *string `json:"ref,omitempty"` - SHA *string `json:"sha,omitempty"` - Repo *Repository `json:"repo,omitempty"` - User *User `json:"user,omitempty"` -} - -// PullRequestListOptions specifies the optional parameters to the -// PullRequestsService.List method. -type PullRequestListOptions struct { - // State filters pull requests based on their state. Possible values are: - // open, closed, all. Default is "open". - State string `url:"state,omitempty"` - - // Head filters pull requests by head user and branch name in the format of: - // "user:ref-name". - Head string `url:"head,omitempty"` - - // Base filters pull requests by base branch name. - Base string `url:"base,omitempty"` - - // Sort specifies how to sort pull requests. Possible values are: created, - // updated, popularity, long-running. Default is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort pull requests. Possible values are: asc, desc. - // If Sort is "created" or not specified, Default is "desc", otherwise Default - // is "asc" - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// List the pull requests for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#list-pull-requests -func (s *PullRequestsService) List(ctx context.Context, owner string, repo string, opts *PullRequestListOptions) ([]*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pulls []*PullRequest - resp, err := s.client.Do(ctx, req, &pulls) - if err != nil { - return nil, resp, err - } - - return pulls, resp, nil -} - -// ListPullRequestsWithCommit returns pull requests associated with a commit SHA. -// -// The results may include open and closed pull requests. -// By default, the PullRequestListOptions State filters for "open". 
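A minimal sketch of the List call and its options struct shown above; "my-org" and "my-repo" are placeholders, and sorting by least-recently-updated is just one plausible choice.

    package example

    import (
        "context"

        "github.com/google/go-github/v53/github"
    )

    // stalePRs returns open pull requests against main, least recently updated first.
    func stalePRs(ctx context.Context, gh *github.Client) ([]*github.PullRequest, error) {
        opts := &github.PullRequestListOptions{
            State:       "open",
            Base:        "main",
            Sort:        "updated",
            Direction:   "asc",
            ListOptions: github.ListOptions{PerPage: 100},
        }
        pulls, _, err := gh.PullRequests.List(ctx, "my-org", "my-repo", opts)
        return pulls, err
    }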
-// -// GitHub API docs: https://docs.github.com/en/rest/commits/commits#list-pull-requests-associated-with-a-commit -func (s *PullRequestsService) ListPullRequestsWithCommit(ctx context.Context, owner, repo, sha string, opts *PullRequestListOptions) ([]*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/pulls", owner, repo, sha) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeListPullsOrBranchesForCommitPreview) - var pulls []*PullRequest - resp, err := s.client.Do(ctx, req, &pulls) - if err != nil { - return nil, resp, err - } - - return pulls, resp, nil -} - -// Get a single pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#get-a-pull-request -func (s *PullRequestsService) Get(ctx context.Context, owner string, repo string, number int) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - pull := new(PullRequest) - resp, err := s.client.Do(ctx, req, pull) - if err != nil { - return nil, resp, err - } - - return pull, resp, nil -} - -// GetRaw gets a single pull request in raw (diff or patch) format. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#get-a-pull-request -func (s *PullRequestsService) GetRaw(ctx context.Context, owner string, repo string, number int, opts RawOptions) (string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - - switch opts.Type { - case Diff: - req.Header.Set("Accept", mediaTypeV3Diff) - case Patch: - req.Header.Set("Accept", mediaTypeV3Patch) - default: - return "", nil, fmt.Errorf("unsupported raw type %d", opts.Type) - } - - var buf bytes.Buffer - resp, err := s.client.Do(ctx, req, &buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// NewPullRequest represents a new pull request to be created. -type NewPullRequest struct { - Title *string `json:"title,omitempty"` - Head *string `json:"head,omitempty"` - Base *string `json:"base,omitempty"` - Body *string `json:"body,omitempty"` - Issue *int `json:"issue,omitempty"` - MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"` - Draft *bool `json:"draft,omitempty"` -} - -// Create a new pull request on the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#create-a-pull-request -func (s *PullRequestsService) Create(ctx context.Context, owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) - req, err := s.client.NewRequest("POST", u, pull) - if err != nil { - return nil, nil, err - } - - p := new(PullRequest) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// PullRequestBranchUpdateOptions specifies the optional parameters to the -// PullRequestsService.UpdateBranch method. -type PullRequestBranchUpdateOptions struct { - // ExpectedHeadSHA specifies the most recent commit on the pull request's branch. - // Default value is the SHA of the pull request's current HEAD ref. 
- ExpectedHeadSHA *string `json:"expected_head_sha,omitempty"` -} - -// PullRequestBranchUpdateResponse specifies the response of pull request branch update. -type PullRequestBranchUpdateResponse struct { - Message *string `json:"message,omitempty"` - URL *string `json:"url,omitempty"` -} - -// UpdateBranch updates the pull request branch with latest upstream changes. -// -// This method might return an AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it has now scheduled the update of the pull request branch in a background task. -// A follow up request, after a delay of a second or so, should result -// in a successful request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#update-a-pull-request-branch -func (s *PullRequestsService) UpdateBranch(ctx context.Context, owner, repo string, number int, opts *PullRequestBranchUpdateOptions) (*PullRequestBranchUpdateResponse, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/update-branch", owner, repo, number) - - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeUpdatePullRequestBranchPreview) - - p := new(PullRequestBranchUpdateResponse) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -type pullRequestUpdate struct { - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - State *string `json:"state,omitempty"` - Base *string `json:"base,omitempty"` - MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"` -} - -// Edit a pull request. -// pull must not be nil. -// -// The following fields are editable: Title, Body, State, Base.Ref and MaintainerCanModify. -// Base.Ref updates the base branch of the pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#update-a-pull-request -func (s *PullRequestsService) Edit(ctx context.Context, owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) { - if pull == nil { - return nil, nil, fmt.Errorf("pull must be provided") - } - - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - - update := &pullRequestUpdate{ - Title: pull.Title, - Body: pull.Body, - State: pull.State, - MaintainerCanModify: pull.MaintainerCanModify, - } - // avoid updating the base branch when closing the Pull Request - // - otherwise the GitHub API server returns a "Validation Failed" error: - // "Cannot change base branch of closed pull request". - if pull.Base != nil && pull.GetState() != "closed" { - update.Base = pull.Base.Ref - } - - req, err := s.client.NewRequest("PATCH", u, update) - if err != nil { - return nil, nil, err - } - - p := new(PullRequest) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ListCommits lists the commits in a pull request. 
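For context on `ListCommits`, whose deleted body follows: a minimal pagination sketch using the library's `ListOptions` / `Response.NextPage` loop. The owner, repo, and PR number are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated is enough for public repos

	opts := &github.ListOptions{PerPage: 50}
	for {
		commits, resp, err := client.PullRequests.ListCommits(ctx, "octocat", "hello-world", 1, opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range commits {
			fmt.Println(c.GetSHA())
		}
		if resp.NextPage == 0 {
			break // no more pages
		}
		opts.Page = resp.NextPage
	}
}
```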
-// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#list-commits-on-a-pull-request -func (s *PullRequestsService) ListCommits(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/commits", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var commits []*RepositoryCommit - resp, err := s.client.Do(ctx, req, &commits) - if err != nil { - return nil, resp, err - } - - return commits, resp, nil -} - -// ListFiles lists the files in a pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#list-pull-requests-files -func (s *PullRequestsService) ListFiles(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*CommitFile, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/files", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var commitFiles []*CommitFile - resp, err := s.client.Do(ctx, req, &commitFiles) - if err != nil { - return nil, resp, err - } - - return commitFiles, resp, nil -} - -// IsMerged checks if a pull request has been merged. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#check-if-a-pull-request-has-been-merged -func (s *PullRequestsService) IsMerged(ctx context.Context, owner string, repo string, number int) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - merged, err := parseBoolResponse(err) - return merged, resp, err -} - -// PullRequestMergeResult represents the result of merging a pull request. -type PullRequestMergeResult struct { - SHA *string `json:"sha,omitempty"` - Merged *bool `json:"merged,omitempty"` - Message *string `json:"message,omitempty"` -} - -// PullRequestOptions lets you define how a pull request will be merged. -type PullRequestOptions struct { - CommitTitle string // Title for the automatic commit message. (Optional.) - SHA string // SHA that pull request head must match to allow merge. (Optional.) - - // The merge method to use. Possible values include: "merge", "squash", and "rebase" with the default being merge. (Optional.) - MergeMethod string - - // If false, an empty string commit message will use the default commit message. If true, an empty string commit message will be used. - DontDefaultIfBlank bool -} - -type pullRequestMergeRequest struct { - CommitMessage *string `json:"commit_message,omitempty"` - CommitTitle string `json:"commit_title,omitempty"` - MergeMethod string `json:"merge_method,omitempty"` - SHA string `json:"sha,omitempty"` -} - -// Merge a pull request. -// commitMessage is an extra detail to append to automatic commit message. 
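A hedged sketch of the `Merge` call deleted above, showing the optional `PullRequestOptions` knobs; the repo coordinates, SHA, and token handling are placeholders. Passing `SHA` is effectively an optimistic-concurrency guard, per the field's own doc comment.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN")) // placeholder token source

	// The SHA makes the merge fail if the PR head moved since we inspected it.
	result, _, err := client.PullRequests.Merge(ctx, "octocat", "hello-world", 1,
		"extra detail appended to the merge commit message",
		&github.PullRequestOptions{
			MergeMethod: "squash",
			SHA:         "6dcb09b5b57875f334f61aebed695e2e4193db5e",
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result.GetMerged(), result.GetSHA(), result.GetMessage())
}
```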
-// -// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#merge-a-pull-request -func (s *PullRequestsService) Merge(ctx context.Context, owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) - - pullRequestBody := &pullRequestMergeRequest{} - if commitMessage != "" { - pullRequestBody.CommitMessage = &commitMessage - } - if options != nil { - pullRequestBody.CommitTitle = options.CommitTitle - pullRequestBody.MergeMethod = options.MergeMethod - pullRequestBody.SHA = options.SHA - if options.DontDefaultIfBlank && commitMessage == "" { - pullRequestBody.CommitMessage = &commitMessage - } - } - req, err := s.client.NewRequest("PUT", u, pullRequestBody) - if err != nil { - return nil, nil, err - } - - mergeResult := new(PullRequestMergeResult) - resp, err := s.client.Do(ctx, req, mergeResult) - if err != nil { - return nil, resp, err - } - - return mergeResult, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_comments.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_comments.go deleted file mode 100644 index 1f6b726d85..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_comments.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strings" - "time" -) - -// PullRequestComment represents a comment left on a pull request. -type PullRequestComment struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - InReplyTo *int64 `json:"in_reply_to_id,omitempty"` - Body *string `json:"body,omitempty"` - Path *string `json:"path,omitempty"` - DiffHunk *string `json:"diff_hunk,omitempty"` - PullRequestReviewID *int64 `json:"pull_request_review_id,omitempty"` - Position *int `json:"position,omitempty"` - OriginalPosition *int `json:"original_position,omitempty"` - StartLine *int `json:"start_line,omitempty"` - Line *int `json:"line,omitempty"` - OriginalLine *int `json:"original_line,omitempty"` - OriginalStartLine *int `json:"original_start_line,omitempty"` - Side *string `json:"side,omitempty"` - StartSide *string `json:"start_side,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - OriginalCommitID *string `json:"original_commit_id,omitempty"` - User *User `json:"user,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - // AuthorAssociation is the comment author's relationship to the pull request's repository. - // Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE". - AuthorAssociation *string `json:"author_association,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - PullRequestURL *string `json:"pull_request_url,omitempty"` -} - -func (p PullRequestComment) String() string { - return Stringify(p) -} - -// PullRequestListCommentsOptions specifies the optional parameters to the -// PullRequestsService.ListComments method. -type PullRequestListCommentsOptions struct { - // Sort specifies how to sort comments. Possible values are: created, updated. 
- Sort string `url:"sort,omitempty"` - - // Direction in which to sort comments. Possible values are: asc, desc. - Direction string `url:"direction,omitempty"` - - // Since filters comments by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListComments lists all comments on the specified pull request. Specifying a -// pull request number of 0 will return all comments on all pull requests for -// the repository. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#list-review-comments-on-a-pull-request -// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#list-review-comments-in-a-repository -func (s *PullRequestsService) ListComments(ctx context.Context, owner, repo string, number int, opts *PullRequestListCommentsOptions) ([]*PullRequestComment, *Response, error) { - var u string - if number == 0 { - u = fmt.Sprintf("repos/%v/%v/pulls/comments", owner, repo) - } else { - u = fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeReactionsPreview, mediaTypeMultiLineCommentsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var comments []*PullRequestComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetComment fetches the specified pull request comment. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#get-a-review-comment-for-a-pull-request -func (s *PullRequestsService) GetComment(ctx context.Context, owner, repo string, commentID int64) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeReactionsPreview, mediaTypeMultiLineCommentsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - comment := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, comment) - if err != nil { - return nil, resp, err - } - - return comment, resp, nil -} - -// CreateComment creates a new comment on the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#create-a-review-comment-for-a-pull-request -func (s *PullRequestsService) CreateComment(ctx context.Context, owner, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - // TODO: remove custom Accept headers when their respective API fully launches. - acceptHeaders := []string{mediaTypeReactionsPreview, mediaTypeMultiLineCommentsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - c := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// CreateCommentInReplyTo creates a new comment as a reply to an existing pull request comment. 
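Usage sketch for `CreateCommentInReplyTo`, whose removed implementation follows; the PR number, comment ID, and repo coordinates are hypothetical.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// 42 is a placeholder ID of the review comment being replied to.
	reply, _, err := client.PullRequests.CreateCommentInReplyTo(
		ctx, "octocat", "hello-world", 1, "Good catch, fixed in the next push.", 42)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(reply.GetHTMLURL())
}
```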
-// -// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#create-a-review-comment-for-a-pull-request -func (s *PullRequestsService) CreateCommentInReplyTo(ctx context.Context, owner, repo string, number int, body string, commentID int64) (*PullRequestComment, *Response, error) { - comment := &struct { - Body string `json:"body,omitempty"` - InReplyTo int64 `json:"in_reply_to,omitempty"` - }{ - Body: body, - InReplyTo: commentID, - } - u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// EditComment updates a pull request comment. -// A non-nil comment.Body must be provided. Other comment fields should be left nil. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#update-a-review-comment-for-a-pull-request -func (s *PullRequestsService) EditComment(ctx context.Context, owner, repo string, commentID int64, comment *PullRequestComment) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(PullRequestComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes a pull request comment. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#delete-a-review-comment-for-a-pull-request -func (s *PullRequestsService) DeleteComment(ctx context.Context, owner, repo string, commentID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_reviewers.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_reviewers.go deleted file mode 100644 index 1c336540b8..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_reviewers.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ReviewersRequest specifies users and teams for a pull request review request. -type ReviewersRequest struct { - NodeID *string `json:"node_id,omitempty"` - Reviewers []string `json:"reviewers,omitempty"` - TeamReviewers []string `json:"team_reviewers,omitempty"` -} - -// Reviewers represents reviewers of a pull request. -type Reviewers struct { - Users []*User `json:"users,omitempty"` - Teams []*Team `json:"teams,omitempty"` -} - -// RequestReviewers creates a review request for the provided reviewers for the specified pull request. 
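A sketch of `RequestReviewers`, whose removed implementation follows, with assumed user logins and team slugs; nothing here comes from this repository.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	pr, _, err := client.PullRequests.RequestReviewers(ctx, "octocat", "hello-world", 1,
		github.ReviewersRequest{
			Reviewers:     []string{"hubot"},          // user logins
			TeamReviewers: []string{"justice-league"}, // team slugs
		})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range pr.RequestedReviewers {
		fmt.Println("requested:", u.GetLogin())
	}
}
```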
-// -// GitHub API docs: https://docs.github.com/en/rest/pulls/review-requests#request-reviewers-for-a-pull-request -func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number) - req, err := s.client.NewRequest("POST", u, &reviewers) - if err != nil { - return nil, nil, err - } - - r := new(PullRequest) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// ListReviewers lists reviewers whose reviews have been requested on the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/review-requests#list-requested-reviewers-for-a-pull-request -func (s *PullRequestsService) ListReviewers(ctx context.Context, owner, repo string, number int, opts *ListOptions) (*Reviewers, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/requested_reviewers", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - reviewers := new(Reviewers) - resp, err := s.client.Do(ctx, req, reviewers) - if err != nil { - return nil, resp, err - } - - return reviewers, resp, nil -} - -// RemoveReviewers removes the review request for the provided reviewers for the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/review-requests#remove-requested-reviewers-from-a-pull-request -func (s *PullRequestsService) RemoveReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, &reviewers) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_reviews.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_reviews.go deleted file mode 100644 index addcce4683..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_reviews.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "errors" - "fmt" -) - -var ErrMixedCommentStyles = errors.New("cannot use both position and side/line form comments") - -// PullRequestReview represents a review of a pull request. -type PullRequestReview struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - User *User `json:"user,omitempty"` - Body *string `json:"body,omitempty"` - SubmittedAt *Timestamp `json:"submitted_at,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - PullRequestURL *string `json:"pull_request_url,omitempty"` - State *string `json:"state,omitempty"` - // AuthorAssociation is the comment author's relationship to the issue's repository. - // Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE". 
- AuthorAssociation *string `json:"author_association,omitempty"` -} - -func (p PullRequestReview) String() string { - return Stringify(p) -} - -// DraftReviewComment represents a comment part of the review. -type DraftReviewComment struct { - Path *string `json:"path,omitempty"` - Position *int `json:"position,omitempty"` - Body *string `json:"body,omitempty"` - - // The new comfort-fade-preview fields - StartSide *string `json:"start_side,omitempty"` - Side *string `json:"side,omitempty"` - StartLine *int `json:"start_line,omitempty"` - Line *int `json:"line,omitempty"` -} - -func (c DraftReviewComment) String() string { - return Stringify(c) -} - -// PullRequestReviewRequest represents a request to create a review. -type PullRequestReviewRequest struct { - NodeID *string `json:"node_id,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - Body *string `json:"body,omitempty"` - Event *string `json:"event,omitempty"` - Comments []*DraftReviewComment `json:"comments,omitempty"` -} - -func (r PullRequestReviewRequest) String() string { - return Stringify(r) -} - -func (r *PullRequestReviewRequest) isComfortFadePreview() (bool, error) { - var isCF *bool - for _, comment := range r.Comments { - if comment == nil { - continue - } - hasPos := comment.Position != nil - hasComfortFade := (comment.StartSide != nil) || (comment.Side != nil) || - (comment.StartLine != nil) || (comment.Line != nil) - - switch { - case hasPos && hasComfortFade: - return false, ErrMixedCommentStyles - case hasPos && isCF != nil && *isCF: - return false, ErrMixedCommentStyles - case hasComfortFade && isCF != nil && !*isCF: - return false, ErrMixedCommentStyles - } - isCF = &hasComfortFade - } - if isCF != nil { - return *isCF, nil - } - return false, nil -} - -// PullRequestReviewDismissalRequest represents a request to dismiss a review. -type PullRequestReviewDismissalRequest struct { - Message *string `json:"message,omitempty"` -} - -func (r PullRequestReviewDismissalRequest) String() string { - return Stringify(r) -} - -// ListReviews lists all reviews on the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#list-reviews-for-a-pull-request -func (s *PullRequestsService) ListReviews(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var reviews []*PullRequestReview - resp, err := s.client.Do(ctx, req, &reviews) - if err != nil { - return nil, resp, err - } - - return reviews, resp, nil -} - -// GetReview fetches the specified pull request review. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#get-a-review-for-a-pull-request -func (s *PullRequestsService) GetReview(ctx context.Context, owner, repo string, number int, reviewID int64) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - review := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, review) - if err != nil { - return nil, resp, err - } - - return review, resp, nil -} - -// DeletePendingReview deletes the specified pull request pending review. 
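For completeness, the `DeletePendingReview` call summarized above (its removed implementation follows); the review ID is a placeholder, and only a review still in the pending state can be deleted.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// 80 is a placeholder review ID.
	review, _, err := client.PullRequests.DeletePendingReview(ctx, "octocat", "hello-world", 1, 80)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted review in state:", review.GetState())
}
```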
-// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#delete-a-pending-review-for-a-pull-request -func (s *PullRequestsService) DeletePendingReview(ctx context.Context, owner, repo string, number int, reviewID int64) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, nil, err - } - - review := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, review) - if err != nil { - return nil, resp, err - } - - return review, resp, nil -} - -// ListReviewComments lists all the comments for the specified review. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#list-comments-for-a-pull-request-review -func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, repo string, number int, reviewID int64, opts *ListOptions) ([]*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/comments", owner, repo, number, reviewID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var comments []*PullRequestComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// CreateReview creates a new review on the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#create-a-review-for-a-pull-request -// -// In order to use multi-line comments, you must use the "comfort fade" preview. -// This replaces the use of the "Position" field in comments with 4 new fields: -// -// [Start]Side, and [Start]Line. -// -// These new fields must be used for ALL comments (including single-line), -// with the following restrictions (empirically observed, so subject to change). -// -// For single-line "comfort fade" comments, you must use: -// -// Path: &path, // as before -// Body: &body, // as before -// Side: &"RIGHT" (or "LEFT") -// Line: &123, // NOT THE SAME AS POSITION, this is an actual line number. -// -// If StartSide or StartLine is used with single-line comments, a 422 is returned. -// -// For multi-line "comfort fade" comments, you must use: -// -// Path: &path, // as before -// Body: &body, // as before -// StartSide: &"RIGHT" (or "LEFT") -// Side: &"RIGHT" (or "LEFT") -// StartLine: &120, -// Line: &125, -// -// Suggested edits are made by commenting on the lines to replace, and including the -// suggested edit in a block like this (it may be surrounded in non-suggestion markdown): -// -// ```suggestion -// Use this instead. -// It is waaaaaay better. -// ``` -func (s *PullRequestsService) CreateReview(ctx context.Context, owner, repo string, number int, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number) - - req, err := s.client.NewRequest("POST", u, review) - if err != nil { - return nil, nil, err - } - - // Detect which style of review comment is being used. - if isCF, err := review.isComfortFadePreview(); err != nil { - return nil, nil, err - } else if isCF { - // If the review comments are using the comfort fade preview fields, - // then pass the comfort fade header. 
- req.Header.Set("Accept", mediaTypeMultiLineCommentsPreview) - } - - r := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// UpdateReview updates the review summary on the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#update-a-review-for-a-pull-request -func (s *PullRequestsService) UpdateReview(ctx context.Context, owner, repo string, number int, reviewID int64, body string) (*PullRequestReview, *Response, error) { - opts := &struct { - Body string `json:"body"` - }{Body: body} - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, nil, err - } - - review := &PullRequestReview{} - resp, err := s.client.Do(ctx, req, review) - if err != nil { - return nil, resp, err - } - - return review, resp, nil -} - -// SubmitReview submits a specified review on the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#submit-a-review-for-a-pull-request -func (s *PullRequestsService) SubmitReview(ctx context.Context, owner, repo string, number int, reviewID int64, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/events", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("POST", u, review) - if err != nil { - return nil, nil, err - } - - r := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// DismissReview dismisses a specified review on the specified pull request. -// -// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#dismiss-a-review-for-a-pull-request -func (s *PullRequestsService) DismissReview(ctx context.Context, owner, repo string, number int, reviewID int64, review *PullRequestReviewDismissalRequest) (*PullRequestReview, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/dismissals", owner, repo, number, reviewID) - - req, err := s.client.NewRequest("PUT", u, review) - if err != nil { - return nil, nil, err - } - - r := new(PullRequestReview) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_threads.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_threads.go deleted file mode 100644 index 23e924d88f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/pulls_threads.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// PullRequestThread represents a thread of comments on a pull request. 
-type PullRequestThread struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Comments []*PullRequestComment `json:"comments,omitempty"` -} - -func (p PullRequestThread) String() string { - return Stringify(p) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/reactions.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/reactions.go deleted file mode 100644 index 14d193ae88..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/reactions.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/http" -) - -// ReactionsService provides access to the reactions-related functions in the -// GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions -type ReactionsService service - -// Reaction represents a GitHub reaction. -type Reaction struct { - // ID is the Reaction ID. - ID *int64 `json:"id,omitempty"` - User *User `json:"user,omitempty"` - NodeID *string `json:"node_id,omitempty"` - // Content is the type of reaction. - // Possible values are: - // "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". - Content *string `json:"content,omitempty"` -} - -// Reactions represents a summary of GitHub reactions. -type Reactions struct { - TotalCount *int `json:"total_count,omitempty"` - PlusOne *int `json:"+1,omitempty"` - MinusOne *int `json:"-1,omitempty"` - Laugh *int `json:"laugh,omitempty"` - Confused *int `json:"confused,omitempty"` - Heart *int `json:"heart,omitempty"` - Hooray *int `json:"hooray,omitempty"` - Rocket *int `json:"rocket,omitempty"` - Eyes *int `json:"eyes,omitempty"` - URL *string `json:"url,omitempty"` -} - -func (r Reaction) String() string { - return Stringify(r) -} - -// ListCommentReactionOptions specifies the optional parameters to the -// ReactionsService.ListCommentReactions method. -type ListCommentReactionOptions struct { - // Content restricts the returned comment reactions to only those with the given type. - // Omit this parameter to list all reactions to a commit comment. - // Possible values are: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". - Content string `url:"content,omitempty"` - - ListOptions -} - -// ListCommentReactions lists the reactions for a commit comment. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-commit-comment -func (s *ReactionsService) ListCommentReactions(ctx context.Context, owner, repo string, id int64, opts *ListCommentReactionOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateCommentReaction creates a reaction for a commit comment. -// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. 
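Sketch of `CreateCommentReaction` as documented above, illustrating the 200-vs-201 distinction the removed comment calls out. The comment ID, repo coordinates, and token handling are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// 123 is a placeholder commit-comment ID.
	reaction, resp, err := client.Reactions.CreateCommentReaction(ctx, "octocat", "hello-world", 123, "+1")
	if err != nil {
		log.Fatal(err)
	}
	// 200 (rather than 201) signals the reaction already existed.
	fmt.Println(resp.StatusCode, reaction.GetContent())
}
```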
-// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-commit-comment -func (s *ReactionsService) CreateCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteCommentReaction deletes the reaction for a commit comment. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-commit-comment-reaction -func (s *ReactionsService) DeleteCommentReaction(ctx context.Context, owner, repo string, commentID, reactionID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions/%v", owner, repo, commentID, reactionID) - - return s.deleteReaction(ctx, u) -} - -// DeleteCommentReactionByID deletes the reaction for a commit comment by repository ID. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-commit-comment-reaction -func (s *ReactionsService) DeleteCommentReactionByID(ctx context.Context, repoID, commentID, reactionID int64) (*Response, error) { - u := fmt.Sprintf("repositories/%v/comments/%v/reactions/%v", repoID, commentID, reactionID) - - return s.deleteReaction(ctx, u) -} - -// ListIssueReactions lists the reactions for an issue. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-an-issue -func (s *ReactionsService) ListIssueReactions(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateIssueReaction creates a reaction for an issue. -// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-an-issue -func (s *ReactionsService) CreateIssueReaction(ctx context.Context, owner, repo string, number int, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. 
- req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteIssueReaction deletes the reaction to an issue. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-reaction -func (s *ReactionsService) DeleteIssueReaction(ctx context.Context, owner, repo string, issueNumber int, reactionID int64) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/issues/%v/reactions/%v", owner, repo, issueNumber, reactionID) - - return s.deleteReaction(ctx, url) -} - -// DeleteIssueReactionByID deletes the reaction to an issue by repository ID. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-reaction -func (s *ReactionsService) DeleteIssueReactionByID(ctx context.Context, repoID, issueNumber int, reactionID int64) (*Response, error) { - url := fmt.Sprintf("repositories/%v/issues/%v/reactions/%v", repoID, issueNumber, reactionID) - - return s.deleteReaction(ctx, url) -} - -// ListIssueCommentReactions lists the reactions for an issue comment. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-an-issue-comment -func (s *ReactionsService) ListIssueCommentReactions(ctx context.Context, owner, repo string, id int64, opts *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateIssueCommentReaction creates a reaction for an issue comment. -// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-an-issue-comment -func (s *ReactionsService) CreateIssueCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteIssueCommentReaction deletes the reaction to an issue comment. 
-// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-comment-reaction -func (s *ReactionsService) DeleteIssueCommentReaction(ctx context.Context, owner, repo string, commentID, reactionID int64) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions/%v", owner, repo, commentID, reactionID) - - return s.deleteReaction(ctx, url) -} - -// DeleteIssueCommentReactionByID deletes the reaction to an issue comment by repository ID. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-comment-reaction -func (s *ReactionsService) DeleteIssueCommentReactionByID(ctx context.Context, repoID, commentID, reactionID int64) (*Response, error) { - url := fmt.Sprintf("repositories/%v/issues/comments/%v/reactions/%v", repoID, commentID, reactionID) - - return s.deleteReaction(ctx, url) -} - -// ListPullRequestCommentReactions lists the reactions for a pull request review comment. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-pull-request-review-comment -func (s *ReactionsService) ListPullRequestCommentReactions(ctx context.Context, owner, repo string, id int64, opts *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreatePullRequestCommentReaction creates a reaction for a pull request review comment. -// Note that if you have already created a reaction of type content, the -// previously created reaction will be returned with Status: 200 OK. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-pull-request-review-comment -func (s *ReactionsService) CreatePullRequestCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeletePullRequestCommentReaction deletes the reaction to a pull request review comment. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-pull-request-comment-reaction -func (s *ReactionsService) DeletePullRequestCommentReaction(ctx context.Context, owner, repo string, commentID, reactionID int64) (*Response, error) { - url := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions/%v", owner, repo, commentID, reactionID) - - return s.deleteReaction(ctx, url) -} - -// DeletePullRequestCommentReactionByID deletes the reaction to a pull request review comment by repository ID. 
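A combined sketch of the pull-request-comment reaction helpers removed in this file, listing reactions on a review comment and deleting any that match; all IDs are placeholders.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	// 123 is a placeholder review-comment ID.
	reactions, _, err := client.Reactions.ListPullRequestCommentReactions(ctx, "octocat", "hello-world", 123, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range reactions {
		if r.GetContent() == "confused" {
			// Remove our stale "confused" reaction by its ID.
			if _, err := client.Reactions.DeletePullRequestCommentReaction(ctx, "octocat", "hello-world", 123, r.GetID()); err != nil {
				log.Fatal(err)
			}
		}
	}
}
```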
-// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-pull-request-comment-reaction -func (s *ReactionsService) DeletePullRequestCommentReactionByID(ctx context.Context, repoID, commentID, reactionID int64) (*Response, error) { - url := fmt.Sprintf("repositories/%v/pulls/comments/%v/reactions/%v", repoID, commentID, reactionID) - - return s.deleteReaction(ctx, url) -} - -// ListTeamDiscussionReactions lists the reactions for a team discussion. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-team-discussion-legacy -func (s *ReactionsService) ListTeamDiscussionReactions(ctx context.Context, teamID int64, discussionNumber int, opts *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/reactions", teamID, discussionNumber) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateTeamDiscussionReaction creates a reaction for a team discussion. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-team-discussion-legacy -func (s *ReactionsService) CreateTeamDiscussionReaction(ctx context.Context, teamID int64, discussionNumber int, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/reactions", teamID, discussionNumber) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteTeamDiscussionReaction deletes the reaction to a team discussion. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-team-discussion-reaction -func (s *ReactionsService) DeleteTeamDiscussionReaction(ctx context.Context, org, teamSlug string, discussionNumber int, reactionID int64) (*Response, error) { - url := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/reactions/%v", org, teamSlug, discussionNumber, reactionID) - - return s.deleteReaction(ctx, url) -} - -// DeleteTeamDiscussionReactionByOrgIDAndTeamID deletes the reaction to a team discussion by organization ID and team ID. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-team-discussion -func (s *ReactionsService) DeleteTeamDiscussionReactionByOrgIDAndTeamID(ctx context.Context, orgID, teamID, discussionNumber int, reactionID int64) (*Response, error) { - url := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/reactions/%v", orgID, teamID, discussionNumber, reactionID) - - return s.deleteReaction(ctx, url) -} - -// ListTeamDiscussionCommentReactions lists the reactions for a team discussion comment. 
-// -// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-team-discussion-comment-legacy -func (s *ReactionsService) ListTeamDiscussionCommentReactions(ctx context.Context, teamID int64, discussionNumber, commentNumber int, opts *ListOptions) ([]*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v/reactions", teamID, discussionNumber, commentNumber) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var m []*Reaction - resp, err := s.client.Do(ctx, req, &m) - if err != nil { - return nil, resp, err - } - return m, resp, nil -} - -// CreateTeamDiscussionCommentReaction creates a reaction for a team discussion comment. -// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-team-discussion-comment-legacy -func (s *ReactionsService) CreateTeamDiscussionCommentReaction(ctx context.Context, teamID int64, discussionNumber, commentNumber int, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v/reactions", teamID, discussionNumber, commentNumber) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteTeamDiscussionCommentReaction deletes the reaction to a team discussion comment. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-team-discussion-comment-reaction -func (s *ReactionsService) DeleteTeamDiscussionCommentReaction(ctx context.Context, org, teamSlug string, discussionNumber, commentNumber int, reactionID int64) (*Response, error) { - url := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v/reactions/%v", org, teamSlug, discussionNumber, commentNumber, reactionID) - - return s.deleteReaction(ctx, url) -} - -// DeleteTeamDiscussionCommentReactionByOrgIDAndTeamID deletes the reaction to a team discussion comment by organization ID and team ID. -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-team-discussion-comment -func (s *ReactionsService) DeleteTeamDiscussionCommentReactionByOrgIDAndTeamID(ctx context.Context, orgID, teamID, discussionNumber, commentNumber int, reactionID int64) (*Response, error) { - url := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v/reactions/%v", orgID, teamID, discussionNumber, commentNumber, reactionID) - - return s.deleteReaction(ctx, url) -} - -func (s *ReactionsService) deleteReaction(ctx context.Context, url string) (*Response, error) { - req, err := s.client.NewRequest(http.MethodDelete, url, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - return s.client.Do(ctx, req, nil) -} - -// Create a reaction to a release. -// Note that a response with a Status: 200 OK means that you already -// added the reaction type to this release. 
-// The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". -// -// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-release -func (s *ReactionsService) CreateReleaseReaction(ctx context.Context, owner, repo string, releaseID int64, content string) (*Reaction, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/releases/%v/reactions", owner, repo, releaseID) - - body := &Reaction{Content: String(content)} - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeReactionsPreview) - - m := &Reaction{} - resp, err := s.client.Do(ctx, req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos.go deleted file mode 100644 index 5ffad6dd3c..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos.go +++ /dev/null @@ -1,2036 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "strings" -) - -const githubBranchNotProtected string = "Branch not protected" - -var ErrBranchNotProtected = errors.New("branch is not protected") - -// RepositoriesService handles communication with the repository related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/ -type RepositoriesService service - -// Repository represents a GitHub repository. -type Repository struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - FullName *string `json:"full_name,omitempty"` - Description *string `json:"description,omitempty"` - Homepage *string `json:"homepage,omitempty"` - CodeOfConduct *CodeOfConduct `json:"code_of_conduct,omitempty"` - DefaultBranch *string `json:"default_branch,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PushedAt *Timestamp `json:"pushed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CloneURL *string `json:"clone_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - MirrorURL *string `json:"mirror_url,omitempty"` - SSHURL *string `json:"ssh_url,omitempty"` - SVNURL *string `json:"svn_url,omitempty"` - Language *string `json:"language,omitempty"` - Fork *bool `json:"fork,omitempty"` - ForksCount *int `json:"forks_count,omitempty"` - NetworkCount *int `json:"network_count,omitempty"` - OpenIssuesCount *int `json:"open_issues_count,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` // Deprecated: Replaced by OpenIssuesCount. For backward compatibility OpenIssues is still populated. - StargazersCount *int `json:"stargazers_count,omitempty"` - SubscribersCount *int `json:"subscribers_count,omitempty"` - WatchersCount *int `json:"watchers_count,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility WatchersCount is still populated. - Watchers *int `json:"watchers,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility Watchers is still populated. 
- Size *int `json:"size,omitempty"` - AutoInit *bool `json:"auto_init,omitempty"` - Parent *Repository `json:"parent,omitempty"` - Source *Repository `json:"source,omitempty"` - TemplateRepository *Repository `json:"template_repository,omitempty"` - Organization *Organization `json:"organization,omitempty"` - Permissions map[string]bool `json:"permissions,omitempty"` - AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` - AllowUpdateBranch *bool `json:"allow_update_branch,omitempty"` - AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` - AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` - AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` - AllowForking *bool `json:"allow_forking,omitempty"` - WebCommitSignoffRequired *bool `json:"web_commit_signoff_required,omitempty"` - DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` - UseSquashPRTitleAsDefault *bool `json:"use_squash_pr_title_as_default,omitempty"` - SquashMergeCommitTitle *string `json:"squash_merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "COMMIT_OR_PR_TITLE" - SquashMergeCommitMessage *string `json:"squash_merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "COMMIT_MESSAGES", "BLANK" - MergeCommitTitle *string `json:"merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "MERGE_MESSAGE" - MergeCommitMessage *string `json:"merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "PR_TITLE", "BLANK" - Topics []string `json:"topics,omitempty"` - Archived *bool `json:"archived,omitempty"` - Disabled *bool `json:"disabled,omitempty"` - - // Only provided when using RepositoriesService.Get while in preview - License *License `json:"license,omitempty"` - - // Additional mutable fields when creating and editing a repository - Private *bool `json:"private,omitempty"` - HasIssues *bool `json:"has_issues,omitempty"` - HasWiki *bool `json:"has_wiki,omitempty"` - HasPages *bool `json:"has_pages,omitempty"` - HasProjects *bool `json:"has_projects,omitempty"` - HasDownloads *bool `json:"has_downloads,omitempty"` - HasDiscussions *bool `json:"has_discussions,omitempty"` - IsTemplate *bool `json:"is_template,omitempty"` - LicenseTemplate *string `json:"license_template,omitempty"` - GitignoreTemplate *string `json:"gitignore_template,omitempty"` - - // Options for configuring Advanced Security and Secret Scanning - SecurityAndAnalysis *SecurityAndAnalysis `json:"security_and_analysis,omitempty"` - - // Creating an organization repository. Required for non-owners. 
- TeamID *int64 `json:"team_id,omitempty"` - - // API URLs - URL *string `json:"url,omitempty"` - ArchiveURL *string `json:"archive_url,omitempty"` - AssigneesURL *string `json:"assignees_url,omitempty"` - BlobsURL *string `json:"blobs_url,omitempty"` - BranchesURL *string `json:"branches_url,omitempty"` - CollaboratorsURL *string `json:"collaborators_url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - CommitsURL *string `json:"commits_url,omitempty"` - CompareURL *string `json:"compare_url,omitempty"` - ContentsURL *string `json:"contents_url,omitempty"` - ContributorsURL *string `json:"contributors_url,omitempty"` - DeploymentsURL *string `json:"deployments_url,omitempty"` - DownloadsURL *string `json:"downloads_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - ForksURL *string `json:"forks_url,omitempty"` - GitCommitsURL *string `json:"git_commits_url,omitempty"` - GitRefsURL *string `json:"git_refs_url,omitempty"` - GitTagsURL *string `json:"git_tags_url,omitempty"` - HooksURL *string `json:"hooks_url,omitempty"` - IssueCommentURL *string `json:"issue_comment_url,omitempty"` - IssueEventsURL *string `json:"issue_events_url,omitempty"` - IssuesURL *string `json:"issues_url,omitempty"` - KeysURL *string `json:"keys_url,omitempty"` - LabelsURL *string `json:"labels_url,omitempty"` - LanguagesURL *string `json:"languages_url,omitempty"` - MergesURL *string `json:"merges_url,omitempty"` - MilestonesURL *string `json:"milestones_url,omitempty"` - NotificationsURL *string `json:"notifications_url,omitempty"` - PullsURL *string `json:"pulls_url,omitempty"` - ReleasesURL *string `json:"releases_url,omitempty"` - StargazersURL *string `json:"stargazers_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - SubscribersURL *string `json:"subscribers_url,omitempty"` - SubscriptionURL *string `json:"subscription_url,omitempty"` - TagsURL *string `json:"tags_url,omitempty"` - TreesURL *string `json:"trees_url,omitempty"` - TeamsURL *string `json:"teams_url,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://docs.github.com/en/rest/search/#text-match-metadata - TextMatches []*TextMatch `json:"text_matches,omitempty"` - - // Visibility is only used for Create and Edit endpoints. The visibility field - // overrides the field parameter when both are used. - // Can be one of public, private or internal. - Visibility *string `json:"visibility,omitempty"` - - // RoleName is only returned by the API 'check team permissions for a repository'. - // See: teams.go (IsTeamRepoByID) https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-repository - RoleName *string `json:"role_name,omitempty"` -} - -func (r Repository) String() string { - return Stringify(r) -} - -// BranchListOptions specifies the optional parameters to the -// RepositoriesService.ListBranches method. -type BranchListOptions struct { - // Setting to true returns only protected branches. - // When set to false, only unprotected branches are returned. - // Omitting this parameter returns all branches. - // Default: nil - Protected *bool `url:"protected,omitempty"` - - ListOptions -} - -// RepositoryListOptions specifies the optional parameters to the -// RepositoriesService.List method. -type RepositoryListOptions struct { - // Visibility of repositories to list. Can be one of all, public, or private. 
- // Default: all - Visibility string `url:"visibility,omitempty"` - - // List repos of given affiliation[s]. - // Comma-separated list of values. Can include: - // * owner: Repositories that are owned by the authenticated user. - // * collaborator: Repositories that the user has been added to as a - // collaborator. - // * organization_member: Repositories that the user has access to through - // being a member of an organization. This includes every repository on - // every team that the user is on. - // Default: owner,collaborator,organization_member - Affiliation string `url:"affiliation,omitempty"` - - // Type of repositories to list. - // Can be one of all, owner, public, private, member. Default: all - // Will cause a 422 error if used in the same request as visibility or - // affiliation. - Type string `url:"type,omitempty"` - - // How to sort the repository list. Can be one of created, updated, pushed, - // full_name. Default: full_name - Sort string `url:"sort,omitempty"` - - // Direction in which to sort repositories. Can be one of asc or desc. - // Default: when using full_name: asc; otherwise desc - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// SecurityAndAnalysis specifies the optional advanced security features -// that are enabled on a given repository. -type SecurityAndAnalysis struct { - AdvancedSecurity *AdvancedSecurity `json:"advanced_security,omitempty"` - SecretScanning *SecretScanning `json:"secret_scanning,omitempty"` - SecretScanningPushProtection *SecretScanningPushProtection `json:"secret_scanning_push_protection,omitempty"` -} - -func (s SecurityAndAnalysis) String() string { - return Stringify(s) -} - -// AdvancedSecurity specifies the state of advanced security on a repository. -// -// GitHub API docs: https://docs.github.com/en/github/getting-started-with-github/learning-about-github/about-github-advanced-security -type AdvancedSecurity struct { - Status *string `json:"status,omitempty"` -} - -func (a AdvancedSecurity) String() string { - return Stringify(a) -} - -// SecretScanning specifies the state of secret scanning on a repository. -// -// GitHub API docs: https://docs.github.com/en/code-security/secret-security/about-secret-scanning -type SecretScanning struct { - Status *string `json:"status,omitempty"` -} - -func (s SecretScanning) String() string { - return Stringify(s) -} - -// SecretScanningPushProtection specifies the state of secret scanning push protection on a repository. -// -// GitHub API docs: https://docs.github.com/en/code-security/secret-scanning/about-secret-scanning#about-secret-scanning-for-partner-patterns -type SecretScanningPushProtection struct { - Status *string `json:"status,omitempty"` -} - -// List the repositories for a user. Passing the empty string will list -// repositories for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repositories-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repositories-for-a-user -func (s *RepositoriesService) List(ctx context.Context, user string, opts *RepositoryListOptions) ([]*Repository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/repos", user) - } else { - u = "user/repos" - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. 
- acceptHeaders := []string{mediaTypeTopicsPreview, mediaTypeRepositoryVisibilityPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// RepositoryListByOrgOptions specifies the optional parameters to the -// RepositoriesService.ListByOrg method. -type RepositoryListByOrgOptions struct { - // Type of repositories to list. Possible values are: all, public, private, - // forks, sources, member. Default is "all". - Type string `url:"type,omitempty"` - - // How to sort the repository list. Can be one of created, updated, pushed, - // full_name. Default is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort repositories. Can be one of asc or desc. - // Default when using full_name: asc; otherwise desc. - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// ListByOrg lists the repositories for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-organization-repositories -func (s *RepositoriesService) ListByOrg(ctx context.Context, org string, opts *RepositoryListByOrgOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("orgs/%v/repos", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeTopicsPreview, mediaTypeRepositoryVisibilityPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// RepositoryListAllOptions specifies the optional parameters to the -// RepositoriesService.ListAll method. -type RepositoryListAllOptions struct { - // ID of the last repository seen - Since int64 `url:"since,omitempty"` -} - -// ListAll lists all GitHub repositories in the order that they were created. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-public-repositories -func (s *RepositoriesService) ListAll(ctx context.Context, opts *RepositoryListAllOptions) ([]*Repository, *Response, error) { - u, err := addOptions("repositories", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// createRepoRequest is a subset of Repository and is used internally -// by Create to pass only the known fields for the endpoint. -// -// See https://github.com/google/go-github/issues/1014 for more -// information. -type createRepoRequest struct { - // Name is required when creating a repo. 
-	Name        *string `json:"name,omitempty"`
-	Description *string `json:"description,omitempty"`
-	Homepage    *string `json:"homepage,omitempty"`
-
-	Private        *bool   `json:"private,omitempty"`
-	Visibility     *string `json:"visibility,omitempty"`
-	HasIssues      *bool   `json:"has_issues,omitempty"`
-	HasProjects    *bool   `json:"has_projects,omitempty"`
-	HasWiki        *bool   `json:"has_wiki,omitempty"`
-	HasDiscussions *bool   `json:"has_discussions,omitempty"`
-	IsTemplate     *bool   `json:"is_template,omitempty"`
-
-	// Creating an organization repository. Required for non-owners.
-	TeamID *int64 `json:"team_id,omitempty"`
-
-	AutoInit                  *bool   `json:"auto_init,omitempty"`
-	GitignoreTemplate         *string `json:"gitignore_template,omitempty"`
-	LicenseTemplate           *string `json:"license_template,omitempty"`
-	AllowSquashMerge          *bool   `json:"allow_squash_merge,omitempty"`
-	AllowMergeCommit          *bool   `json:"allow_merge_commit,omitempty"`
-	AllowRebaseMerge          *bool   `json:"allow_rebase_merge,omitempty"`
-	AllowUpdateBranch         *bool   `json:"allow_update_branch,omitempty"`
-	AllowAutoMerge            *bool   `json:"allow_auto_merge,omitempty"`
-	AllowForking              *bool   `json:"allow_forking,omitempty"`
-	DeleteBranchOnMerge       *bool   `json:"delete_branch_on_merge,omitempty"`
-	UseSquashPRTitleAsDefault *bool   `json:"use_squash_pr_title_as_default,omitempty"`
-	SquashMergeCommitTitle    *string `json:"squash_merge_commit_title,omitempty"`
-	SquashMergeCommitMessage  *string `json:"squash_merge_commit_message,omitempty"`
-	MergeCommitTitle          *string `json:"merge_commit_title,omitempty"`
-	MergeCommitMessage        *string `json:"merge_commit_message,omitempty"`
-}
-
-// Create a new repository. If an organization is specified, the new
-// repository will be created under that org. If the empty string is
-// specified, it will be created for the authenticated user.
-//
-// Note that only a subset of the repo fields are used and repo must
-// not be nil.
-//
-// Also note that this method will return the response without actually
-// waiting for GitHub to finish creating the repository and letting the
-// changes propagate throughout its servers. You may set up a loop with
-// exponential back-off to verify the repository's creation.
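[Editor's note: a minimal, self-contained sketch, not part of the deleted vendor file, of the create-then-verify pattern the comment above recommends. The repository name and the GITHUB_TOKEN environment variable are assumptions for illustration.]

package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/google/go-github/v53/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	// Passing "" as the org creates the repository for the authenticated user.
	created, _, err := client.Repositories.Create(ctx, "", &github.Repository{
		Name:     github.String("example-repo"), // hypothetical name
		Private:  github.Bool(true),
		AutoInit: github.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Create can return before the new repository has propagated through
	// GitHub's servers, so poll with exponential back-off until visible.
	for wait := time.Second; wait < time.Minute; wait *= 2 {
		if _, _, err := client.Repositories.Get(ctx, created.GetOwner().GetLogin(), created.GetName()); err == nil {
			fmt.Println("repository is visible:", created.GetFullName())
			return
		}
		time.Sleep(wait)
	}
	log.Fatal("repository did not become visible in time")
}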
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-a-repository-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-an-organization-repository -func (s *RepositoriesService) Create(ctx context.Context, org string, repo *Repository) (*Repository, *Response, error) { - var u string - if org != "" { - u = fmt.Sprintf("orgs/%v/repos", org) - } else { - u = "user/repos" - } - - repoReq := &createRepoRequest{ - Name: repo.Name, - Description: repo.Description, - Homepage: repo.Homepage, - Private: repo.Private, - Visibility: repo.Visibility, - HasIssues: repo.HasIssues, - HasProjects: repo.HasProjects, - HasWiki: repo.HasWiki, - HasDiscussions: repo.HasDiscussions, - IsTemplate: repo.IsTemplate, - TeamID: repo.TeamID, - AutoInit: repo.AutoInit, - GitignoreTemplate: repo.GitignoreTemplate, - LicenseTemplate: repo.LicenseTemplate, - AllowSquashMerge: repo.AllowSquashMerge, - AllowMergeCommit: repo.AllowMergeCommit, - AllowRebaseMerge: repo.AllowRebaseMerge, - AllowUpdateBranch: repo.AllowUpdateBranch, - AllowAutoMerge: repo.AllowAutoMerge, - AllowForking: repo.AllowForking, - DeleteBranchOnMerge: repo.DeleteBranchOnMerge, - UseSquashPRTitleAsDefault: repo.UseSquashPRTitleAsDefault, - SquashMergeCommitTitle: repo.SquashMergeCommitTitle, - SquashMergeCommitMessage: repo.SquashMergeCommitMessage, - MergeCommitTitle: repo.MergeCommitTitle, - MergeCommitMessage: repo.MergeCommitMessage, - } - - req, err := s.client.NewRequest("POST", u, repoReq) - if err != nil { - return nil, nil, err - } - - acceptHeaders := []string{mediaTypeRepositoryTemplatePreview, mediaTypeRepositoryVisibilityPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - r := new(Repository) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// TemplateRepoRequest represents a request to create a repository from a template. -type TemplateRepoRequest struct { - // Name is required when creating a repo. - Name *string `json:"name,omitempty"` - Owner *string `json:"owner,omitempty"` - Description *string `json:"description,omitempty"` - - IncludeAllBranches *bool `json:"include_all_branches,omitempty"` - Private *bool `json:"private,omitempty"` -} - -// CreateFromTemplate generates a repository from a template. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-a-repository-using-a-template -func (s *RepositoriesService) CreateFromTemplate(ctx context.Context, templateOwner, templateRepo string, templateRepoReq *TemplateRepoRequest) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/generate", templateOwner, templateRepo) - - req, err := s.client.NewRequest("POST", u, templateRepoReq) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeRepositoryTemplatePreview) - r := new(Repository) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// Get fetches a repository. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#get-a-repository
-func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Repository, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v", owner, repo)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when the license support fully launches
-	// https://docs.github.com/en/rest/licenses/#get-a-repositorys-license
-	acceptHeaders := []string{
-		mediaTypeCodesOfConductPreview,
-		mediaTypeTopicsPreview,
-		mediaTypeRepositoryTemplatePreview,
-		mediaTypeRepositoryVisibilityPreview,
-	}
-	req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
-
-	repository := new(Repository)
-	resp, err := s.client.Do(ctx, req, repository)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return repository, resp, nil
-}
-
-// GetCodeOfConduct gets the contents of a repository's code of conduct.
-// Note that https://docs.github.com/en/rest/codes-of-conduct#about-the-codes-of-conduct-api
-// says to use the GET /repos/{owner}/{repo} endpoint.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#get-a-repository
-func (s *RepositoriesService) GetCodeOfConduct(ctx context.Context, owner, repo string) (*CodeOfConduct, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v", owner, repo)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeCodesOfConductPreview)
-
-	r := new(Repository)
-	resp, err := s.client.Do(ctx, req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r.GetCodeOfConduct(), resp, nil
-}
-
-// GetByID fetches a repository.
-//
-// Note: GetByID uses the undocumented GitHub API endpoint /repositories/:id.
-func (s *RepositoriesService) GetByID(ctx context.Context, id int64) (*Repository, *Response, error) {
-	u := fmt.Sprintf("repositories/%d", id)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	repository := new(Repository)
-	resp, err := s.client.Do(ctx, req, repository)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return repository, resp, nil
-}
-
-// Edit updates a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#update-a-repository
-func (s *RepositoriesService) Edit(ctx context.Context, owner, repo string, repository *Repository) (*Repository, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v", owner, repo)
-	req, err := s.client.NewRequest("PATCH", u, repository)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	acceptHeaders := []string{mediaTypeRepositoryTemplatePreview, mediaTypeRepositoryVisibilityPreview}
-	req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
-	r := new(Repository)
-	resp, err := s.client.Do(ctx, req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-// Delete a repository.
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#delete-a-repository -func (s *RepositoriesService) Delete(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Contributor represents a repository contributor -type Contributor struct { - Login *string `json:"login,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin,omitempty"` - Contributions *int `json:"contributions,omitempty"` - Name *string `json:"name,omitempty"` - Email *string `json:"email,omitempty"` -} - -// ListContributorsOptions specifies the optional parameters to the -// RepositoriesService.ListContributors method. -type ListContributorsOptions struct { - // Include anonymous contributors in results or not - Anon string `url:"anon,omitempty"` - - ListOptions -} - -// GetVulnerabilityAlerts checks if vulnerability alerts are enabled for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#check-if-vulnerability-alerts-are-enabled-for-a-repository -func (s *RepositoriesService) GetVulnerabilityAlerts(ctx context.Context, owner, repository string) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/vulnerability-alerts", owner, repository) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredVulnerabilityAlertsPreview) - - resp, err := s.client.Do(ctx, req, nil) - vulnerabilityAlertsEnabled, err := parseBoolResponse(err) - return vulnerabilityAlertsEnabled, resp, err -} - -// EnableVulnerabilityAlerts enables vulnerability alerts and the dependency graph for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#enable-vulnerability-alerts -func (s *RepositoriesService) EnableVulnerabilityAlerts(ctx context.Context, owner, repository string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/vulnerability-alerts", owner, repository) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredVulnerabilityAlertsPreview) - - return s.client.Do(ctx, req, nil) -} - -// DisableVulnerabilityAlerts disables vulnerability alerts and the dependency graph for a repository. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#disable-vulnerability-alerts
-func (s *RepositoriesService) DisableVulnerabilityAlerts(ctx context.Context, owner, repository string) (*Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/vulnerability-alerts", owner, repository)
-
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches
-	req.Header.Set("Accept", mediaTypeRequiredVulnerabilityAlertsPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// EnableAutomatedSecurityFixes enables the automated security fixes for a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#enable-automated-security-fixes
-func (s *RepositoriesService) EnableAutomatedSecurityFixes(ctx context.Context, owner, repository string) (*Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/automated-security-fixes", owner, repository)
-
-	req, err := s.client.NewRequest("PUT", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches
-	req.Header.Set("Accept", mediaTypeRequiredAutomatedSecurityFixesPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// DisableAutomatedSecurityFixes disables the automated security fixes for a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#disable-automated-security-fixes
-func (s *RepositoriesService) DisableAutomatedSecurityFixes(ctx context.Context, owner, repository string) (*Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/automated-security-fixes", owner, repository)
-
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches
-	req.Header.Set("Accept", mediaTypeRequiredAutomatedSecurityFixesPreview)
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ListContributors lists contributors for a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-contributors
-func (s *RepositoriesService) ListContributors(ctx context.Context, owner string, repository string, opts *ListContributorsOptions) ([]*Contributor, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/contributors", owner, repository)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var contributor []*Contributor
-	resp, err := s.client.Do(ctx, req, &contributor)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return contributor, resp, nil
-}
-
-// ListLanguages lists languages for the specified repository. The returned map
-// specifies the languages and the number of bytes of code written in that
-// language. For example:
-//
-//	{
-//	  "C": 78769,
-//	  "Python": 7769
-//	}
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-languages
-func (s *RepositoriesService) ListLanguages(ctx context.Context, owner string, repo string) (map[string]int, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/languages", owner, repo)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	languages := make(map[string]int)
-	resp, err := s.client.Do(ctx, req, &languages)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return languages, resp, nil
-}
-
-// ListTeams lists the teams for the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-teams
-func (s *RepositoriesService) ListTeams(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*Team, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/teams", owner, repo)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var teams []*Team
-	resp, err := s.client.Do(ctx, req, &teams)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return teams, resp, nil
-}
-
-// RepositoryTag represents a repository tag.
-type RepositoryTag struct {
-	Name       *string `json:"name,omitempty"`
-	Commit     *Commit `json:"commit,omitempty"`
-	ZipballURL *string `json:"zipball_url,omitempty"`
-	TarballURL *string `json:"tarball_url,omitempty"`
-}
-
-// ListTags lists tags for the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-tags
-func (s *RepositoriesService) ListTags(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*RepositoryTag, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/tags", owner, repo)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var tags []*RepositoryTag
-	resp, err := s.client.Do(ctx, req, &tags)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return tags, resp, nil
-}
-
-// Branch represents a repository branch.
-type Branch struct {
-	Name      *string           `json:"name,omitempty"`
-	Commit    *RepositoryCommit `json:"commit,omitempty"`
-	Protected *bool             `json:"protected,omitempty"`
-}
-
-// Protection represents a repository branch's protection.
-type Protection struct {
-	RequiredStatusChecks           *RequiredStatusChecks           `json:"required_status_checks"`
-	RequiredPullRequestReviews     *PullRequestReviewsEnforcement  `json:"required_pull_request_reviews"`
-	EnforceAdmins                  *AdminEnforcement               `json:"enforce_admins"`
-	Restrictions                   *BranchRestrictions             `json:"restrictions"`
-	RequireLinearHistory           *RequireLinearHistory           `json:"required_linear_history"`
-	AllowForcePushes               *AllowForcePushes               `json:"allow_force_pushes"`
-	AllowDeletions                 *AllowDeletions                 `json:"allow_deletions"`
-	RequiredConversationResolution *RequiredConversationResolution `json:"required_conversation_resolution"`
-	BlockCreations                 *BlockCreations                 `json:"block_creations,omitempty"`
-	LockBranch                     *LockBranch                     `json:"lock_branch,omitempty"`
-	AllowForkSyncing               *AllowForkSyncing               `json:"allow_fork_syncing,omitempty"`
-	RequiredSignatures             *SignaturesProtectedBranch      `json:"required_signatures,omitempty"`
-	URL                            *string                         `json:"url,omitempty"`
-}
-
-// BlockCreations represents whether users can push changes that create branches. If this is true, this
-// setting blocks pushes that create new branches, unless the push is initiated by a user, team, or app
-// which has the ability to push.
-type BlockCreations struct {
-	Enabled *bool `json:"enabled,omitempty"`
-}
-
-// LockBranch represents if the branch is marked as read-only. If this is true, users will not be able to push to the branch.
-type LockBranch struct {
-	Enabled *bool `json:"enabled,omitempty"`
-}
-
-// AllowForkSyncing represents whether users can pull changes from upstream when the branch is locked.
-type AllowForkSyncing struct {
-	Enabled *bool `json:"enabled,omitempty"`
-}
-
-// BranchProtectionRule represents the rule applied to a repository's branch.
-type BranchProtectionRule struct { - ID *int64 `json:"id,omitempty"` - RepositoryID *int64 `json:"repository_id,omitempty"` - Name *string `json:"name,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - PullRequestReviewsEnforcementLevel *string `json:"pull_request_reviews_enforcement_level,omitempty"` - RequiredApprovingReviewCount *int `json:"required_approving_review_count,omitempty"` - DismissStaleReviewsOnPush *bool `json:"dismiss_stale_reviews_on_push,omitempty"` - AuthorizedDismissalActorsOnly *bool `json:"authorized_dismissal_actors_only,omitempty"` - IgnoreApprovalsFromContributors *bool `json:"ignore_approvals_from_contributors,omitempty"` - RequireCodeOwnerReview *bool `json:"require_code_owner_review,omitempty"` - RequiredStatusChecks []string `json:"required_status_checks,omitempty"` - RequiredStatusChecksEnforcementLevel *string `json:"required_status_checks_enforcement_level,omitempty"` - StrictRequiredStatusChecksPolicy *bool `json:"strict_required_status_checks_policy,omitempty"` - SignatureRequirementEnforcementLevel *string `json:"signature_requirement_enforcement_level,omitempty"` - LinearHistoryRequirementEnforcementLevel *string `json:"linear_history_requirement_enforcement_level,omitempty"` - AdminEnforced *bool `json:"admin_enforced,omitempty"` - AllowForcePushesEnforcementLevel *string `json:"allow_force_pushes_enforcement_level,omitempty"` - AllowDeletionsEnforcementLevel *string `json:"allow_deletions_enforcement_level,omitempty"` - MergeQueueEnforcementLevel *string `json:"merge_queue_enforcement_level,omitempty"` - RequiredDeploymentsEnforcementLevel *string `json:"required_deployments_enforcement_level,omitempty"` - RequiredConversationResolutionLevel *string `json:"required_conversation_resolution_level,omitempty"` - AuthorizedActorsOnly *bool `json:"authorized_actors_only,omitempty"` - AuthorizedActorNames []string `json:"authorized_actor_names,omitempty"` -} - -// ProtectionChanges represents the changes to the rule if the BranchProtection was edited. 
-type ProtectionChanges struct { - AdminEnforced *AdminEnforcedChanges `json:"admin_enforced,omitempty"` - AllowDeletionsEnforcementLevel *AllowDeletionsEnforcementLevelChanges `json:"allow_deletions_enforcement_level,omitempty"` - AuthorizedActorNames *AuthorizedActorNames `json:"authorized_actor_names,omitempty"` - AuthorizedActorsOnly *AuthorizedActorsOnly `json:"authorized_actors_only,omitempty"` - AuthorizedDismissalActorsOnly *AuthorizedDismissalActorsOnlyChanges `json:"authorized_dismissal_actors_only,omitempty"` - CreateProtected *CreateProtectedChanges `json:"create_protected,omitempty"` - DismissStaleReviewsOnPush *DismissStaleReviewsOnPushChanges `json:"dismiss_stale_reviews_on_push,omitempty"` - LinearHistoryRequirementEnforcementLevel *LinearHistoryRequirementEnforcementLevelChanges `json:"linear_history_requirement_enforcement_level,omitempty"` - PullRequestReviewsEnforcementLevel *PullRequestReviewsEnforcementLevelChanges `json:"pull_request_reviews_enforcement_level,omitempty"` - RequireCodeOwnerReview *RequireCodeOwnerReviewChanges `json:"require_code_owner_review,omitempty"` - RequiredConversationResolutionLevel *RequiredConversationResolutionLevelChanges `json:"required_conversation_resolution_level,omitempty"` - RequiredDeploymentsEnforcementLevel *RequiredDeploymentsEnforcementLevelChanges `json:"required_deployments_enforcement_level,omitempty"` - RequiredStatusChecks *RequiredStatusChecksChanges `json:"required_status_checks,omitempty"` - RequiredStatusChecksEnforcementLevel *RequiredStatusChecksEnforcementLevelChanges `json:"required_status_checks_enforcement_level,omitempty"` - SignatureRequirementEnforcementLevel *SignatureRequirementEnforcementLevelChanges `json:"signature_requirement_enforcement_level,omitempty"` -} - -// AdminEnforcedChanges represents the changes made to the AdminEnforced policy. -type AdminEnforcedChanges struct { - From *bool `json:"from,omitempty"` -} - -// AllowDeletionsEnforcementLevelChanges represents the changes made to the AllowDeletionsEnforcementLevel policy. -type AllowDeletionsEnforcementLevelChanges struct { - From *string `json:"from,omitempty"` -} - -// AuthorizedActorNames represents who are authorized to edit the branch protection rules. -type AuthorizedActorNames struct { - From []string `json:"from,omitempty"` -} - -// AuthorizedActorsOnly represents if the branch rule can be edited by authorized actors only. -type AuthorizedActorsOnly struct { - From *bool `json:"from,omitempty"` -} - -// AuthorizedDismissalActorsOnlyChanges represents the changes made to the AuthorizedDismissalActorsOnly policy. -type AuthorizedDismissalActorsOnlyChanges struct { - From *bool `json:"from,omitempty"` -} - -// CreateProtectedChanges represents the changes made to the CreateProtected policy. -type CreateProtectedChanges struct { - From *bool `json:"from,omitempty"` -} - -// DismissStaleReviewsOnPushChanges represents the changes made to the DismissStaleReviewsOnPushChanges policy. -type DismissStaleReviewsOnPushChanges struct { - From *bool `json:"from,omitempty"` -} - -// LinearHistoryRequirementEnforcementLevelChanges represents the changes made to the LinearHistoryRequirementEnforcementLevel policy. -type LinearHistoryRequirementEnforcementLevelChanges struct { - From *string `json:"from,omitempty"` -} - -// PullRequestReviewsEnforcementLevelChanges represents the changes made to the PullRequestReviewsEnforcementLevel policy. 
-type PullRequestReviewsEnforcementLevelChanges struct {
-	From *string `json:"from,omitempty"`
-}
-
-// RequireCodeOwnerReviewChanges represents the changes made to the RequireCodeOwnerReview policy.
-type RequireCodeOwnerReviewChanges struct {
-	From *bool `json:"from,omitempty"`
-}
-
-// RequiredConversationResolutionLevelChanges represents the changes made to the RequiredConversationResolutionLevel policy.
-type RequiredConversationResolutionLevelChanges struct {
-	From *string `json:"from,omitempty"`
-}
-
-// RequiredDeploymentsEnforcementLevelChanges represents the changes made to the RequiredDeploymentsEnforcementLevel policy.
-type RequiredDeploymentsEnforcementLevelChanges struct {
-	From *string `json:"from,omitempty"`
-}
-
-// RequiredStatusChecksChanges represents the changes made to the RequiredStatusChecks policy.
-type RequiredStatusChecksChanges struct {
-	From []string `json:"from,omitempty"`
-}
-
-// RequiredStatusChecksEnforcementLevelChanges represents the changes made to the RequiredStatusChecksEnforcementLevel policy.
-type RequiredStatusChecksEnforcementLevelChanges struct {
-	From *string `json:"from,omitempty"`
-}
-
-// SignatureRequirementEnforcementLevelChanges represents the changes made to the SignatureRequirementEnforcementLevel policy.
-type SignatureRequirementEnforcementLevelChanges struct {
-	From *string `json:"from,omitempty"`
-}
-
-// ProtectionRequest represents a request to create/edit a branch's protection.
-type ProtectionRequest struct {
-	RequiredStatusChecks       *RequiredStatusChecks                 `json:"required_status_checks"`
-	RequiredPullRequestReviews *PullRequestReviewsEnforcementRequest `json:"required_pull_request_reviews"`
-	EnforceAdmins              bool                                  `json:"enforce_admins"`
-	Restrictions               *BranchRestrictionsRequest            `json:"restrictions"`
-	// Enforces a linear commit Git history, which prevents anyone from pushing merge commits to a branch.
-	RequireLinearHistory *bool `json:"required_linear_history,omitempty"`
-	// Permits force pushes to the protected branch by anyone with write access to the repository.
-	AllowForcePushes *bool `json:"allow_force_pushes,omitempty"`
-	// Allows deletion of the protected branch by anyone with write access to the repository.
-	AllowDeletions *bool `json:"allow_deletions,omitempty"`
-	// RequiredConversationResolution, if set to true, requires all comments
-	// on the pull request to be resolved before it can be merged to a protected branch.
-	RequiredConversationResolution *bool `json:"required_conversation_resolution,omitempty"`
-	// BlockCreations, if set to true, will cause the restrictions setting to also block pushes
-	// which create new branches, unless initiated by a user, team, app with the ability to push.
-	BlockCreations *bool `json:"block_creations,omitempty"`
-	// LockBranch, if set to true, will prevent users from pushing to the branch.
-	LockBranch *bool `json:"lock_branch,omitempty"`
-	// AllowForkSyncing, if set to true, will allow users to pull changes from upstream
-	// when the branch is locked.
-	AllowForkSyncing *bool `json:"allow_fork_syncing,omitempty"`
-}
-
-// RequiredStatusChecks represents the protection status of an individual branch.
-type RequiredStatusChecks struct {
-	// Require branches to be up to date before merging. (Required.)
-	Strict bool `json:"strict"`
-	// The list of status checks to require in order to merge into this
-	// branch. (Deprecated. Note: only one of Contexts/Checks can be populated,
-	// but at least one must be populated).
- Contexts []string `json:"contexts,omitempty"` - // The list of status checks to require in order to merge into this - // branch. - Checks []*RequiredStatusCheck `json:"checks"` - ContextsURL *string `json:"contexts_url,omitempty"` - URL *string `json:"url,omitempty"` -} - -// RequiredStatusChecksRequest represents a request to edit a protected branch's status checks. -type RequiredStatusChecksRequest struct { - Strict *bool `json:"strict,omitempty"` - // Note: if both Contexts and Checks are populated, - // the GitHub API will only use Checks. - Contexts []string `json:"contexts,omitempty"` - Checks []*RequiredStatusCheck `json:"checks,omitempty"` -} - -// RequiredStatusCheck represents a status check of a protected branch. -type RequiredStatusCheck struct { - // The name of the required check. - Context string `json:"context"` - // The ID of the GitHub App that must provide this check. - // Omit this field to automatically select the GitHub App - // that has recently provided this check, - // or any app if it was not set by a GitHub App. - // Pass -1 to explicitly allow any app to set the status. - AppID *int64 `json:"app_id,omitempty"` -} - -// PullRequestReviewsEnforcement represents the pull request reviews enforcement of a protected branch. -type PullRequestReviewsEnforcement struct { - // Allow specific users, teams, or apps to bypass pull request requirements. - BypassPullRequestAllowances *BypassPullRequestAllowances `json:"bypass_pull_request_allowances,omitempty"` - // Specifies which users, teams and apps can dismiss pull request reviews. - DismissalRestrictions *DismissalRestrictions `json:"dismissal_restrictions,omitempty"` - // Specifies if approved reviews are dismissed automatically, when a new commit is pushed. - DismissStaleReviews bool `json:"dismiss_stale_reviews"` - // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. - RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"` - // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. - // Valid values are 1-6. - RequiredApprovingReviewCount int `json:"required_approving_review_count"` - // RequireLastPushApproval specifies whether the last pusher to a pull request branch can approve it. - RequireLastPushApproval bool `json:"require_last_push_approval"` -} - -// PullRequestReviewsEnforcementRequest represents request to set the pull request review -// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcement above -// because the request structure is different from the response structure. -type PullRequestReviewsEnforcementRequest struct { - // Allow specific users, teams, or apps to bypass pull request requirements. - BypassPullRequestAllowancesRequest *BypassPullRequestAllowancesRequest `json:"bypass_pull_request_allowances,omitempty"` - // Specifies which users, teams and apps should be allowed to dismiss pull request reviews. - // User, team and app dismissal restrictions are only available for - // organization-owned repositories. Must be nil for personal repositories. - DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` - // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. 
(Required) - DismissStaleReviews bool `json:"dismiss_stale_reviews"` - // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. - RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"` - // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. - // Valid values are 1-6. - RequiredApprovingReviewCount int `json:"required_approving_review_count"` - // RequireLastPushApproval specifies whether the last pusher to a pull request branch can approve it. - RequireLastPushApproval *bool `json:"require_last_push_approval,omitempty"` -} - -// PullRequestReviewsEnforcementUpdate represents request to patch the pull request review -// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcementRequest above -// because the patch request does not require all fields to be initialized. -type PullRequestReviewsEnforcementUpdate struct { - // Allow specific users, teams, or apps to bypass pull request requirements. - BypassPullRequestAllowancesRequest *BypassPullRequestAllowancesRequest `json:"bypass_pull_request_allowances,omitempty"` - // Specifies which users, teams and apps can dismiss pull request reviews. Can be omitted. - DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` - // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. Can be omitted. - DismissStaleReviews *bool `json:"dismiss_stale_reviews,omitempty"` - // RequireCodeOwnerReviews specifies if merging pull requests is blocked until code owners have reviewed. - RequireCodeOwnerReviews *bool `json:"require_code_owner_reviews,omitempty"` - // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. - // Valid values are 1 - 6 or 0 to not require reviewers. - RequiredApprovingReviewCount int `json:"required_approving_review_count"` - // RequireLastPushApproval specifies whether the last pusher to a pull request branch can approve it. - RequireLastPushApproval *bool `json:"require_last_push_approval,omitempty"` -} - -// RequireLinearHistory represents the configuration to enforce branches with no merge commit. -type RequireLinearHistory struct { - Enabled bool `json:"enabled"` -} - -// AllowDeletions represents the configuration to accept deletion of protected branches. -type AllowDeletions struct { - Enabled bool `json:"enabled"` -} - -// AllowForcePushes represents the configuration to accept forced pushes on protected branches. -type AllowForcePushes struct { - Enabled bool `json:"enabled"` -} - -// RequiredConversationResolution, if enabled, requires all comments on the pull request to be resolved before it can be merged to a protected branch. -type RequiredConversationResolution struct { - Enabled bool `json:"enabled"` -} - -// AdminEnforcement represents the configuration to enforce required status checks for repository administrators. -type AdminEnforcement struct { - URL *string `json:"url,omitempty"` - Enabled bool `json:"enabled"` -} - -// BranchRestrictions represents the restriction that only certain users or -// teams may push to a branch. -type BranchRestrictions struct { - // The list of user logins with push access. - Users []*User `json:"users"` - // The list of team slugs with push access. - Teams []*Team `json:"teams"` - // The list of app slugs with push access. 
-	Apps []*App `json:"apps"`
-}
-
-// BranchRestrictionsRequest represents the request to create/edit the
-// restriction that only certain users or teams may push to a branch. It is
-// separate from BranchRestrictions above because the request structure is
-// different from the response structure.
-type BranchRestrictionsRequest struct {
-	// The list of user logins with push access. (Required; use []string{} instead of nil for empty list.)
-	Users []string `json:"users"`
-	// The list of team slugs with push access. (Required; use []string{} instead of nil for empty list.)
-	Teams []string `json:"teams"`
-	// The list of app slugs with push access.
-	Apps []string `json:"apps"`
-}
-
-// BypassPullRequestAllowances represents the people, teams, or apps who are allowed to bypass required pull requests.
-type BypassPullRequestAllowances struct {
-	// The list of users allowed to bypass pull request requirements.
-	Users []*User `json:"users"`
-	// The list of teams allowed to bypass pull request requirements.
-	Teams []*Team `json:"teams"`
-	// The list of apps allowed to bypass pull request requirements.
-	Apps []*App `json:"apps"`
-}
-
-// BypassPullRequestAllowancesRequest represents the people, teams, or apps who are
-// allowed to bypass required pull requests.
-// It is separate from BypassPullRequestAllowances above because the request structure is
-// different from the response structure.
-type BypassPullRequestAllowancesRequest struct {
-	// The list of user logins allowed to bypass pull request requirements.
-	Users []string `json:"users"`
-	// The list of team slugs allowed to bypass pull request requirements.
-	Teams []string `json:"teams"`
-	// The list of app slugs allowed to bypass pull request requirements.
-	Apps []string `json:"apps"`
-}
-
-// DismissalRestrictions specifies which users and teams can dismiss pull request reviews.
-type DismissalRestrictions struct {
-	// The list of users who can dismiss pull request reviews.
-	Users []*User `json:"users"`
-	// The list of teams which can dismiss pull request reviews.
-	Teams []*Team `json:"teams"`
-	// The list of apps which can dismiss pull request reviews.
-	Apps []*App `json:"apps"`
-}
-
-// DismissalRestrictionsRequest represents the request to create/edit the
-// restriction to allow only specific users, teams or apps to dismiss pull request reviews. It is
-// separate from DismissalRestrictions above because the request structure is
-// different from the response structure.
-// Note: Both Users and Teams must be nil, or both must be non-nil.
-type DismissalRestrictionsRequest struct {
-	// The list of user logins who can dismiss pull request reviews. (Required; use nil to disable dismissal_restrictions or &[]string{} otherwise.)
-	Users *[]string `json:"users,omitempty"`
-	// The list of team slugs which can dismiss pull request reviews. (Required; use nil to disable dismissal_restrictions or &[]string{} otherwise.)
-	Teams *[]string `json:"teams,omitempty"`
-	// The list of app slugs which can dismiss pull request reviews. (Required; use nil to disable dismissal_restrictions or &[]string{} otherwise.)
-	Apps *[]string `json:"apps,omitempty"`
-}
-
-// SignaturesProtectedBranch represents the protection status of an individual branch.
-type SignaturesProtectedBranch struct {
-	URL *string `json:"url,omitempty"`
-	// Commits pushed to matching branches must have verified signatures.
-	Enabled *bool `json:"enabled,omitempty"`
-}
-
-// ListBranches lists branches for the specified repository.
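[Editor's note: an illustrative sketch, not part of the deleted vendor file, showing how the request types above fit together when updating branch protection (UpdateBranchProtection is defined further down in this file); ctx, an authenticated client, and the owner/repo/branch names are placeholder assumptions.]

	preq := &github.ProtectionRequest{
		RequiredStatusChecks: &github.RequiredStatusChecks{
			Strict: true, // branches must be up to date before merging
			Checks: []*github.RequiredStatusCheck{{Context: "ci/build"}},
		},
		EnforceAdmins: true,
		RequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{
			DismissStaleReviews:          true,
			RequiredApprovingReviewCount: 2,
		},
		// Restrictions must stay nil for personal repositories; push
		// restrictions are only available on organization-owned repos.
		Restrictions: nil,
	}
	protection, _, err := client.Repositories.UpdateBranchProtection(ctx, "octocat", "hello-world", "main", preq)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(protection.GetURL())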
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branches#list-branches -func (s *RepositoriesService) ListBranches(ctx context.Context, owner string, repo string, opts *BranchListOptions) ([]*Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var branches []*Branch - resp, err := s.client.Do(ctx, req, &branches) - if err != nil { - return nil, resp, err - } - - return branches, resp, nil -} - -// GetBranch gets the specified branch for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branches#get-a-branch -func (s *RepositoriesService) GetBranch(ctx context.Context, owner, repo, branch string, followRedirects bool) (*Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branch) - - resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) - } - - b := new(Branch) - err = json.NewDecoder(resp.Body).Decode(b) - return b, newResponse(resp), err -} - -// renameBranchRequest represents a request to rename a branch. -type renameBranchRequest struct { - NewName string `json:"new_name"` -} - -// RenameBranch renames a branch in a repository. -// -// To rename a non-default branch: Users must have push access. GitHub Apps must have the `contents:write` repository permission. -// To rename the default branch: Users must have admin or owner permissions. GitHub Apps must have the `administration:write` repository permission. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branches#rename-a-branch -func (s *RepositoriesService) RenameBranch(ctx context.Context, owner, repo, branch, newName string) (*Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/rename", owner, repo, branch) - r := &renameBranchRequest{NewName: newName} - req, err := s.client.NewRequest("POST", u, r) - if err != nil { - return nil, nil, err - } - - b := new(Branch) - resp, err := s.client.Do(ctx, req, b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil -} - -// GetBranchProtection gets the protection of a given branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-branch-protection -func (s *RepositoriesService) GetBranchProtection(ctx context.Context, owner, repo, branch string) (*Protection, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - p := new(Protection) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - if isBranchNotProtected(err) { - err = ErrBranchNotProtected - } - return nil, resp, err - } - - return p, resp, nil -} - -// GetRequiredStatusChecks gets the required status checks for a given protected branch. 
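[Editor's note: an illustrative sketch, not part of the deleted vendor file. Because GetBranchProtection above maps the API's "Branch not protected" error to the ErrBranchNotProtected sentinel, callers can distinguish "no rules configured" from a real failure; ctx, client, and the names are placeholder assumptions.]

	p, _, err := client.Repositories.GetBranchProtection(ctx, "octocat", "hello-world", "main")
	switch {
	case errors.Is(err, github.ErrBranchNotProtected):
		fmt.Println("branch exists but has no protection rules")
	case err != nil:
		log.Fatal(err) // transport, auth, or not-found failure
	default:
		if rsc := p.GetRequiredStatusChecks(); rsc != nil {
			fmt.Println("strict status checks:", rsc.Strict)
		}
	}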
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-status-checks-protection -func (s *RepositoriesService) GetRequiredStatusChecks(ctx context.Context, owner, repo, branch string) (*RequiredStatusChecks, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - p := new(RequiredStatusChecks) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - if isBranchNotProtected(err) { - err = ErrBranchNotProtected - } - return nil, resp, err - } - - return p, resp, nil -} - -// ListRequiredStatusChecksContexts lists the required status checks contexts for a given protected branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-all-status-check-contexts -func (s *RepositoriesService) ListRequiredStatusChecksContexts(ctx context.Context, owner, repo, branch string) (contexts []string, resp *Response, err error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks/contexts", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.Do(ctx, req, &contexts) - if err != nil { - if isBranchNotProtected(err) { - err = ErrBranchNotProtected - } - return nil, resp, err - } - - return contexts, resp, nil -} - -// UpdateBranchProtection updates the protection of a given branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-branch-protection -func (s *RepositoriesService) UpdateBranchProtection(ctx context.Context, owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) - req, err := s.client.NewRequest("PUT", u, preq) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - p := new(Protection) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// RemoveBranchProtection removes the protection of a given branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-branch-protection -func (s *RepositoriesService) RemoveBranchProtection(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetSignaturesProtectedBranch gets required signatures of protected branch. 
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-commit-signature-protection -func (s *RepositoriesService) GetSignaturesProtectedBranch(ctx context.Context, owner, repo, branch string) (*SignaturesProtectedBranch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeSignaturePreview) - - p := new(SignaturesProtectedBranch) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// RequireSignaturesOnProtectedBranch makes signed commits required on a protected branch. -// It requires admin access and branch protection to be enabled. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#create-commit-signature-protection -func (s *RepositoriesService) RequireSignaturesOnProtectedBranch(ctx context.Context, owner, repo, branch string) (*SignaturesProtectedBranch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeSignaturePreview) - - r := new(SignaturesProtectedBranch) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// OptionalSignaturesOnProtectedBranch removes required signed commits on a given branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-commit-signature-protection -func (s *RepositoriesService) OptionalSignaturesOnProtectedBranch(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeSignaturePreview) - - return s.client.Do(ctx, req, nil) -} - -// UpdateRequiredStatusChecks updates the required status checks for a given protected branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-status-check-protection -func (s *RepositoriesService) UpdateRequiredStatusChecks(ctx context.Context, owner, repo, branch string, sreq *RequiredStatusChecksRequest) (*RequiredStatusChecks, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) - req, err := s.client.NewRequest("PATCH", u, sreq) - if err != nil { - return nil, nil, err - } - - sc := new(RequiredStatusChecks) - resp, err := s.client.Do(ctx, req, sc) - if err != nil { - return nil, resp, err - } - - return sc, resp, nil -} - -// RemoveRequiredStatusChecks removes the required status checks for a given protected branch. 
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-status-check-protection -func (s *RepositoriesService) RemoveRequiredStatusChecks(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// License gets the contents of a repository's license if one is detected. -// -// GitHub API docs: https://docs.github.com/en/rest/licenses#get-the-license-for-a-repository -func (s *RepositoriesService) License(ctx context.Context, owner, repo string) (*RepositoryLicense, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/license", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - r := &RepositoryLicense{} - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// GetPullRequestReviewEnforcement gets pull request review enforcement of a protected branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-pull-request-review-protection -func (s *RepositoriesService) GetPullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(PullRequestReviewsEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// UpdatePullRequestReviewEnforcement patches pull request review enforcement of a protected branch. -// It requires admin access and branch protection to be enabled. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-pull-request-review-protection -func (s *RepositoriesService) UpdatePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string, patch *PullRequestReviewsEnforcementUpdate) (*PullRequestReviewsEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - req, err := s.client.NewRequest("PATCH", u, patch) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(PullRequestReviewsEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// DisableDismissalRestrictions disables dismissal restrictions of a protected branch. -// It requires admin access and branch protection to be enabled. 
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-pull-request-review-protection -func (s *RepositoriesService) DisableDismissalRestrictions(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - - data := new(struct { - DismissalRestrictionsRequest `json:"dismissal_restrictions"` - }) - - req, err := s.client.NewRequest("PATCH", u, data) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeRequiredApprovingReviewsPreview) - - r := new(PullRequestReviewsEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// RemovePullRequestReviewEnforcement removes pull request enforcement of a protected branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-pull-request-review-protection -func (s *RepositoriesService) RemovePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetAdminEnforcement gets admin enforcement information of a protected branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-admin-branch-protection -func (s *RepositoriesService) GetAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - r := new(AdminEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// AddAdminEnforcement adds admin enforcement to a protected branch. -// It requires admin access and branch protection to be enabled. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-admin-branch-protection -func (s *RepositoriesService) AddAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - r := new(AdminEnforcement) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// RemoveAdminEnforcement removes admin enforcement from a protected branch. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-admin-branch-protection -func (s *RepositoriesService) RemoveAdminEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// repositoryTopics represents a collection of repository topics. 
-type repositoryTopics struct { - Names []string `json:"names"` -} - -// ListAllTopics lists topics for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#get-all-repository-topics -func (s *RepositoriesService) ListAllTopics(ctx context.Context, owner, repo string) ([]string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/topics", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) - - topics := new(repositoryTopics) - resp, err := s.client.Do(ctx, req, topics) - if err != nil { - return nil, resp, err - } - - return topics.Names, resp, nil -} - -// ReplaceAllTopics replaces all repository topics. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#replace-all-repository-topics -func (s *RepositoriesService) ReplaceAllTopics(ctx context.Context, owner, repo string, topics []string) ([]string, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/topics", owner, repo) - t := &repositoryTopics{ - Names: topics, - } - if t.Names == nil { - t.Names = []string{} - } - req, err := s.client.NewRequest("PUT", u, t) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) - - t = new(repositoryTopics) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t.Names, resp, nil -} - -// ListApps lists the GitHub apps that have push access to a given protected branch. -// It requires the GitHub apps to have `write` access to the `content` permission. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-apps-with-access-to-the-protected-branch -// -// Deprecated: Please use ListAppRestrictions instead. -func (s *RepositoriesService) ListApps(ctx context.Context, owner, repo, branch string) ([]*App, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var apps []*App - resp, err := s.client.Do(ctx, req, &apps) - if err != nil { - return nil, resp, err - } - - return apps, resp, nil -} - -// ListAppRestrictions lists the GitHub apps that have push access to a given protected branch. -// It requires the GitHub apps to have `write` access to the `content` permission. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-apps-with-access-to-the-protected-branch -// -// Note: This is a wrapper around ListApps so a naming convention with ListUserRestrictions and ListTeamRestrictions is preserved. -func (s *RepositoriesService) ListAppRestrictions(ctx context.Context, owner, repo, branch string) ([]*App, *Response, error) { - return s.ListApps(ctx, owner, repo, branch) -} - -// ReplaceAppRestrictions replaces the apps that have push access to a given protected branch. -// It removes all apps that previously had push access and grants push access to the new list of apps. -// It requires the GitHub apps to have `write` access to the `content` permission. -// -// Note: The list of users, apps, and teams in total is limited to 100 items. 
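A sketch of the read-modify-write pattern the topics helpers above support; note that ReplaceAllTopics overwrites the entire set (and normalizes nil to an empty list), so callers usually list first. Repo coordinates are placeholders:

package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	topics, _, err := client.Repositories.ListAllTopics(ctx, "octocat", "hello-world")
	if err != nil {
		log.Fatal(err)
	}

	// Deduplicate and lower-case before writing the full set back.
	seen := map[string]bool{}
	var cleaned []string
	for _, t := range topics {
		t = strings.ToLower(t)
		if !seen[t] {
			seen[t] = true
			cleaned = append(cleaned, t)
		}
	}

	updated, _, err := client.Repositories.ReplaceAllTopics(ctx, "octocat", "hello-world", cleaned)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(updated)
}

-// ReplaceAppRestrictions replaces the apps that have push access to a given protected branch.
-// It removes all apps that previously had push access and grants push access to the new list of apps.
-// It requires the GitHub apps to have `write` access to the `content` permission.
-//
-// Note: The list of users, apps, and teams in total is limited to 100 items.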
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-app-access-restrictions -func (s *RepositoriesService) ReplaceAppRestrictions(ctx context.Context, owner, repo, branch string, apps []string) ([]*App, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) - req, err := s.client.NewRequest("PUT", u, apps) - if err != nil { - return nil, nil, err - } - - var newApps []*App - resp, err := s.client.Do(ctx, req, &newApps) - if err != nil { - return nil, resp, err - } - - return newApps, resp, nil -} - -// AddAppRestrictions grants the specified apps push access to a given protected branch. -// It requires the GitHub apps to have `write` access to the `content` permission. -// -// Note: The list of users, apps, and teams in total is limited to 100 items. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#add-app-access-restrictions -func (s *RepositoriesService) AddAppRestrictions(ctx context.Context, owner, repo, branch string, apps []string) ([]*App, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) - req, err := s.client.NewRequest("POST", u, apps) - if err != nil { - return nil, nil, err - } - - var newApps []*App - resp, err := s.client.Do(ctx, req, &newApps) - if err != nil { - return nil, resp, err - } - - return newApps, resp, nil -} - -// RemoveAppRestrictions removes the restrictions of an app from pushing to this branch. -// It requires the GitHub apps to have `write` access to the `content` permission. -// -// Note: The list of users, apps, and teams in total is limited to 100 items. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-app-access-restrictions -func (s *RepositoriesService) RemoveAppRestrictions(ctx context.Context, owner, repo, branch string, apps []string) ([]*App, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, apps) - if err != nil { - return nil, nil, err - } - - var newApps []*App - resp, err := s.client.Do(ctx, req, &newApps) - if err != nil { - return nil, resp, err - } - - return newApps, resp, nil -} - -// ListTeamRestrictions lists the GitHub teams that have push access to a given protected branch. -// It requires the GitHub teams to have `write` access to the `content` permission. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-teams-with-access-to-the-protected-branch -func (s *RepositoriesService) ListTeamRestrictions(ctx context.Context, owner, repo, branch string) ([]*Team, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} - -// ReplaceTeamRestrictions replaces the team that have push access to a given protected branch. -// This removes all teams that previously had push access and grants push access to the new list of teams. -// It requires the GitHub teams to have `write` access to the `content` permission. -// -// Note: The list of users, apps, and teams in total is limited to 100 items. 
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-team-access-restrictions -func (s *RepositoriesService) ReplaceTeamRestrictions(ctx context.Context, owner, repo, branch string, teams []string) ([]*Team, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) - req, err := s.client.NewRequest("PUT", u, teams) - if err != nil { - return nil, nil, err - } - - var newTeams []*Team - resp, err := s.client.Do(ctx, req, &newTeams) - if err != nil { - return nil, resp, err - } - - return newTeams, resp, nil -} - -// AddTeamRestrictions grants the specified teams push access to a given protected branch. -// It requires the GitHub teams to have `write` access to the `content` permission. -// -// Note: The list of users, apps, and teams in total is limited to 100 items. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#add-team-access-restrictions -func (s *RepositoriesService) AddTeamRestrictions(ctx context.Context, owner, repo, branch string, teams []string) ([]*Team, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) - req, err := s.client.NewRequest("POST", u, teams) - if err != nil { - return nil, nil, err - } - - var newTeams []*Team - resp, err := s.client.Do(ctx, req, &newTeams) - if err != nil { - return nil, resp, err - } - - return newTeams, resp, nil -} - -// RemoveTeamRestrictions removes the restrictions of a team from pushing to this branch. -// It requires the GitHub teams to have `write` access to the `content` permission. -// -// Note: The list of users, apps, and teams in total is limited to 100 items. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-team-access-restrictions -func (s *RepositoriesService) RemoveTeamRestrictions(ctx context.Context, owner, repo, branch string, teams []string) ([]*Team, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, teams) - if err != nil { - return nil, nil, err - } - - var newTeams []*Team - resp, err := s.client.Do(ctx, req, &newTeams) - if err != nil { - return nil, resp, err - } - - return newTeams, resp, nil -} - -// ListUserRestrictions lists the GitHub users that have push access to a given protected branch. -// It requires the GitHub users to have `write` access to the `content` permission. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-users-with-access-to-the-protected-branch -func (s *RepositoriesService) ListUserRestrictions(ctx context.Context, owner, repo, branch string) ([]*User, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} - -// ReplaceUserRestrictions replaces the user that have push access to a given protected branch. -// It removes all users that previously had push access and grants push access to the new list of users. -// It requires the GitHub users to have `write` access to the `content` permission. -// -// Note: The list of users, apps, and teams in total is limited to 100 items. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-user-access-restrictions
-func (s *RepositoriesService) ReplaceUserRestrictions(ctx context.Context, owner, repo, branch string, users []string) ([]*User, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch)
-	req, err := s.client.NewRequest("PUT", u, users)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var newUsers []*User
-	resp, err := s.client.Do(ctx, req, &newUsers)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return newUsers, resp, nil
-}
-
-// AddUserRestrictions grants the specified users push access to a given protected branch.
-// It requires the GitHub users to have `write` access to the `content` permission.
-//
-// Note: The list of users, apps, and teams in total is limited to 100 items.
-//
-// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#add-user-access-restrictions
-func (s *RepositoriesService) AddUserRestrictions(ctx context.Context, owner, repo, branch string, users []string) ([]*User, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch)
-	req, err := s.client.NewRequest("POST", u, users)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var newUsers []*User
-	resp, err := s.client.Do(ctx, req, &newUsers)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return newUsers, resp, nil
-}
-
-// RemoveUserRestrictions removes the restrictions of a user from pushing to this branch.
-// It requires the GitHub users to have `write` access to the `content` permission.
-//
-// Note: The list of users, apps, and teams in total is limited to 100 items.
-//
-// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-user-access-restrictions
-func (s *RepositoriesService) RemoveUserRestrictions(ctx context.Context, owner, repo, branch string, users []string) ([]*User, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch)
-	req, err := s.client.NewRequest("DELETE", u, users)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var newUsers []*User
-	resp, err := s.client.Do(ctx, req, &newUsers)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return newUsers, resp, nil
-}
-
-// TransferRequest represents a request to transfer a repository.
-type TransferRequest struct {
-	NewOwner string  `json:"new_owner"`
-	NewName  *string `json:"new_name,omitempty"`
-	TeamID   []int64 `json:"team_ids,omitempty"`
-}
-
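A sketch of the Transfer helper documented just below. Per its doc comment, a 202 surfaces as go-github's *AcceptedError, which callers should treat as "scheduled" rather than failure; owner names here are placeholders and the client would need real credentials:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // transfers require an authenticated client

	req := github.TransferRequest{NewOwner: "new-owner-org"}
	repo, _, err := client.Repositories.Transfer(ctx, "octocat", "hello-world", req)

	// A 202 means GitHub queued the transfer in a background task.
	var accepted *github.AcceptedError
	if errors.As(err, &accepted) {
		fmt.Println("transfer scheduled; poll the repository again shortly")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("transferred:", repo.GetFullName())
}

-// Transfer transfers a repository from one account or organization to another.
-//
-// This method might return an *AcceptedError and a status code of 202;
-// GitHub returns this status to signify that it has scheduled the transfer
-// of the repository in a background task. A follow-up request, after a
-// delay of a second or so, should result in a successful request.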
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#transfer-a-repository
-func (s *RepositoriesService) Transfer(ctx context.Context, owner, repo string, transfer TransferRequest) (*Repository, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/transfer", owner, repo)
-
-	req, err := s.client.NewRequest("POST", u, &transfer)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(Repository)
-	resp, err := s.client.Do(ctx, req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-// DispatchRequestOptions represents a request to trigger a repository_dispatch event.
-type DispatchRequestOptions struct {
-	// EventType is a custom webhook event name. (Required.)
-	EventType string `json:"event_type"`
-	// ClientPayload is a custom JSON payload with extra information about the webhook event.
-	// Defaults to an empty JSON object.
-	ClientPayload *json.RawMessage `json:"client_payload,omitempty"`
-}
-
-// Dispatch triggers a repository_dispatch event in a GitHub Actions workflow.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-a-repository-dispatch-event
-func (s *RepositoriesService) Dispatch(ctx context.Context, owner, repo string, opts DispatchRequestOptions) (*Repository, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/dispatches", owner, repo)
-
-	req, err := s.client.NewRequest("POST", u, &opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(Repository)
-	resp, err := s.client.Do(ctx, req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-// isBranchNotProtected determines whether a branch is not protected
-// based on the error message returned by GitHub API.
-func isBranchNotProtected(err error) bool {
-	errorResponse, ok := err.(*ErrorResponse)
-	return ok && errorResponse.Message == githubBranchNotProtected
-}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_access.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_access.go
deleted file mode 100644
index 55761eeb7e..0000000000
--- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_access.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2022 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// RepositoryActionsAccessLevel represents the repository actions access level.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-the-level-of-access-for-workflows-outside-of-the-repository
-type RepositoryActionsAccessLevel struct {
-	// AccessLevel specifies the level of access that workflows outside of the repository have
-	// to actions and reusable workflows within the repository.
-	// Possible values are: "none", "organization", "enterprise".
-	AccessLevel *string `json:"access_level,omitempty"`
-}
-
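A sketch of the getter/setter pair documented next, which wraps the access-level endpoint for workflows outside the repository. The "organization" value comes from the struct comment above; coordinates are placeholders and a real call needs authentication:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Allow workflows in other repositories of the same org to use this
	// repository's actions and reusable workflows.
	level := github.RepositoryActionsAccessLevel{AccessLevel: github.String("organization")}
	if _, err := client.Repositories.EditActionsAccessLevel(ctx, "octocat", "hello-world", level); err != nil {
		log.Fatal(err)
	}

	got, _, err := client.Repositories.GetActionsAccessLevel(ctx, "octocat", "hello-world")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access level:", got.GetAccessLevel())
}

-// GetActionsAccessLevel gets the level of access that workflows outside of the repository have
-// to actions and reusable workflows in the repository.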
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-the-level-of-access-for-workflows-outside-of-the-repository -func (s *RepositoriesService) GetActionsAccessLevel(ctx context.Context, owner, repo string) (*RepositoryActionsAccessLevel, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/permissions/access", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - raal := new(RepositoryActionsAccessLevel) - resp, err := s.client.Do(ctx, req, raal) - if err != nil { - return nil, resp, err - } - - return raal, resp, nil -} - -// EditActionsAccessLevel sets the level of access that workflows outside of the repository have -// to actions and reusable workflows in the repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-the-level-of-access-for-workflows-outside-of-the-repository -func (s *RepositoriesService) EditActionsAccessLevel(ctx context.Context, owner, repo string, repositoryActionsAccessLevel RepositoryActionsAccessLevel) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/permissions/access", owner, repo) - req, err := s.client.NewRequest("PUT", u, repositoryActionsAccessLevel) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_allowed.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_allowed.go deleted file mode 100644 index 25a4690583..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_allowed.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GetActionsAllowed gets the allowed actions and reusable workflows for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-allowed-actions-and-reusable-workflows-for-a-repository -func (s *RepositoriesService) GetActionsAllowed(ctx context.Context, org, repo string) (*ActionsAllowed, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/permissions/selected-actions", org, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - actionsAllowed := new(ActionsAllowed) - resp, err := s.client.Do(ctx, req, actionsAllowed) - if err != nil { - return nil, resp, err - } - - return actionsAllowed, resp, nil -} - -// EditActionsAllowed sets the allowed actions and reusable workflows for a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-allowed-actions-and-reusable-workflows-for-a-repository -func (s *RepositoriesService) EditActionsAllowed(ctx context.Context, org, repo string, actionsAllowed ActionsAllowed) (*ActionsAllowed, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/permissions/selected-actions", org, repo) - req, err := s.client.NewRequest("PUT", u, actionsAllowed) - if err != nil { - return nil, nil, err - } - - p := new(ActionsAllowed) - resp, err := s.client.Do(ctx, req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go deleted file mode 100644 index 45f844cec0..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ActionsPermissionsRepository represents a policy for repositories and allowed actions in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions -type ActionsPermissionsRepository struct { - Enabled *bool `json:"enabled,omitempty"` - AllowedActions *string `json:"allowed_actions,omitempty"` - SelectedActionsURL *string `json:"selected_actions_url,omitempty"` -} - -func (a ActionsPermissionsRepository) String() string { - return Stringify(a) -} - -// GetActionsPermissions gets the GitHub Actions permissions policy for repositories and allowed actions in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-github-actions-permissions-for-a-repository -func (s *RepositoriesService) GetActionsPermissions(ctx context.Context, owner, repo string) (*ActionsPermissionsRepository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/permissions", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - permissions := new(ActionsPermissionsRepository) - resp, err := s.client.Do(ctx, req, permissions) - if err != nil { - return nil, resp, err - } - - return permissions, resp, nil -} - -// EditActionsPermissions sets the permissions policy for repositories and allowed actions in a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-github-actions-permissions-for-a-repository -func (s *RepositoriesService) EditActionsPermissions(ctx context.Context, owner, repo string, actionsPermissionsRepository ActionsPermissionsRepository) (*ActionsPermissionsRepository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/actions/permissions", owner, repo) - req, err := s.client.NewRequest("PUT", u, actionsPermissionsRepository) - if err != nil { - return nil, nil, err - } - - permissions := new(ActionsPermissionsRepository) - resp, err := s.client.Do(ctx, req, permissions) - if err != nil { - return nil, resp, err - } - - return permissions, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_autolinks.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_autolinks.go deleted file mode 100644 index 0d2cec618f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_autolinks.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// AutolinkOptions specifies parameters for RepositoriesService.AddAutolink method. -type AutolinkOptions struct { - KeyPrefix *string `json:"key_prefix,omitempty"` - URLTemplate *string `json:"url_template,omitempty"` - IsAlphanumeric *bool `json:"is_alphanumeric,omitempty"` -} - -// Autolink represents autolinks to external resources like JIRA issues and Zendesk tickets. -type Autolink struct { - ID *int64 `json:"id,omitempty"` - KeyPrefix *string `json:"key_prefix,omitempty"` - URLTemplate *string `json:"url_template,omitempty"` - IsAlphanumeric *bool `json:"is_alphanumeric,omitempty"` -} - -// ListAutolinks returns a list of autolinks configured for the given repository. -// Information about autolinks are only available to repository administrators. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#list-all-autolinks-of-a-repository -func (s *RepositoriesService) ListAutolinks(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Autolink, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/autolinks", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var autolinks []*Autolink - resp, err := s.client.Do(ctx, req, &autolinks) - if err != nil { - return nil, resp, err - } - - return autolinks, resp, nil -} - -// AddAutolink creates an autolink reference for a repository. -// Users with admin access to the repository can create an autolink. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#create-an-autolink-reference-for-a-repository -func (s *RepositoriesService) AddAutolink(ctx context.Context, owner, repo string, opts *AutolinkOptions) (*Autolink, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/autolinks", owner, repo) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - al := new(Autolink) - resp, err := s.client.Do(ctx, req, al) - if err != nil { - return nil, resp, err - } - return al, resp, nil -} - -// GetAutolink returns a single autolink reference by ID that was configured for the given repository. -// Information about autolinks are only available to repository administrators. 
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#get-an-autolink-reference-of-a-repository -func (s *RepositoriesService) GetAutolink(ctx context.Context, owner, repo string, id int64) (*Autolink, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/autolinks/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var autolink *Autolink - resp, err := s.client.Do(ctx, req, &autolink) - if err != nil { - return nil, resp, err - } - - return autolink, resp, nil -} - -// DeleteAutolink deletes a single autolink reference by ID that was configured for the given repository. -// Information about autolinks are only available to repository administrators. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#delete-an-autolink-reference-from-a-repository -func (s *RepositoriesService) DeleteAutolink(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/autolinks/%v", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_codeowners.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_codeowners.go deleted file mode 100644 index 835d56e164..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_codeowners.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// CodeownersErrors represents a list of syntax errors detected in the CODEOWNERS file. -type CodeownersErrors struct { - Errors []*CodeownersError `json:"errors"` -} - -// CodeownersError represents a syntax error detected in the CODEOWNERS file. -type CodeownersError struct { - Line int `json:"line"` - Column int `json:"column"` - Kind string `json:"kind"` - Source string `json:"source"` - Suggestion *string `json:"suggestion,omitempty"` - Message string `json:"message"` - Path string `json:"path"` -} - -// GetCodeownersErrors lists any syntax errors that are detected in the CODEOWNERS file. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-codeowners-errors -func (s *RepositoriesService) GetCodeownersErrors(ctx context.Context, owner, repo string) (*CodeownersErrors, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/codeowners/errors", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - codeownersErrors := &CodeownersErrors{} - resp, err := s.client.Do(ctx, req, codeownersErrors) - if err != nil { - return nil, resp, err - } - - return codeownersErrors, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_collaborators.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_collaborators.go deleted file mode 100644 index c2396872f2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_collaborators.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// ListCollaboratorsOptions specifies the optional parameters to the
-// RepositoriesService.ListCollaborators method.
-type ListCollaboratorsOptions struct {
-	// Affiliation specifies how collaborators should be filtered by their affiliation.
-	// Possible values are:
-	//     outside - All outside collaborators of an organization-owned repository
-	//     direct - All collaborators with permissions to an organization-owned repository,
-	//              regardless of organization membership status
-	//     all - All collaborators the authenticated user can see
-	//
-	// Default value is "all".
-	Affiliation string `url:"affiliation,omitempty"`
-
-	// Permission specifies how collaborators should be filtered by the permissions they have on the repository.
-	// Possible values are:
-	//     "pull", "triage", "push", "maintain", "admin"
-	//
-	// If not specified, all collaborators will be returned.
-	Permission string `url:"permission,omitempty"`
-
-	ListOptions
-}
-
-// CollaboratorInvitation represents an invitation created when adding a collaborator.
-// GitHub API docs: https://docs.github.com/en/rest/repos/collaborators/#response-when-a-new-invitation-is-created
-type CollaboratorInvitation struct {
-	ID          *int64      `json:"id,omitempty"`
-	Repo        *Repository `json:"repository,omitempty"`
-	Invitee     *User       `json:"invitee,omitempty"`
-	Inviter     *User       `json:"inviter,omitempty"`
-	Permissions *string     `json:"permissions,omitempty"`
-	CreatedAt   *Timestamp  `json:"created_at,omitempty"`
-	URL         *string     `json:"url,omitempty"`
-	HTMLURL     *string     `json:"html_url,omitempty"`
-}
-
-// ListCollaborators lists the GitHub users that have access to the repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#list-repository-collaborators
-func (s *RepositoriesService) ListCollaborators(ctx context.Context, owner, repo string, opts *ListCollaboratorsOptions) ([]*User, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var users []*User
-	resp, err := s.client.Do(ctx, req, &users)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return users, resp, nil
-}
-
-// IsCollaborator checks whether the specified GitHub user has collaborator
-// access to the given repo.
-// Note: This will return false if the user is not a collaborator OR the user
-// is not a GitHub user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#check-if-a-user-is-a-repository-collaborator
-func (s *RepositoriesService) IsCollaborator(ctx context.Context, owner, repo, user string) (bool, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return false, nil, err
-	}
-
-	resp, err := s.client.Do(ctx, req, nil)
-	isCollab, err := parseBoolResponse(err)
-	return isCollab, resp, err
-}
-
-// RepositoryPermissionLevel represents the permission level an organization
-// member has for a given repository.
-type RepositoryPermissionLevel struct {
-	// Possible values: "admin", "write", "read", "none"
-	Permission *string `json:"permission,omitempty"`
-
-	User *User `json:"user,omitempty"`
-}
-
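A sketch of typical use of the collaborator helpers above: list direct collaborators with the options struct, then check a single login with the boolean-style IsCollaborator. The "direct" filter value comes from the Affiliation comment above; names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opts := &github.ListCollaboratorsOptions{
		Affiliation: "direct",
		ListOptions: github.ListOptions{PerPage: 50},
	}
	users, _, err := client.Repositories.ListCollaborators(ctx, "octocat", "hello-world", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range users {
		fmt.Println(u.GetLogin())
	}

	// IsCollaborator turns the 204/404 status pair into a bool.
	ok, _, err := client.Repositories.IsCollaborator(ctx, "octocat", "hello-world", "some-user")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("collaborator:", ok)
}

-// GetPermissionLevel retrieves the specific permission level a collaborator has for a given repository.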
-// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#get-repository-permissions-for-a-user -func (s *RepositoriesService) GetPermissionLevel(ctx context.Context, owner, repo, user string) (*RepositoryPermissionLevel, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v/permission", owner, repo, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - rpl := new(RepositoryPermissionLevel) - resp, err := s.client.Do(ctx, req, rpl) - if err != nil { - return nil, resp, err - } - - return rpl, resp, nil -} - -// RepositoryAddCollaboratorOptions specifies the optional parameters to the -// RepositoriesService.AddCollaborator method. -type RepositoryAddCollaboratorOptions struct { - // Permission specifies the permission to grant the user on this repository. - // Possible values are: - // pull - team members can pull, but not push to or administer this repository - // push - team members can pull and push, but not administer this repository - // admin - team members can pull, push and administer this repository - // maintain - team members can manage the repository without access to sensitive or destructive actions. - // triage - team members can proactively manage issues and pull requests without write access. - // - // Default value is "push". This option is only valid for organization-owned repositories. - Permission string `json:"permission,omitempty"` -} - -// AddCollaborator sends an invitation to the specified GitHub user -// to become a collaborator to the given repo. -// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#add-a-repository-collaborator -func (s *RepositoriesService) AddCollaborator(ctx context.Context, owner, repo, user string, opts *RepositoryAddCollaboratorOptions) (*CollaboratorInvitation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, nil, err - } - - acr := new(CollaboratorInvitation) - resp, err := s.client.Do(ctx, req, acr) - if err != nil { - return nil, resp, err - } - - return acr, resp, nil -} - -// RemoveCollaborator removes the specified GitHub user as collaborator from the given repo. -// Note: Does not return error if a valid user that is not a collaborator is removed. -// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#remove-a-repository-collaborator -func (s *RepositoriesService) RemoveCollaborator(ctx context.Context, owner, repo, user string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_comments.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_comments.go deleted file mode 100644 index e282374e9e..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_comments.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RepositoryComment represents a comment for a commit, file, or line in a repository. 
-type RepositoryComment struct { - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - User *User `json:"user,omitempty"` - Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - - // User-mutable fields - Body *string `json:"body"` - // User-initialized fields - Path *string `json:"path,omitempty"` - Position *int `json:"position,omitempty"` -} - -func (r RepositoryComment) String() string { - return Stringify(r) -} - -// ListComments lists all the comments for the repository. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/comments#list-commit-comments-for-a-repository -func (s *RepositoriesService) ListComments(ctx context.Context, owner, repo string, opts *ListOptions) ([]*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var comments []*RepositoryComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// ListCommitComments lists all the comments for a given commit SHA. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/comments#list-commit-comments -func (s *RepositoriesService) ListCommitComments(ctx context.Context, owner, repo, sha string, opts *ListOptions) ([]*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - var comments []*RepositoryComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// CreateComment creates a comment for the given commit. -// Note: GitHub allows for comments to be created for non-existing files and positions. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/comments#create-a-commit-comment -func (s *RepositoriesService) CreateComment(ctx context.Context, owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(RepositoryComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// GetComment gets a single comment from a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/commits/comments#get-a-commit-comment -func (s *RepositoriesService) GetComment(ctx context.Context, owner, repo string, id int64) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - - c := new(RepositoryComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// UpdateComment updates the body of a single comment. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/comments#update-a-commit-comment -func (s *RepositoriesService) UpdateComment(ctx context.Context, owner, repo string, id int64, comment *RepositoryComment) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(RepositoryComment) - resp, err := s.client.Do(ctx, req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// DeleteComment deletes a single comment from a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/comments#delete-a-commit-comment -func (s *RepositoriesService) DeleteComment(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_commits.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_commits.go deleted file mode 100644 index d1fb577c61..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_commits.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "context" - "fmt" - "net/url" - "time" -) - -// RepositoryCommit represents a commit in a repo. -// Note that it's wrapping a Commit, so author/committer information is in two places, -// but contain different details about them: in RepositoryCommit "github details", in Commit - "git details". -type RepositoryCommit struct { - NodeID *string `json:"node_id,omitempty"` - SHA *string `json:"sha,omitempty"` - Commit *Commit `json:"commit,omitempty"` - Author *User `json:"author,omitempty"` - Committer *User `json:"committer,omitempty"` - Parents []*Commit `json:"parents,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - - // Details about how many changes were made in this commit. Only filled in during GetCommit! - Stats *CommitStats `json:"stats,omitempty"` - // Details about which files, and how this commit touched. Only filled in during GetCommit! - Files []*CommitFile `json:"files,omitempty"` -} - -func (r RepositoryCommit) String() string { - return Stringify(r) -} - -// CommitStats represents the number of additions / deletions from a file in a given RepositoryCommit or GistCommit. 
-type CommitStats struct {
-	Additions *int `json:"additions,omitempty"`
-	Deletions *int `json:"deletions,omitempty"`
-	Total     *int `json:"total,omitempty"`
-}
-
-func (c CommitStats) String() string {
-	return Stringify(c)
-}
-
-// CommitFile represents a file modified in a commit.
-type CommitFile struct {
-	SHA              *string `json:"sha,omitempty"`
-	Filename         *string `json:"filename,omitempty"`
-	Additions        *int    `json:"additions,omitempty"`
-	Deletions        *int    `json:"deletions,omitempty"`
-	Changes          *int    `json:"changes,omitempty"`
-	Status           *string `json:"status,omitempty"`
-	Patch            *string `json:"patch,omitempty"`
-	BlobURL          *string `json:"blob_url,omitempty"`
-	RawURL           *string `json:"raw_url,omitempty"`
-	ContentsURL      *string `json:"contents_url,omitempty"`
-	PreviousFilename *string `json:"previous_filename,omitempty"`
-}
-
-func (c CommitFile) String() string {
-	return Stringify(c)
-}
-
-// CommitsComparison is the result of comparing two commits.
-// See CompareCommits() for details.
-type CommitsComparison struct {
-	BaseCommit      *RepositoryCommit `json:"base_commit,omitempty"`
-	MergeBaseCommit *RepositoryCommit `json:"merge_base_commit,omitempty"`
-
-	// Head can be 'behind' or 'ahead'
-	Status       *string `json:"status,omitempty"`
-	AheadBy      *int    `json:"ahead_by,omitempty"`
-	BehindBy     *int    `json:"behind_by,omitempty"`
-	TotalCommits *int    `json:"total_commits,omitempty"`
-
-	Commits []*RepositoryCommit `json:"commits,omitempty"`
-
-	Files []*CommitFile `json:"files,omitempty"`
-
-	HTMLURL      *string `json:"html_url,omitempty"`
-	PermalinkURL *string `json:"permalink_url,omitempty"`
-	DiffURL      *string `json:"diff_url,omitempty"`
-	PatchURL     *string `json:"patch_url,omitempty"`
-	URL          *string `json:"url,omitempty"` // API URL.
-}
-
-func (c CommitsComparison) String() string {
-	return Stringify(c)
-}
-
-// CommitsListOptions specifies the optional parameters to the
-// RepositoriesService.ListCommits method.
-type CommitsListOptions struct {
-	// SHA or branch to start listing Commits from.
-	SHA string `url:"sha,omitempty"`
-
-	// Path that should be touched by the returned Commits.
-	Path string `url:"path,omitempty"`
-
-	// Author by which to filter Commits.
-	Author string `url:"author,omitempty"`
-
-	// Since when should Commits be included in the response.
-	Since time.Time `url:"since,omitempty"`
-
-	// Until when should Commits be included in the response.
-	Until time.Time `url:"until,omitempty"`
-
-	ListOptions
-}
-
-// BranchCommit is the result of listing branches with commit SHA.
-type BranchCommit struct {
-	Name      *string `json:"name,omitempty"`
-	Commit    *Commit `json:"commit,omitempty"`
-	Protected *bool   `json:"protected,omitempty"`
-}
-
-// ListCommits lists the commits of a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/commits/commits#list-commits
-func (s *RepositoriesService) ListCommits(ctx context.Context, owner, repo string, opts *CommitsListOptions) ([]*RepositoryCommit, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/commits", owner, repo)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var commits []*RepositoryCommit
-	resp, err := s.client.Do(ctx, req, &commits)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return commits, resp, nil
-}
-
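A sketch of walking commits with CommitsListOptions from above, combined with go-github's standard NextPage pagination; branch, path, and repo names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opts := &github.CommitsListOptions{
		SHA:         "main",
		Path:        "docs/",
		Since:       time.Now().AddDate(0, -1, 0), // last month only
		ListOptions: github.ListOptions{PerPage: 100},
	}
	for {
		commits, resp, err := client.Repositories.ListCommits(ctx, "octocat", "hello-world", opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range commits {
			fmt.Println(c.GetSHA(), c.GetCommit().GetMessage())
		}
		// NextPage is 0 on the final page of results.
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
}

-// GetCommit fetches the specified commit, including all details about it.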
-//
-// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-single-commit
-// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-commit
-func (s *RepositoriesService) GetCommit(ctx context.Context, owner, repo, sha string, opts *ListOptions) (*RepositoryCommit, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	commit := new(RepositoryCommit)
-	resp, err := s.client.Do(ctx, req, commit)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return commit, resp, nil
-}
-
-// GetCommitRaw fetches the specified commit in raw (diff or patch) format.
-//
-// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-commit
-func (s *RepositoriesService) GetCommitRaw(ctx context.Context, owner string, repo string, sha string, opts RawOptions) (string, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return "", nil, err
-	}
-
-	switch opts.Type {
-	case Diff:
-		req.Header.Set("Accept", mediaTypeV3Diff)
-	case Patch:
-		req.Header.Set("Accept", mediaTypeV3Patch)
-	default:
-		return "", nil, fmt.Errorf("unsupported raw type %d", opts.Type)
-	}
-
-	var buf bytes.Buffer
-	resp, err := s.client.Do(ctx, req, &buf)
-	if err != nil {
-		return "", resp, err
-	}
-
-	return buf.String(), resp, nil
-}
-
-// GetCommitSHA1 gets the SHA-1 of a commit reference. If a last-known SHA1 is
-// supplied and no new commits have occurred, a 304 Unmodified response is returned.
-//
-// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-commit
-func (s *RepositoriesService) GetCommitSHA1(ctx context.Context, owner, repo, ref, lastSHA string) (string, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, refURLEscape(ref))
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return "", nil, err
-	}
-	if lastSHA != "" {
-		req.Header.Set("If-None-Match", `"`+lastSHA+`"`)
-	}
-
-	req.Header.Set("Accept", mediaTypeV3SHA)
-
-	var buf bytes.Buffer
-	resp, err := s.client.Do(ctx, req, &buf)
-	if err != nil {
-		return "", resp, err
-	}
-
-	return buf.String(), resp, nil
-}
-
-// CompareCommits compares a range of commits with each other.
-//
-// GitHub API docs: https://docs.github.com/en/rest/commits/commits#compare-two-commits
-func (s *RepositoriesService) CompareCommits(ctx context.Context, owner, repo string, base, head string, opts *ListOptions) (*CommitsComparison, *Response, error) {
-	escapedBase := url.QueryEscape(base)
-	escapedHead := url.QueryEscape(head)
-
-	u := fmt.Sprintf("repos/%v/%v/compare/%v...%v", owner, repo, escapedBase, escapedHead)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	comp := new(CommitsComparison)
-	resp, err := s.client.Do(ctx, req, comp)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return comp, resp, nil
-}
-
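A sketch of CompareCommits (defined above) reporting how a feature branch relates to its base via the CommitsComparison fields; branch names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// nil ListOptions is fine for a single page of comparison results.
	comp, _, err := client.Repositories.CompareCommits(ctx, "octocat", "hello-world", "main", "feature", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: ahead by %d, behind by %d, %d files changed\n",
		comp.GetStatus(), comp.GetAheadBy(), comp.GetBehindBy(), len(comp.Files))
}

-// CompareCommitsRaw compares a range of commits with each other in raw (diff or patch) format.
-//
-// Both "base" and "head" must be branch names in "repo".
-// To compare branches across other repositories in the same network as "repo",
-// use the format "<USERNAME>:branch".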
-// -// GitHub API docs: https://docs.github.com/en/rest/commits/commits#compare-two-commits -func (s *RepositoriesService) CompareCommitsRaw(ctx context.Context, owner, repo, base, head string, opts RawOptions) (string, *Response, error) { - escapedBase := url.QueryEscape(base) - escapedHead := url.QueryEscape(head) - - u := fmt.Sprintf("repos/%v/%v/compare/%v...%v", owner, repo, escapedBase, escapedHead) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - - switch opts.Type { - case Diff: - req.Header.Set("Accept", mediaTypeV3Diff) - case Patch: - req.Header.Set("Accept", mediaTypeV3Patch) - default: - return "", nil, fmt.Errorf("unsupported raw type %d", opts.Type) - } - - var buf bytes.Buffer - resp, err := s.client.Do(ctx, req, &buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// ListBranchesHeadCommit gets all branches where the given commit SHA is the HEAD, -// or latest commit for the branch. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/commits#list-branches-for-head-commit -func (s *RepositoriesService) ListBranchesHeadCommit(ctx context.Context, owner, repo, sha string) ([]*BranchCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/branches-where-head", owner, repo, sha) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeListPullsOrBranchesForCommitPreview) - var branchCommits []*BranchCommit - resp, err := s.client.Do(ctx, req, &branchCommits) - if err != nil { - return nil, resp, err - } - - return branchCommits, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_community_health.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_community_health.go deleted file mode 100644 index 750ee15827..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_community_health.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Metric represents the different fields for one file in community health files. -type Metric struct { - Name *string `json:"name"` - Key *string `json:"key"` - SPDXID *string `json:"spdx_id"` - URL *string `json:"url"` - HTMLURL *string `json:"html_url"` - NodeID *string `json:"node_id"` -} - -// CommunityHealthFiles represents the different files in the community health metrics response. -type CommunityHealthFiles struct { - CodeOfConduct *Metric `json:"code_of_conduct"` - CodeOfConductFile *Metric `json:"code_of_conduct_file"` - Contributing *Metric `json:"contributing"` - IssueTemplate *Metric `json:"issue_template"` - PullRequestTemplate *Metric `json:"pull_request_template"` - License *Metric `json:"license"` - Readme *Metric `json:"readme"` -} - -// CommunityHealthMetrics represents a response containing the community metrics of a repository. 
-type CommunityHealthMetrics struct { - HealthPercentage *int `json:"health_percentage"` - Description *string `json:"description"` - Documentation *string `json:"documentation"` - Files *CommunityHealthFiles `json:"files"` - UpdatedAt *Timestamp `json:"updated_at"` - ContentReportsEnabled *bool `json:"content_reports_enabled"` -} - -// GetCommunityHealthMetrics retrieves all the community health metrics for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/metrics/community#get-community-profile-metrics -func (s *RepositoriesService) GetCommunityHealthMetrics(ctx context.Context, owner, repo string) (*CommunityHealthMetrics, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/community/profile", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - metrics := &CommunityHealthMetrics{} - resp, err := s.client.Do(ctx, req, metrics) - if err != nil { - return nil, resp, err - } - - return metrics, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_contents.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_contents.go deleted file mode 100644 index 874a327728..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_contents.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Repository contents API methods. -// GitHub API docs: https://docs.github.com/en/rest/repos/contents/ - -package github - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "path" - "strings" -) - -// RepositoryContent represents a file or directory in a github repository. -type RepositoryContent struct { - Type *string `json:"type,omitempty"` - // Target is only set if the type is "symlink" and the target is not a normal file. - // If Target is set, Path will be the symlink path. - Target *string `json:"target,omitempty"` - Encoding *string `json:"encoding,omitempty"` - Size *int `json:"size,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - // Content contains the actual file content, which may be encoded. - // Callers should call GetContent which will decode the content if - // necessary. - Content *string `json:"content,omitempty"` - SHA *string `json:"sha,omitempty"` - URL *string `json:"url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - DownloadURL *string `json:"download_url,omitempty"` -} - -// RepositoryContentResponse holds the parsed response from CreateFile, UpdateFile, and DeleteFile. -type RepositoryContentResponse struct { - Content *RepositoryContent `json:"content,omitempty"` - Commit `json:"commit,omitempty"` -} - -// RepositoryContentFileOptions specifies optional parameters for CreateFile, UpdateFile, and DeleteFile. 
-type RepositoryContentFileOptions struct { - Message *string `json:"message,omitempty"` - Content []byte `json:"content"` // unencoded - SHA *string `json:"sha,omitempty"` - Branch *string `json:"branch,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` -} - -// RepositoryContentGetOptions represents an optional ref parameter, which can be a SHA, -// branch, or tag -type RepositoryContentGetOptions struct { - Ref string `url:"ref,omitempty"` -} - -// String converts RepositoryContent to a string. It's primarily for testing. -func (r RepositoryContent) String() string { - return Stringify(r) -} - -// GetContent returns the content of r, decoding it if necessary. -func (r *RepositoryContent) GetContent() (string, error) { - var encoding string - if r.Encoding != nil { - encoding = *r.Encoding - } - - switch encoding { - case "base64": - if r.Content == nil { - return "", errors.New("malformed response: base64 encoding of null content") - } - c, err := base64.StdEncoding.DecodeString(*r.Content) - return string(c), err - case "": - if r.Content == nil { - return "", nil - } - return *r.Content, nil - default: - return "", fmt.Errorf("unsupported content encoding: %v", encoding) - } -} - -// GetReadme gets the Readme file for the repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/contents#get-a-repository-readme -func (s *RepositoriesService) GetReadme(ctx context.Context, owner, repo string, opts *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/readme", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - readme := new(RepositoryContent) - resp, err := s.client.Do(ctx, req, readme) - if err != nil { - return nil, resp, err - } - - return readme, resp, nil -} - -// DownloadContents returns an io.ReadCloser that reads the contents of the -// specified file. This function will work with files of any size, as opposed -// to GetContents which is limited to 1 Mb files. It is the caller's -// responsibility to close the ReadCloser. -// -// It is possible for the download to result in a failed response when the -// returned error is nil. Callers should check the returned Response status -// code to verify the content is from a successful response. -func (s *RepositoriesService) DownloadContents(ctx context.Context, owner, repo, filepath string, opts *RepositoryContentGetOptions) (io.ReadCloser, *Response, error) { - dir := path.Dir(filepath) - filename := path.Base(filepath) - _, dirContents, resp, err := s.GetContents(ctx, owner, repo, dir, opts) - if err != nil { - return nil, resp, err - } - - for _, contents := range dirContents { - if *contents.Name == filename { - if contents.DownloadURL == nil || *contents.DownloadURL == "" { - return nil, resp, fmt.Errorf("no download link found for %s", filepath) - } - - dlResp, err := s.client.client.Get(*contents.DownloadURL) - if err != nil { - return nil, &Response{Response: dlResp}, err - } - - return dlResp.Body, &Response{Response: dlResp}, nil - } - } - - return nil, resp, fmt.Errorf("no file named %s found in %s", filename, dir) -} - -// DownloadContentsWithMeta is identical to DownloadContents but additionally -// returns the RepositoryContent of the requested file. This additional data -// is useful for future operations involving the requested file. 
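For reference, a minimal usage sketch for the DownloadContents helper removed above, against the vendored go-github v53 API; the owner/repo/path values and the unauthenticated client are illustrative assumptions, not part of the diff:

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated; enough for public repos

	// Stream the file instead of GetContents, which is limited to ~1 Mb files.
	rc, resp, err := client.Repositories.DownloadContents(ctx, "google", "go-github", "README.md", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// Per the doc comment, a nil error does not guarantee success;
	// the returned Response status must be checked as well.
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status: %s", resp.Status)
	}

	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("downloaded %d bytes\n", len(data))
}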
For merely -// reading the content of a file, DownloadContents is perfectly adequate. -// -// It is possible for the download to result in a failed response when the -// returned error is nil. Callers should check the returned Response status -// code to verify the content is from a successful response. -func (s *RepositoriesService) DownloadContentsWithMeta(ctx context.Context, owner, repo, filepath string, opts *RepositoryContentGetOptions) (io.ReadCloser, *RepositoryContent, *Response, error) { - dir := path.Dir(filepath) - filename := path.Base(filepath) - _, dirContents, resp, err := s.GetContents(ctx, owner, repo, dir, opts) - if err != nil { - return nil, nil, resp, err - } - - for _, contents := range dirContents { - if *contents.Name == filename { - if contents.DownloadURL == nil || *contents.DownloadURL == "" { - return nil, contents, resp, fmt.Errorf("no download link found for %s", filepath) - } - - dlResp, err := s.client.client.Get(*contents.DownloadURL) - if err != nil { - return nil, contents, &Response{Response: dlResp}, err - } - - return dlResp.Body, contents, &Response{Response: dlResp}, nil - } - } - - return nil, nil, resp, fmt.Errorf("no file named %s found in %s", filename, dir) -} - -// GetContents can return either the metadata and content of a single file -// (when path references a file) or the metadata of all the files and/or -// subdirectories of a directory (when path references a directory). To make it -// easy to distinguish between both result types and to mimic the API as much -// as possible, both result types will be returned but only one will contain a -// value and the other will be nil. -// -// Due to an auth vulnerability issue in the GitHub v3 API, ".." is not allowed -// to appear anywhere in the "path" or this method will return an error. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/contents#get-repository-content -func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path string, opts *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) { - if strings.Contains(path, "..") { - return nil, nil, nil, errors.New("path must not contain '..' due to auth vulnerability issue") - } - - escapedPath := (&url.URL{Path: strings.TrimSuffix(path, "/")}).String() - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath) - u, err = addOptions(u, opts) - if err != nil { - return nil, nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, nil, err - } - - var rawJSON json.RawMessage - resp, err = s.client.Do(ctx, req, &rawJSON) - if err != nil { - return nil, nil, resp, err - } - - fileUnmarshalError := json.Unmarshal(rawJSON, &fileContent) - if fileUnmarshalError == nil { - return fileContent, nil, resp, nil - } - - directoryUnmarshalError := json.Unmarshal(rawJSON, &directoryContent) - if directoryUnmarshalError == nil { - return nil, directoryContent, resp, nil - } - - return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s", fileUnmarshalError, directoryUnmarshalError) -} - -// CreateFile creates a new file in a repository at the given path and returns -// the commit and file metadata. 
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/contents#create-or-update-file-contents -func (s *RepositoriesService) CreateFile(ctx context.Context, owner, repo, path string, opts *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, nil, err - } - - createResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(ctx, req, createResponse) - if err != nil { - return nil, resp, err - } - - return createResponse, resp, nil -} - -// UpdateFile updates a file in a repository at the given path and returns the -// commit and file metadata. Requires the blob SHA of the file being updated. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/contents#create-or-update-file-contents -func (s *RepositoriesService) UpdateFile(ctx context.Context, owner, repo, path string, opts *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, nil, err - } - - updateResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(ctx, req, updateResponse) - if err != nil { - return nil, resp, err - } - - return updateResponse, resp, nil -} - -// DeleteFile deletes a file from a repository and returns the commit. -// Requires the blob SHA of the file to be deleted. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/contents#delete-a-file -func (s *RepositoriesService) DeleteFile(ctx context.Context, owner, repo, path string, opts *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("DELETE", u, opts) - if err != nil { - return nil, nil, err - } - - deleteResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(ctx, req, deleteResponse) - if err != nil { - return nil, resp, err - } - - return deleteResponse, resp, nil -} - -// ArchiveFormat is used to define the archive type when calling GetArchiveLink. -type ArchiveFormat string - -const ( - // Tarball specifies an archive in gzipped tar format. - Tarball ArchiveFormat = "tarball" - - // Zipball specifies an archive in zip format. - Zipball ArchiveFormat = "zipball" -) - -// GetArchiveLink returns an URL to download a tarball or zipball archive for a -// repository. The archiveFormat can be specified by either the github.Tarball -// or github.Zipball constant. 
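A sketch of the CreateFile/UpdateFile flow being deleted here, assuming an authenticated client (github.NewTokenClient) and placeholder repo coordinates; note that UpdateFile reuses the blob SHA returned by the create call:

package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	// Writing contents needs auth; the token env var and repo names are placeholders.
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	opts := &github.RepositoryContentFileOptions{
		Message: github.String("docs: add hello page"),
		Content: []byte("hello, world\n"), // raw bytes; the library handles encoding
		Branch:  github.String("main"),
	}
	created, _, err := client.Repositories.CreateFile(ctx, "my-org", "my-repo", "docs/hello.md", opts)
	if err != nil {
		log.Fatal(err)
	}

	// UpdateFile requires the blob SHA of the file being replaced.
	opts.SHA = created.Content.SHA
	opts.Content = []byte("hello again\n")
	if _, _, err := client.Repositories.UpdateFile(ctx, "my-org", "my-repo", "docs/hello.md", opts); err != nil {
		log.Fatal(err)
	}
}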
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/contents/#get-archive-link -func (s *RepositoriesService) GetArchiveLink(ctx context.Context, owner, repo string, archiveformat ArchiveFormat, opts *RepositoryContentGetOptions, followRedirects bool) (*url.URL, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/%s", owner, repo, archiveformat) - if opts != nil && opts.Ref != "" { - u += fmt.Sprintf("/%s", opts.Ref) - } - resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusFound { - return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) - } - - parsedURL, err := url.Parse(resp.Header.Get("Location")) - if err != nil { - return nil, newResponse(resp), err - } - - return parsedURL, newResponse(resp), nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go deleted file mode 100644 index 8c4628b39b..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// DeploymentBranchPolicy represents a single deployment branch policy for an environment. -type DeploymentBranchPolicy struct { - Name *string `json:"name,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// DeploymentBranchPolicyResponse represents the slightly different format of response that comes back when you list deployment branch policies. -type DeploymentBranchPolicyResponse struct { - TotalCount *int `json:"total_count,omitempty"` - BranchPolicies []*DeploymentBranchPolicy `json:"branch_policies,omitempty"` -} - -// DeploymentBranchPolicyRequest represents a deployment branch policy request. -type DeploymentBranchPolicyRequest struct { - Name *string `json:"name,omitempty"` -} - -// ListDeploymentBranchPolicies lists the deployment branch policies for an environment. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#list-deployment-branch-policies -func (s *RepositoriesService) ListDeploymentBranchPolicies(ctx context.Context, owner, repo, environment string) (*DeploymentBranchPolicyResponse, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies", owner, repo, environment) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var list *DeploymentBranchPolicyResponse - resp, err := s.client.Do(ctx, req, &list) - if err != nil { - return nil, resp, err - } - - return list, resp, nil -} - -// GetDeploymentBranchPolicy gets a deployment branch policy for an environment. 
-// -// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#get-a-deployment-branch-policy -func (s *RepositoriesService) GetDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, branchPolicyID int64) (*DeploymentBranchPolicy, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies/%v", owner, repo, environment, branchPolicyID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var policy *DeploymentBranchPolicy - resp, err := s.client.Do(ctx, req, &policy) - if err != nil { - return nil, resp, err - } - - return policy, resp, nil -} - -// CreateDeploymentBranchPolicy creates a deployment branch policy for an environment. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#create-a-deployment-branch-policy -func (s *RepositoriesService) CreateDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, request *DeploymentBranchPolicyRequest) (*DeploymentBranchPolicy, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies", owner, repo, environment) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - var policy *DeploymentBranchPolicy - resp, err := s.client.Do(ctx, req, &policy) - if err != nil { - return nil, resp, err - } - - return policy, resp, nil -} - -// UpdateDeploymentBranchPolicy updates a deployment branch policy for an environment. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#update-a-deployment-branch-policy -func (s *RepositoriesService) UpdateDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, branchPolicyID int64, request *DeploymentBranchPolicyRequest) (*DeploymentBranchPolicy, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies/%v", owner, repo, environment, branchPolicyID) - - req, err := s.client.NewRequest("PUT", u, request) - if err != nil { - return nil, nil, err - } - - var policy *DeploymentBranchPolicy - resp, err := s.client.Do(ctx, req, &policy) - if err != nil { - return nil, resp, err - } - - return policy, resp, nil -} - -// DeleteDeploymentBranchPolicy deletes a deployment branch policy for an environment. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#delete-a-deployment-branch-policy -func (s *RepositoriesService) DeleteDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, branchPolicyID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies/%v", owner, repo, environment, branchPolicyID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_deployments.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_deployments.go deleted file mode 100644 index 36445f895e..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_deployments.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package github - -import ( - "context" - "encoding/json" - "fmt" - "strings" -) - -// Deployment represents a deployment in a repo -type Deployment struct { - URL *string `json:"url,omitempty"` - ID *int64 `json:"id,omitempty"` - SHA *string `json:"sha,omitempty"` - Ref *string `json:"ref,omitempty"` - Task *string `json:"task,omitempty"` - Payload json.RawMessage `json:"payload,omitempty"` - Environment *string `json:"environment,omitempty"` - Description *string `json:"description,omitempty"` - Creator *User `json:"creator,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - NodeID *string `json:"node_id,omitempty"` -} - -// DeploymentRequest represents a deployment request -type DeploymentRequest struct { - Ref *string `json:"ref,omitempty"` - Task *string `json:"task,omitempty"` - AutoMerge *bool `json:"auto_merge,omitempty"` - RequiredContexts *[]string `json:"required_contexts,omitempty"` - Payload interface{} `json:"payload,omitempty"` - Environment *string `json:"environment,omitempty"` - Description *string `json:"description,omitempty"` - TransientEnvironment *bool `json:"transient_environment,omitempty"` - ProductionEnvironment *bool `json:"production_environment,omitempty"` -} - -// DeploymentsListOptions specifies the optional parameters to the -// RepositoriesService.ListDeployments method. -type DeploymentsListOptions struct { - // SHA of the Deployment. - SHA string `url:"sha,omitempty"` - - // List deployments for a given ref. - Ref string `url:"ref,omitempty"` - - // List deployments for a given task. - Task string `url:"task,omitempty"` - - // List deployments for a given environment. - Environment string `url:"environment,omitempty"` - - ListOptions -} - -// ListDeployments lists the deployments of a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#list-deployments -func (s *RepositoriesService) ListDeployments(ctx context.Context, owner, repo string, opts *DeploymentsListOptions) ([]*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var deployments []*Deployment - resp, err := s.client.Do(ctx, req, &deployments) - if err != nil { - return nil, resp, err - } - - return deployments, resp, nil -} - -// GetDeployment returns a single deployment of a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#get-a-deployment -func (s *RepositoriesService) GetDeployment(ctx context.Context, owner, repo string, deploymentID int64) (*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - deployment := new(Deployment) - resp, err := s.client.Do(ctx, req, deployment) - if err != nil { - return nil, resp, err - } - - return deployment, resp, nil -} - -// CreateDeployment creates a new deployment for a repository. 
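A short example of paging through ListDeployments with DeploymentsListOptions, following the usual go-github NextPage convention; the org, repo, and environment names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opts := &github.DeploymentsListOptions{
		Environment: "production",
		ListOptions: github.ListOptions{PerPage: 50},
	}
	// Standard go-github pagination: follow resp.NextPage until it is zero.
	for {
		deployments, resp, err := client.Repositories.ListDeployments(ctx, "my-org", "my-repo", opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range deployments {
			fmt.Printf("deployment %d: ref=%s task=%s\n", d.GetID(), d.GetRef(), d.GetTask())
		}
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
}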
-// -// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#create-a-deployment -func (s *RepositoriesService) CreateDeployment(ctx context.Context, owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeDeploymentStatusPreview, mediaTypeExpandDeploymentStatusPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - d := new(Deployment) - resp, err := s.client.Do(ctx, req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// DeleteDeployment deletes an existing deployment for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#delete-a-deployment -func (s *RepositoriesService) DeleteDeployment(ctx context.Context, owner, repo string, deploymentID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// DeploymentStatus represents the status of a -// particular deployment. -type DeploymentStatus struct { - ID *int64 `json:"id,omitempty"` - // State is the deployment state. - // Possible values are: "pending", "success", "failure", "error", - // "inactive", "in_progress", "queued". - State *string `json:"state,omitempty"` - Creator *User `json:"creator,omitempty"` - Description *string `json:"description,omitempty"` - Environment *string `json:"environment,omitempty"` - NodeID *string `json:"node_id,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - TargetURL *string `json:"target_url,omitempty"` - DeploymentURL *string `json:"deployment_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` - EnvironmentURL *string `json:"environment_url,omitempty"` - LogURL *string `json:"log_url,omitempty"` - URL *string `json:"url,omitempty"` -} - -// DeploymentStatusRequest represents a deployment request -type DeploymentStatusRequest struct { - State *string `json:"state,omitempty"` - LogURL *string `json:"log_url,omitempty"` - Description *string `json:"description,omitempty"` - Environment *string `json:"environment,omitempty"` - EnvironmentURL *string `json:"environment_url,omitempty"` - AutoInactive *bool `json:"auto_inactive,omitempty"` -} - -// ListDeploymentStatuses lists the statuses of a given deployment of a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/statuses#list-deployment-statuses -func (s *RepositoriesService) ListDeploymentStatuses(ctx context.Context, owner, repo string, deployment int64, opts *ListOptions) ([]*DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. 
- acceptHeaders := []string{mediaTypeDeploymentStatusPreview, mediaTypeExpandDeploymentStatusPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - var statuses []*DeploymentStatus - resp, err := s.client.Do(ctx, req, &statuses) - if err != nil { - return nil, resp, err - } - - return statuses, resp, nil -} - -// GetDeploymentStatus returns a single deployment status of a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/statuses#get-a-deployment-status -func (s *RepositoriesService) GetDeploymentStatus(ctx context.Context, owner, repo string, deploymentID, deploymentStatusID int64) (*DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses/%v", owner, repo, deploymentID, deploymentStatusID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeDeploymentStatusPreview, mediaTypeExpandDeploymentStatusPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - d := new(DeploymentStatus) - resp, err := s.client.Do(ctx, req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// CreateDeploymentStatus creates a new status for a deployment. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/statuses#create-a-deployment-status -func (s *RepositoriesService) CreateDeploymentStatus(ctx context.Context, owner, repo string, deployment int64, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - acceptHeaders := []string{mediaTypeDeploymentStatusPreview, mediaTypeExpandDeploymentStatusPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - d := new(DeploymentStatus) - resp, err := s.client.Do(ctx, req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_environments.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_environments.go deleted file mode 100644 index 2399a42c74..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_environments.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "fmt" - "net/http" -) - -// Environment represents a single environment in a repository. 
-type Environment struct { - Owner *string `json:"owner,omitempty"` - Repo *string `json:"repo,omitempty"` - EnvironmentName *string `json:"environment_name,omitempty"` - WaitTimer *int `json:"wait_timer,omitempty"` - Reviewers []*EnvReviewers `json:"reviewers,omitempty"` - DeploymentBranchPolicy *BranchPolicy `json:"deployment_branch_policy,omitempty"` - // Return/response only values - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - CanAdminsBypass *bool `json:"can_admins_bypass,omitempty"` - ProtectionRules []*ProtectionRule `json:"protection_rules,omitempty"` -} - -// EnvReviewers represents a single environment reviewer entry. -type EnvReviewers struct { - Type *string `json:"type,omitempty"` - ID *int64 `json:"id,omitempty"` -} - -// BranchPolicy represents the options for whether a branch deployment policy is applied to this environment. -type BranchPolicy struct { - ProtectedBranches *bool `json:"protected_branches,omitempty"` - CustomBranchPolicies *bool `json:"custom_branch_policies,omitempty"` -} - -// EnvResponse represents the slightly different format of response that comes back when you list an environment. -type EnvResponse struct { - TotalCount *int `json:"total_count,omitempty"` - Environments []*Environment `json:"environments,omitempty"` -} - -// ProtectionRule represents a single protection rule applied to the environment. -type ProtectionRule struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Type *string `json:"type,omitempty"` - WaitTimer *int `json:"wait_timer,omitempty"` - Reviewers []*RequiredReviewer `json:"reviewers,omitempty"` -} - -// RequiredReviewer represents a required reviewer. -type RequiredReviewer struct { - Type *string `json:"type,omitempty"` - Reviewer interface{} `json:"reviewer,omitempty"` -} - -// EnvironmentListOptions specifies the optional parameters to the -// RepositoriesService.ListEnvironments method. -type EnvironmentListOptions struct { - ListOptions -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// This helps us handle the fact that RequiredReviewer can have either a User or Team type reviewer field. -func (r *RequiredReviewer) UnmarshalJSON(data []byte) error { - type aliasReviewer RequiredReviewer - var reviewer aliasReviewer - if err := json.Unmarshal(data, &reviewer); err != nil { - return err - } - - r.Type = reviewer.Type - - switch *reviewer.Type { - case "User": - reviewer.Reviewer = &User{} - if err := json.Unmarshal(data, &reviewer); err != nil { - return err - } - r.Reviewer = reviewer.Reviewer - case "Team": - reviewer.Reviewer = &Team{} - if err := json.Unmarshal(data, &reviewer); err != nil { - return err - } - r.Reviewer = reviewer.Reviewer - default: - r.Type = nil - r.Reviewer = nil - return fmt.Errorf("reviewer.Type is %T, not a string of 'User' or 'Team', unable to unmarshal", reviewer.Type) - } - - return nil -} - -// ListEnvironments lists all environments for a repository. 
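Because RequiredReviewer.Reviewer is decoded into either a *github.User or a *github.Team by the custom UnmarshalJSON above, callers typically type-switch on it; a sketch with placeholder names, using the GetEnvironment method defined in this file:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	env, _, err := client.Repositories.GetEnvironment(ctx, "my-org", "my-repo", "production")
	if err != nil {
		log.Fatal(err)
	}

	// Reviewer holds either a *github.User or a *github.Team after unmarshaling.
	for _, rule := range env.ProtectionRules {
		for _, rr := range rule.Reviewers {
			switch v := rr.Reviewer.(type) {
			case *github.User:
				fmt.Println("user reviewer:", v.GetLogin())
			case *github.Team:
				fmt.Println("team reviewer:", v.GetSlug())
			}
		}
	}
}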
-// -// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#get-all-environments -func (s *RepositoriesService) ListEnvironments(ctx context.Context, owner, repo string, opts *EnvironmentListOptions) (*EnvResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/environments", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var list *EnvResponse - resp, err := s.client.Do(ctx, req, &list) - if err != nil { - return nil, resp, err - } - return list, resp, nil -} - -// GetEnvironment get a single environment for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#get-an-environment -func (s *RepositoriesService) GetEnvironment(ctx context.Context, owner, repo, name string) (*Environment, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/environments/%s", owner, repo, name) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var env *Environment - resp, err := s.client.Do(ctx, req, &env) - if err != nil { - return nil, resp, err - } - return env, resp, nil -} - -// MarshalJSON implements the json.Marshaler interface. -// As the only way to clear a WaitTimer is to set it to 0, a missing WaitTimer object should default to 0, not null. -// As the default value for CanAdminsBypass is true, a nil value here marshals to true. -func (c *CreateUpdateEnvironment) MarshalJSON() ([]byte, error) { - type Alias CreateUpdateEnvironment - if c.WaitTimer == nil { - c.WaitTimer = Int(0) - } - if c.CanAdminsBypass == nil { - c.CanAdminsBypass = Bool(true) - } - return json.Marshal(&struct { - *Alias - }{ - Alias: (*Alias)(c), - }) -} - -// CreateUpdateEnvironment represents the fields required for the create/update operation -// following the Create/Update release example. -// See https://github.com/google/go-github/issues/992 for more information. -// Removed omitempty here as the API expects null values for reviewers and deployment_branch_policy to clear them. -type CreateUpdateEnvironment struct { - WaitTimer *int `json:"wait_timer"` - Reviewers []*EnvReviewers `json:"reviewers"` - CanAdminsBypass *bool `json:"can_admins_bypass"` - DeploymentBranchPolicy *BranchPolicy `json:"deployment_branch_policy"` -} - -// createUpdateEnvironmentNoEnterprise represents the fields accepted for Pro/Teams private repos. -// Ref: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment -// See https://github.com/google/go-github/issues/2602 for more information. -type createUpdateEnvironmentNoEnterprise struct { - DeploymentBranchPolicy *BranchPolicy `json:"deployment_branch_policy"` -} - -// CreateUpdateEnvironment create or update a new environment for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#create-or-update-an-environment -func (s *RepositoriesService) CreateUpdateEnvironment(ctx context.Context, owner, repo, name string, environment *CreateUpdateEnvironment) (*Environment, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/environments/%s", owner, repo, name) - - req, err := s.client.NewRequest("PUT", u, environment) - if err != nil { - return nil, nil, err - } - - e := new(Environment) - resp, err := s.client.Do(ctx, req, e) - if err != nil { - // The API returns 422 when the pricing plan doesn't support all the fields sent. 
- // This path will be executed for Pro/Teams private repos. - // For public repos, regardless of the pricing plan, all fields supported. - // For Free plan private repos the returned error code is 404. - // We are checking that the user didn't try to send a value for unsupported fields, - // and return an error if they did. - if resp != nil && resp.StatusCode == http.StatusUnprocessableEntity && environment != nil && len(environment.Reviewers) == 0 && environment.GetWaitTimer() == 0 { - return s.createNewEnvNoEnterprise(ctx, u, environment) - } - return nil, resp, err - } - return e, resp, nil -} - -// createNewEnvNoEnterprise is an internal function for cases where the original call returned 422. -// Currently only the `deployment_branch_policy` parameter is supported for Pro/Team private repos. -func (s *RepositoriesService) createNewEnvNoEnterprise(ctx context.Context, u string, environment *CreateUpdateEnvironment) (*Environment, *Response, error) { - req, err := s.client.NewRequest("PUT", u, &createUpdateEnvironmentNoEnterprise{ - DeploymentBranchPolicy: environment.DeploymentBranchPolicy, - }) - if err != nil { - return nil, nil, err - } - - e := new(Environment) - resp, err := s.client.Do(ctx, req, e) - if err != nil { - return nil, resp, err - } - return e, resp, nil -} - -// DeleteEnvironment delete an environment from a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#delete-an-environment -func (s *RepositoriesService) DeleteEnvironment(ctx context.Context, owner, repo, name string) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/environments/%s", owner, repo, name) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_forks.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_forks.go deleted file mode 100644 index f175dfe3b0..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_forks.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "fmt" -) - -// RepositoryListForksOptions specifies the optional parameters to the -// RepositoriesService.ListForks method. -type RepositoryListForksOptions struct { - // How to sort the forks list. Possible values are: newest, oldest, - // watchers. Default is "newest". - Sort string `url:"sort,omitempty"` - - ListOptions -} - -// ListForks lists the forks of the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/forks#list-forks -func (s *RepositoriesService) ListForks(ctx context.Context, owner, repo string, opts *RepositoryListForksOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when topics API fully launches. 
- req.Header.Set("Accept", mediaTypeTopicsPreview) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// RepositoryCreateForkOptions specifies the optional parameters to the -// RepositoriesService.CreateFork method. -type RepositoryCreateForkOptions struct { - // The organization to fork the repository into. - Organization string `json:"organization,omitempty"` - Name string `json:"name,omitempty"` - DefaultBranchOnly bool `json:"default_branch_only,omitempty"` -} - -// CreateFork creates a fork of the specified repository. -// -// This method might return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing creating the fork in a background task. In this event, -// the Repository value will be returned, which includes the details about the pending fork. -// A follow up request, after a delay of a second or so, should result -// in a successful request. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/forks#create-a-fork -func (s *RepositoriesService) CreateFork(ctx context.Context, owner, repo string, opts *RepositoryCreateForkOptions) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) - - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - fork := new(Repository) - resp, err := s.client.Do(ctx, req, fork) - if err != nil { - // Persist AcceptedError's metadata to the Repository object. - if aerr, ok := err.(*AcceptedError); ok { - if err := json.Unmarshal(aerr.Raw, fork); err != nil { - return fork, resp, err - } - - return fork, resp, err - } - return nil, resp, err - } - - return fork, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_hooks.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_hooks.go deleted file mode 100644 index ba229e7bca..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_hooks.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" -) - -// WebHookPayload represents the data that is received from GitHub when a push -// event hook is triggered. The format of these payloads pre-date most of the -// GitHub v3 API, so there are lots of minor incompatibilities with the types -// defined in the rest of the API. Therefore, several types are duplicated -// here to account for these differences. -// -// GitHub API docs: https://help.github.com/articles/post-receive-hooks -// -// Deprecated: Please use PushEvent instead. -type WebHookPayload = PushEvent - -// WebHookCommit represents the commit variant we receive from GitHub in a -// WebHookPayload. -// -// Deprecated: Please use HeadCommit instead. -type WebHookCommit = HeadCommit - -// WebHookAuthor represents the author or committer of a commit, as specified -// in a WebHookCommit. The commit author may not correspond to a GitHub User. -// -// Deprecated: Please use CommitAuthor instead. -// NOTE Breaking API change: the `Username` field is now called `Login`. -type WebHookAuthor = CommitAuthor - -// Hook represents a GitHub (web and service) hook for a repository. 
-type Hook struct { - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - URL *string `json:"url,omitempty"` - ID *int64 `json:"id,omitempty"` - Type *string `json:"type,omitempty"` - Name *string `json:"name,omitempty"` - TestURL *string `json:"test_url,omitempty"` - PingURL *string `json:"ping_url,omitempty"` - LastResponse map[string]interface{} `json:"last_response,omitempty"` - - // Only the following fields are used when creating a hook. - // Config is required. - Config map[string]interface{} `json:"config,omitempty"` - Events []string `json:"events,omitempty"` - Active *bool `json:"active,omitempty"` -} - -func (h Hook) String() string { - return Stringify(h) -} - -// createHookRequest is a subset of Hook and is used internally -// by CreateHook to pass only the known fields for the endpoint. -// -// See https://github.com/google/go-github/issues/1015 for more -// information. -type createHookRequest struct { - // Config is required. - Name string `json:"name"` - Config map[string]interface{} `json:"config,omitempty"` - Events []string `json:"events,omitempty"` - Active *bool `json:"active,omitempty"` -} - -// CreateHook creates a Hook for the specified repository. -// Config is a required field. -// -// Note that only a subset of the hook fields are used and hook must -// not be nil. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#create-a-repository-webhook -func (s *RepositoriesService) CreateHook(ctx context.Context, owner, repo string, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) - - hookReq := &createHookRequest{ - Name: "web", - Events: hook.Events, - Active: hook.Active, - Config: hook.Config, - } - - req, err := s.client.NewRequest("POST", u, hookReq) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// ListHooks lists all Hooks for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#list-repository-webhooks -func (s *RepositoriesService) ListHooks(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var hooks []*Hook - resp, err := s.client.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} - -// GetHook returns a single specified Hook. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#get-a-repository-webhook -func (s *RepositoriesService) GetHook(ctx context.Context, owner, repo string, id int64) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// EditHook updates a specified Hook. 
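A usage sketch for CreateHook matching the createHookRequest subset above (only Config, Events, and Active are sent on create, and Config is required); the callback URL, secret handling, and token are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	hook := &github.Hook{
		Active: github.Bool(true),
		Events: []string{"push", "pull_request"},
		Config: map[string]interface{}{
			"url":          "https://example.com/webhook",
			"content_type": "json",
			"secret":       os.Getenv("WEBHOOK_SECRET"),
		},
	}
	created, _, err := client.Repositories.CreateHook(ctx, "my-org", "my-repo", hook)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created hook", created.GetID())
}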
-// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#update-a-repository-webhook -func (s *RepositoriesService) EditHook(ctx context.Context, owner, repo string, id int64, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - h := new(Hook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// DeleteHook deletes a specified Hook. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#delete-a-repository-webhook -func (s *RepositoriesService) DeleteHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// PingHook triggers a 'ping' event to be sent to the Hook. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#ping-a-repository-webhook -func (s *RepositoriesService) PingHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d/pings", owner, repo, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// TestHook triggers a test Hook by github. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#test-the-push-repository-webhook -func (s *RepositoriesService) TestHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d/tests", owner, repo, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(ctx, req, nil) -} - -// Subscribe lets servers register to receive updates when a topic is updated. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks#pubsubhubbub -func (s *RepositoriesService) Subscribe(ctx context.Context, owner, repo, event, callback string, secret []byte) (*Response, error) { - req, err := s.createWebSubRequest("subscribe", owner, repo, event, callback, secret) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Unsubscribe lets servers unregister to no longer receive updates when a topic is updated. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks#pubsubhubbub -func (s *RepositoriesService) Unsubscribe(ctx context.Context, owner, repo, event, callback string, secret []byte) (*Response, error) { - req, err := s.createWebSubRequest("unsubscribe", owner, repo, event, callback, secret) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// createWebSubRequest returns a subscribe/unsubscribe request that implements -// the WebSub (formerly PubSubHubbub) protocol. 
-// -// See: https://www.w3.org/TR/websub/#subscriber-sends-subscription-request -func (s *RepositoriesService) createWebSubRequest(hubMode, owner, repo, event, callback string, secret []byte) (*http.Request, error) { - topic := fmt.Sprintf( - "https://github.com/%s/%s/events/%s", - owner, - repo, - event, - ) - form := url.Values{} - form.Add("hub.mode", hubMode) - form.Add("hub.topic", topic) - form.Add("hub.callback", callback) - if secret != nil { - form.Add("hub.secret", string(secret)) - } - body := strings.NewReader(form.Encode()) - - req, err := s.client.NewFormRequest("hub", body) - if err != nil { - return nil, err - } - - return req, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_hooks_deliveries.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_hooks_deliveries.go deleted file mode 100644 index cbd2d3819a..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_hooks_deliveries.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "fmt" -) - -// HookDelivery represents the data that is received from GitHub's Webhook Delivery API -// -// GitHub API docs: -// - https://docs.github.com/en/rest/webhooks/repo-deliveries#list-deliveries-for-a-repository-webhook -// - https://docs.github.com/en/rest/webhooks/repo-deliveries#get-a-delivery-for-a-repository-webhook -type HookDelivery struct { - ID *int64 `json:"id,omitempty"` - GUID *string `json:"guid,omitempty"` - DeliveredAt *Timestamp `json:"delivered_at,omitempty"` - Redelivery *bool `json:"redelivery,omitempty"` - Duration *float64 `json:"duration,omitempty"` - Status *string `json:"status,omitempty"` - StatusCode *int `json:"status_code,omitempty"` - Event *string `json:"event,omitempty"` - Action *string `json:"action,omitempty"` - InstallationID *int64 `json:"installation_id,omitempty"` - RepositoryID *int64 `json:"repository_id,omitempty"` - - // Request is populated by GetHookDelivery. - Request *HookRequest `json:"request,omitempty"` - // Response is populated by GetHookDelivery. - Response *HookResponse `json:"response,omitempty"` -} - -func (d HookDelivery) String() string { - return Stringify(d) -} - -// HookRequest is a part of HookDelivery that contains -// the HTTP headers and the JSON payload of the webhook request. -type HookRequest struct { - Headers map[string]string `json:"headers,omitempty"` - RawPayload *json.RawMessage `json:"payload,omitempty"` -} - -func (r HookRequest) String() string { - return Stringify(r) -} - -// HookResponse is a part of HookDelivery that contains -// the HTTP headers and the response body served by the webhook endpoint. -type HookResponse struct { - Headers map[string]string `json:"headers,omitempty"` - RawPayload *json.RawMessage `json:"payload,omitempty"` -} - -func (r HookResponse) String() string { - return Stringify(r) -} - -// ListHookDeliveries lists webhook deliveries for a webhook configured in a repository. 
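A sketch combining ListHookDeliveries and RedeliverHookDelivery (both defined in the file deleted below) to retry deliveries the endpoint answered with a 5xx; the hook ID and repo coordinates are placeholders, and ListCursorOptions is the cursor-style pager this endpoint takes:

package main

import (
	"context"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	const hookID = int64(12345) // placeholder webhook ID

	deliveries, _, err := client.Repositories.ListHookDeliveries(ctx, "my-org", "my-repo", hookID,
		&github.ListCursorOptions{PerPage: 100})
	if err != nil {
		log.Fatal(err)
	}

	// Redeliver anything the receiving endpoint failed with a server error.
	for _, d := range deliveries {
		if d.GetStatusCode() >= 500 {
			if _, _, err := client.Repositories.RedeliverHookDelivery(ctx, "my-org", "my-repo", hookID, d.GetID()); err != nil {
				log.Printf("redeliver %d: %v", d.GetID(), err)
			}
		}
	}
}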
-// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repo-deliveries#list-deliveries-for-a-repository-webhook -func (s *RepositoriesService) ListHookDeliveries(ctx context.Context, owner, repo string, id int64, opts *ListCursorOptions) ([]*HookDelivery, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%v/deliveries", owner, repo, id) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - deliveries := []*HookDelivery{} - resp, err := s.client.Do(ctx, req, &deliveries) - if err != nil { - return nil, resp, err - } - - return deliveries, resp, nil -} - -// GetHookDelivery returns a delivery for a webhook configured in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repo-deliveries#get-a-delivery-for-a-repository-webhook -func (s *RepositoriesService) GetHookDelivery(ctx context.Context, owner, repo string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%v/deliveries/%v", owner, repo, hookID, deliveryID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - h := new(HookDelivery) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// RedeliverHookDelivery redelivers a delivery for a webhook configured in a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/webhooks/repo-deliveries#redeliver-a-delivery-for-a-repository-webhook -func (s *RepositoriesService) RedeliverHookDelivery(ctx context.Context, owner, repo string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%v/deliveries/%v/attempts", owner, repo, hookID, deliveryID) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - h := new(HookDelivery) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// ParseRequestPayload parses the request payload. For recognized event types, -// a value of the corresponding struct type will be returned. -func (d *HookDelivery) ParseRequestPayload() (interface{}, error) { - eType, ok := eventTypeMapping[*d.Event] - if !ok { - return nil, fmt.Errorf("unsupported event type %q", *d.Event) - } - - e := &Event{Type: &eType, RawPayload: d.Request.RawPayload} - return e.ParsePayload() -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_invitations.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_invitations.go deleted file mode 100644 index 81956cd49c..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_invitations.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RepositoryInvitation represents an invitation to collaborate on a repo. -type RepositoryInvitation struct { - ID *int64 `json:"id,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Invitee *User `json:"invitee,omitempty"` - Inviter *User `json:"inviter,omitempty"` - - // Permissions represents the permissions that the associated user will have - // on the repository. Possible values are: "read", "write", "admin". 
- Permissions *string `json:"permissions,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` -} - -// ListInvitations lists all currently-open repository invitations. -// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#list-repository-invitations -func (s *RepositoriesService) ListInvitations(ctx context.Context, owner, repo string, opts *ListOptions) ([]*RepositoryInvitation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/invitations", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - invites := []*RepositoryInvitation{} - resp, err := s.client.Do(ctx, req, &invites) - if err != nil { - return nil, resp, err - } - - return invites, resp, nil -} - -// DeleteInvitation deletes a repository invitation. -// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#delete-a-repository-invitation -func (s *RepositoriesService) DeleteInvitation(ctx context.Context, owner, repo string, invitationID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/invitations/%v", owner, repo, invitationID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// UpdateInvitation updates the permissions associated with a repository -// invitation. -// -// permissions represents the permissions that the associated user will have -// on the repository. Possible values are: "read", "write", "admin". -// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#update-a-repository-invitation -func (s *RepositoriesService) UpdateInvitation(ctx context.Context, owner, repo string, invitationID int64, permissions string) (*RepositoryInvitation, *Response, error) { - opts := &struct { - Permissions string `json:"permissions"` - }{Permissions: permissions} - u := fmt.Sprintf("repos/%v/%v/invitations/%v", owner, repo, invitationID) - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - invite := &RepositoryInvitation{} - resp, err := s.client.Do(ctx, req, invite) - if err != nil { - return nil, resp, err - } - - return invite, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_keys.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_keys.go deleted file mode 100644 index 42c5de4970..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_keys.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// The Key type is defined in users_keys.go - -// ListKeys lists the deploy keys for a repository. 
-// -// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#list-deploy-keys -func (s *RepositoriesService) ListKeys(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var keys []*Key - resp, err := s.client.Do(ctx, req, &keys) - if err != nil { - return nil, resp, err - } - - return keys, resp, nil -} - -// GetKey fetches a single deploy key. -// -// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#get-a-deploy-key -func (s *RepositoriesService) GetKey(ctx context.Context, owner string, repo string, id int64) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - key := new(Key) - resp, err := s.client.Do(ctx, req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, nil -} - -// CreateKey adds a deploy key for a repository. -// -// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#create-a-deploy-key -func (s *RepositoriesService) CreateKey(ctx context.Context, owner string, repo string, key *Key) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) - - req, err := s.client.NewRequest("POST", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(ctx, req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteKey deletes a deploy key. -// -// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#delete-a-deploy-key -func (s *RepositoriesService) DeleteKey(ctx context.Context, owner string, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_lfs.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_lfs.go deleted file mode 100644 index 6ac2e5a877..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_lfs.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// EnableLFS turns the LFS (Large File Storage) feature ON for the selected repo. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/lfs#enable-git-lfs-for-a-repository -func (s *RepositoriesService) EnableLFS(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/lfs", owner, repo) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// DisableLFS turns the LFS (Large File Storage) feature OFF for the selected repo. 
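// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// creating a read-only deploy key with the CreateKey method deleted above. The
// repo and the key material are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	created, _, err := client.Repositories.CreateKey(ctx, "octo-org", "hello-world", &github.Key{
		Title:    github.String("ci-deploy-key"),                  // placeholder title
		Key:      github.String("ssh-ed25519 AAAAC3Nz... ci-key"), // placeholder public key
		ReadOnly: github.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deploy key id:", created.GetID())
}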
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/lfs#disable-git-lfs-for-a-repository -func (s *RepositoriesService) DisableLFS(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/lfs", owner, repo) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_merging.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_merging.go deleted file mode 100644 index 66e88452e8..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_merging.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RepositoryMergeRequest represents a request to merge a branch in a -// repository. -type RepositoryMergeRequest struct { - Base *string `json:"base,omitempty"` - Head *string `json:"head,omitempty"` - CommitMessage *string `json:"commit_message,omitempty"` -} - -// RepoMergeUpstreamRequest represents a request to sync a branch of -// a forked repository to keep it up-to-date with the upstream repository. -type RepoMergeUpstreamRequest struct { - Branch *string `json:"branch,omitempty"` -} - -// RepoMergeUpstreamResult represents the result of syncing a branch of -// a forked repository with the upstream repository. -type RepoMergeUpstreamResult struct { - Message *string `json:"message,omitempty"` - MergeType *string `json:"merge_type,omitempty"` - BaseBranch *string `json:"base_branch,omitempty"` -} - -// Merge a branch in the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/branches/branches#merge-a-branch -func (s *RepositoriesService) Merge(ctx context.Context, owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/merges", owner, repo) - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - commit := new(RepositoryCommit) - resp, err := s.client.Do(ctx, req, commit) - if err != nil { - return nil, resp, err - } - - return commit, resp, nil -} - -// MergeUpstream syncs a branch of a forked repository to keep it up-to-date -// with the upstream repository. 
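// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// merging one branch into another with the Merge method deleted above; the
// repo and branch names are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	commit, _, err := client.Repositories.Merge(ctx, "octo-org", "hello-world", &github.RepositoryMergeRequest{
		Base:          github.String("main"),
		Head:          github.String("feature"),
		CommitMessage: github.String("Merge branch 'feature' into main"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("merge commit:", commit.GetSHA())
}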
-// -// GitHub API docs: https://docs.github.com/en/rest/branches/branches#sync-a-fork-branch-with-the-upstream-repository -func (s *RepositoriesService) MergeUpstream(ctx context.Context, owner, repo string, request *RepoMergeUpstreamRequest) (*RepoMergeUpstreamResult, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/merge-upstream", owner, repo) - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - result := new(RepoMergeUpstreamResult) - resp, err := s.client.Do(ctx, req, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_pages.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_pages.go deleted file mode 100644 index 83075dbdd2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_pages.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// Pages represents a GitHub Pages site configuration. -type Pages struct { - URL *string `json:"url,omitempty"` - Status *string `json:"status,omitempty"` - CNAME *string `json:"cname,omitempty"` - Custom404 *bool `json:"custom_404,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - BuildType *string `json:"build_type,omitempty"` - Source *PagesSource `json:"source,omitempty"` - Public *bool `json:"public,omitempty"` - HTTPSCertificate *PagesHTTPSCertificate `json:"https_certificate,omitempty"` - HTTPSEnforced *bool `json:"https_enforced,omitempty"` -} - -// PagesSource represents a GitHub page's source. -type PagesSource struct { - Branch *string `json:"branch,omitempty"` - Path *string `json:"path,omitempty"` -} - -// PagesError represents a build error for a GitHub Pages site. -type PagesError struct { - Message *string `json:"message,omitempty"` -} - -// PagesBuild represents the build information for a GitHub Pages site. -type PagesBuild struct { - URL *string `json:"url,omitempty"` - Status *string `json:"status,omitempty"` - Error *PagesError `json:"error,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Commit *string `json:"commit,omitempty"` - Duration *int `json:"duration,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -// PagesDomain represents a domain associated with a GitHub Pages site. 
-type PagesDomain struct { - Host *string `json:"host,omitempty"` - URI *string `json:"uri,omitempty"` - Nameservers *string `json:"nameservers,omitempty"` - DNSResolves *bool `json:"dns_resolves,omitempty"` - IsProxied *bool `json:"is_proxied,omitempty"` - IsCloudflareIP *bool `json:"is_cloudflare_ip,omitempty"` - IsFastlyIP *bool `json:"is_fastly_ip,omitempty"` - IsOldIPAddress *bool `json:"is_old_ip_address,omitempty"` - IsARecord *bool `json:"is_a_record,omitempty"` - HasCNAMERecord *bool `json:"has_cname_record,omitempty"` - HasMXRecordsPresent *bool `json:"has_mx_records_present,omitempty"` - IsValidDomain *bool `json:"is_valid_domain,omitempty"` - IsApexDomain *bool `json:"is_apex_domain,omitempty"` - ShouldBeARecord *bool `json:"should_be_a_record,omitempty"` - IsCNAMEToGithubUserDomain *bool `json:"is_cname_to_github_user_domain,omitempty"` - IsCNAMEToPagesDotGithubDotCom *bool `json:"is_cname_to_pages_dot_github_dot_com,omitempty"` - IsCNAMEToFastly *bool `json:"is_cname_to_fastly,omitempty"` - IsPointedToGithubPagesIP *bool `json:"is_pointed_to_github_pages_ip,omitempty"` - IsNonGithubPagesIPPresent *bool `json:"is_non_github_pages_ip_present,omitempty"` - IsPagesDomain *bool `json:"is_pages_domain,omitempty"` - IsServedByPages *bool `json:"is_served_by_pages,omitempty"` - IsValid *bool `json:"is_valid,omitempty"` - Reason *string `json:"reason,omitempty"` - RespondsToHTTPS *bool `json:"responds_to_https,omitempty"` - EnforcesHTTPS *bool `json:"enforces_https,omitempty"` - HTTPSError *string `json:"https_error,omitempty"` - IsHTTPSEligible *bool `json:"is_https_eligible,omitempty"` - CAAError *string `json:"caa_error,omitempty"` -} - -// PagesHealthCheckResponse represents the response given for the health check of a GitHub Pages site. -type PagesHealthCheckResponse struct { - Domain *PagesDomain `json:"domain,omitempty"` - AltDomain *PagesDomain `json:"alt_domain,omitempty"` -} - -// PagesHTTPSCertificate represents the HTTPS Certificate information for a GitHub Pages site. -type PagesHTTPSCertificate struct { - State *string `json:"state,omitempty"` - Description *string `json:"description,omitempty"` - Domains []string `json:"domains,omitempty"` - // GitHub's API doesn't return a standard Timestamp, rather it returns a YYYY-MM-DD string. - ExpiresAt *string `json:"expires_at,omitempty"` -} - -// createPagesRequest is a subset of Pages and is used internally -// by EnablePages to pass only the known fields for the endpoint. -type createPagesRequest struct { - BuildType *string `json:"build_type,omitempty"` - Source *PagesSource `json:"source,omitempty"` -} - -// EnablePages enables GitHub Pages for the named repo. -// -// GitHub API docs: https://docs.github.com/en/rest/pages#create-a-github-pages-site -func (s *RepositoriesService) EnablePages(ctx context.Context, owner, repo string, pages *Pages) (*Pages, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) - - pagesReq := &createPagesRequest{ - BuildType: pages.BuildType, - Source: pages.Source, - } - - req, err := s.client.NewRequest("POST", u, pagesReq) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeEnablePagesAPIPreview) - - enable := new(Pages) - resp, err := s.client.Do(ctx, req, enable) - if err != nil { - return nil, resp, err - } - - return enable, resp, nil -} - -// PagesUpdate sets up parameters needed to update a GitHub Pages site. -type PagesUpdate struct { - // CNAME represents a custom domain for the repository. 
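// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// enabling a Pages site that deploys from a branch via EnablePages as deleted
// above. Per createPagesRequest, only BuildType and Source are sent; the repo
// and branch are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	site, _, err := client.Repositories.EnablePages(ctx, "octo-org", "hello-world", &github.Pages{
		BuildType: github.String("legacy"), // deploy from a branch
		Source: &github.PagesSource{
			Branch: github.String("gh-pages"),
			Path:   github.String("/"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pages site:", site.GetHTMLURL())
}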
- // Leaving CNAME empty will remove the custom domain. - CNAME *string `json:"cname"` - // BuildType is optional and can either be "legacy" or "workflow". - // "workflow" - You are using a github workflow to build your pages. - // "legacy" - You are deploying from a branch. - BuildType *string `json:"build_type,omitempty"` - // Source must include the branch name, and may optionally specify the subdirectory "/docs". - // Possible values for Source.Branch are usually "gh-pages", "main", and "master", - // or any other existing branch name. - // Possible values for Source.Path are: "/", and "/docs". - Source *PagesSource `json:"source,omitempty"` - // Public configures access controls for the site. - // If "true", the site will be accessible to anyone on the internet. If "false", - // the site will be accessible to anyone with read access to the repository that - // published the site. - Public *bool `json:"public,omitempty"` - // HTTPSEnforced specifies whether HTTPS should be enforced for the repository. - HTTPSEnforced *bool `json:"https_enforced,omitempty"` -} - -// UpdatePages updates GitHub Pages for the named repo. -// -// GitHub API docs: https://docs.github.com/en/rest/pages#update-information-about-a-github-pages-site -func (s *RepositoriesService) UpdatePages(ctx context.Context, owner, repo string, opts *PagesUpdate) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) - - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// DisablePages disables GitHub Pages for the named repo. -// -// GitHub API docs: https://docs.github.com/en/rest/pages#delete-a-github-pages-site -func (s *RepositoriesService) DisablePages(ctx context.Context, owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeEnablePagesAPIPreview) - - return s.client.Do(ctx, req, nil) -} - -// GetPagesInfo fetches information about a GitHub Pages site. -// -// GitHub API docs: https://docs.github.com/en/rest/pages#get-a-github-pages-site -func (s *RepositoriesService) GetPagesInfo(ctx context.Context, owner, repo string) (*Pages, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - site := new(Pages) - resp, err := s.client.Do(ctx, req, site) - if err != nil { - return nil, resp, err - } - - return site, resp, nil -} - -// ListPagesBuilds lists the builds for a GitHub Pages site. -// -// GitHub API docs: https://docs.github.com/en/rest/pages#list-github-pages-builds -func (s *RepositoriesService) ListPagesBuilds(ctx context.Context, owner, repo string, opts *ListOptions) ([]*PagesBuild, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pages []*PagesBuild - resp, err := s.client.Do(ctx, req, &pages) - if err != nil { - return nil, resp, err - } - - return pages, resp, nil -} - -// GetLatestPagesBuild fetches the latest build information for a GitHub pages site. 
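// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// pointing a Pages site at a custom domain and enforcing HTTPS with UpdatePages
// as deleted above. Note CNAME is serialized without omitempty, so an explicit
// empty value removes the domain. The domain and repo are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	_, err := client.Repositories.UpdatePages(ctx, "octo-org", "hello-world", &github.PagesUpdate{
		CNAME:         github.String("docs.example.com"), // placeholder domain
		HTTPSEnforced: github.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pages configuration updated")
}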
-//
-// GitHub API docs: https://docs.github.com/en/rest/pages#get-latest-pages-build
-func (s *RepositoriesService) GetLatestPagesBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	build := new(PagesBuild)
-	resp, err := s.client.Do(ctx, req, build)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return build, resp, nil
-}
-
-// GetPageBuild fetches the specific build information for a GitHub pages site.
-//
-// GitHub API docs: https://docs.github.com/en/rest/pages#get-github-pages-build
-func (s *RepositoriesService) GetPageBuild(ctx context.Context, owner, repo string, id int64) (*PagesBuild, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/pages/builds/%v", owner, repo, id)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	build := new(PagesBuild)
-	resp, err := s.client.Do(ctx, req, build)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return build, resp, nil
-}
-
-// RequestPageBuild requests a build of a GitHub Pages site without needing to push a new commit.
-//
-// GitHub API docs: https://docs.github.com/en/rest/pages#request-a-github-pages-build
-func (s *RepositoriesService) RequestPageBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
-	req, err := s.client.NewRequest("POST", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	build := new(PagesBuild)
-	resp, err := s.client.Do(ctx, req, build)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return build, resp, nil
-}
-
-// GetPageHealthCheck gets a DNS health check for the CNAME record configured for a repository's GitHub Pages.
-//
-// GitHub API docs: https://docs.github.com/en/rest/pages#get-a-dns-health-check-for-github-pages
-func (s *RepositoriesService) GetPageHealthCheck(ctx context.Context, owner, repo string) (*PagesHealthCheckResponse, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/pages/health", owner, repo)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	healthCheckResponse := new(PagesHealthCheckResponse)
-	resp, err := s.client.Do(ctx, req, healthCheckResponse)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return healthCheckResponse, resp, nil
-}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_prereceive_hooks.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_prereceive_hooks.go deleted file mode 100644 index 1ce6478d33..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_prereceive_hooks.go +++ /dev/null @@ -1,110 +0,0 @@
-// Copyright 2018 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// PreReceiveHook represents a GitHub pre-receive hook for a repository.
-type PreReceiveHook struct {
-	ID          *int64  `json:"id,omitempty"`
-	Name        *string `json:"name,omitempty"`
-	Enforcement *string `json:"enforcement,omitempty"`
-	ConfigURL   *string `json:"configuration_url,omitempty"`
-}
-
-func (p PreReceiveHook) String() string {
-	return Stringify(p)
-}
-
-// ListPreReceiveHooks lists all pre-receive hooks for the specified repository.
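// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// requesting a Pages build without pushing a commit, then reading the latest
// build status, using RequestPageBuild and GetLatestPagesBuild as deleted
// above. The repo is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	build, _, err := client.Repositories.RequestPageBuild(ctx, "octo-org", "hello-world")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("requested build:", build.GetStatus())

	latest, _, err := client.Repositories.GetLatestPagesBuild(ctx, "octo-org", "hello-world")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest build:", latest.GetStatus(), "at commit", latest.GetCommit())
}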
-// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#list-pre-receive-hooks -func (s *RepositoriesService) ListPreReceiveHooks(ctx context.Context, owner, repo string, opts *ListOptions) ([]*PreReceiveHook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - var hooks []*PreReceiveHook - resp, err := s.client.Do(ctx, req, &hooks) - if err != nil { - return nil, resp, err - } - - return hooks, resp, nil -} - -// GetPreReceiveHook returns a single specified pre-receive hook. -// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#get-a-single-pre-receive-hook -func (s *RepositoriesService) GetPreReceiveHook(ctx context.Context, owner, repo string, id int64) (*PreReceiveHook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - h := new(PreReceiveHook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// UpdatePreReceiveHook updates a specified pre-receive hook. -// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#update-pre-receive-hook-enforcement -func (s *RepositoriesService) UpdatePreReceiveHook(ctx context.Context, owner, repo string, id int64, hook *PreReceiveHook) (*PreReceiveHook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - h := new(PreReceiveHook) - resp, err := s.client.Do(ctx, req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// DeletePreReceiveHook deletes a specified pre-receive hook. -// -// GitHub API docs: https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/#remove-enforcement-overrides-for-a-pre-receive-hook -func (s *RepositoriesService) DeletePreReceiveHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/pre-receive-hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypePreReceiveHooksPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_projects.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_projects.go deleted file mode 100644 index a3001dee98..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_projects.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ProjectListOptions specifies the optional parameters to the -// OrganizationsService.ListProjects and RepositoriesService.ListProjects methods. -type ProjectListOptions struct { - // Indicates the state of the projects to return. Can be either open, closed, or all. Default: open - State string `url:"state,omitempty"` - - ListOptions -} - -// ListProjects lists the projects for a repo. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-repository-projects -func (s *RepositoriesService) ListProjects(ctx context.Context, owner, repo string, opts *ProjectListOptions) ([]*Project, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/projects", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - var projects []*Project - resp, err := s.client.Do(ctx, req, &projects) - if err != nil { - return nil, resp, err - } - - return projects, resp, nil -} - -// CreateProject creates a GitHub Project for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-a-repository-project -func (s *RepositoriesService) CreateProject(ctx context.Context, owner, repo string, opts *ProjectOptions) (*Project, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/projects", owner, repo) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept headers when APIs fully launch. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_releases.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_releases.go deleted file mode 100644 index 464c2ee1e2..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_releases.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "errors" - "fmt" - "io" - "mime" - "net/http" - "os" - "path/filepath" - "strings" -) - -// RepositoryRelease represents a GitHub release in a repository. -type RepositoryRelease struct { - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Draft *bool `json:"draft,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` - // MakeLatest can be one of: "true", "false", or "legacy". 
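// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// listing open classic projects for a repository with ListProjects and its
// ProjectListOptions as deleted above. The repo is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	projects, _, err := client.Repositories.ListProjects(ctx, "octo-org", "hello-world", &github.ProjectListOptions{
		State:       "open", // "open", "closed", or "all"
		ListOptions: github.ListOptions{PerPage: 50},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range projects {
		fmt.Println(p.GetNumber(), p.GetName())
	}
}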
-	MakeLatest             *string `json:"make_latest,omitempty"`
-	DiscussionCategoryName *string `json:"discussion_category_name,omitempty"`
-
-	// The following fields are not used in EditRelease:
-	GenerateReleaseNotes *bool `json:"generate_release_notes,omitempty"`
-
-	// The following fields are not used in CreateRelease or EditRelease:
-	ID          *int64          `json:"id,omitempty"`
-	CreatedAt   *Timestamp      `json:"created_at,omitempty"`
-	PublishedAt *Timestamp      `json:"published_at,omitempty"`
-	URL         *string         `json:"url,omitempty"`
-	HTMLURL     *string         `json:"html_url,omitempty"`
-	AssetsURL   *string         `json:"assets_url,omitempty"`
-	Assets      []*ReleaseAsset `json:"assets,omitempty"`
-	UploadURL   *string         `json:"upload_url,omitempty"`
-	ZipballURL  *string         `json:"zipball_url,omitempty"`
-	TarballURL  *string         `json:"tarball_url,omitempty"`
-	Author      *User           `json:"author,omitempty"`
-	NodeID      *string         `json:"node_id,omitempty"`
-}
-
-func (r RepositoryRelease) String() string {
-	return Stringify(r)
-}
-
-// RepositoryReleaseNotes represents GitHub-generated release notes.
-type RepositoryReleaseNotes struct {
-	Name string `json:"name"`
-	Body string `json:"body"`
-}
-
-// GenerateNotesOptions represents the options to generate release notes.
-type GenerateNotesOptions struct {
-	TagName         string  `json:"tag_name"`
-	PreviousTagName *string `json:"previous_tag_name,omitempty"`
-	TargetCommitish *string `json:"target_commitish,omitempty"`
-}
-
-// ReleaseAsset represents a GitHub release asset in a repository.
-type ReleaseAsset struct {
-	ID                 *int64     `json:"id,omitempty"`
-	URL                *string    `json:"url,omitempty"`
-	Name               *string    `json:"name,omitempty"`
-	Label              *string    `json:"label,omitempty"`
-	State              *string    `json:"state,omitempty"`
-	ContentType        *string    `json:"content_type,omitempty"`
-	Size               *int       `json:"size,omitempty"`
-	DownloadCount      *int       `json:"download_count,omitempty"`
-	CreatedAt          *Timestamp `json:"created_at,omitempty"`
-	UpdatedAt          *Timestamp `json:"updated_at,omitempty"`
-	BrowserDownloadURL *string    `json:"browser_download_url,omitempty"`
-	Uploader           *User      `json:"uploader,omitempty"`
-	NodeID             *string    `json:"node_id,omitempty"`
-}
-
-func (r ReleaseAsset) String() string {
-	return Stringify(r)
-}
-
-// ListReleases lists the releases for a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/releases#list-releases
-func (s *RepositoriesService) ListReleases(ctx context.Context, owner, repo string, opts *ListOptions) ([]*RepositoryRelease, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var releases []*RepositoryRelease
-	resp, err := s.client.Do(ctx, req, &releases)
-	if err != nil {
-		return nil, resp, err
-	}
-	return releases, resp, nil
-}
-
-// GetRelease fetches a single release.
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/releases#get-a-release
-func (s *RepositoriesService) GetRelease(ctx context.Context, owner, repo string, id int64) (*RepositoryRelease, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
-	return s.getSingleRelease(ctx, u)
-}
-
-// GetLatestRelease fetches the latest published release for the repository.
-// -// GitHub API docs: https://docs.github.com/en/rest/releases/releases#get-the-latest-release -func (s *RepositoriesService) GetLatestRelease(ctx context.Context, owner, repo string) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/latest", owner, repo) - return s.getSingleRelease(ctx, u) -} - -// GetReleaseByTag fetches a release with the specified tag. -// -// GitHub API docs: https://docs.github.com/en/rest/releases/releases#get-a-release-by-tag-name -func (s *RepositoriesService) GetReleaseByTag(ctx context.Context, owner, repo, tag string) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/tags/%s", owner, repo, tag) - return s.getSingleRelease(ctx, u) -} - -// GenerateReleaseNotes generates the release notes for the given tag. -// -// GitHub API docs: https://docs.github.com/en/rest/releases/releases#generate-release-notes-content-for-a-release -func (s *RepositoriesService) GenerateReleaseNotes(ctx context.Context, owner, repo string, opts *GenerateNotesOptions) (*RepositoryReleaseNotes, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/generate-notes", owner, repo) - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - r := new(RepositoryReleaseNotes) - resp, err := s.client.Do(ctx, req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -func (s *RepositoriesService) getSingleRelease(ctx context.Context, url string) (*RepositoryRelease, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - release := new(RepositoryRelease) - resp, err := s.client.Do(ctx, req, release) - if err != nil { - return nil, resp, err - } - return release, resp, nil -} - -// repositoryReleaseRequest is a subset of RepositoryRelease and -// is used internally by CreateRelease and EditRelease to pass -// only the known fields for these endpoints. -// -// See https://github.com/google/go-github/issues/992 for more -// information. -type repositoryReleaseRequest struct { - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Draft *bool `json:"draft,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` - MakeLatest *string `json:"make_latest,omitempty"` - GenerateReleaseNotes *bool `json:"generate_release_notes,omitempty"` - DiscussionCategoryName *string `json:"discussion_category_name,omitempty"` -} - -// CreateRelease adds a new release for a repository. -// -// Note that only a subset of the release fields are used. -// See RepositoryRelease for more information. 
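// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// generating release notes between two tags with GenerateReleaseNotes as
// deleted above. The repo and tag names are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	notes, _, err := client.Repositories.GenerateReleaseNotes(ctx, "octo-org", "hello-world", &github.GenerateNotesOptions{
		TagName:         "v1.2.0",                // placeholder tag
		PreviousTagName: github.String("v1.1.0"), // placeholder tag
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(notes.Name)
	fmt.Println(notes.Body)
}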
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/releases#create-a-release
-func (s *RepositoriesService) CreateRelease(ctx context.Context, owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
-
-	releaseReq := &repositoryReleaseRequest{
-		TagName:                release.TagName,
-		TargetCommitish:        release.TargetCommitish,
-		Name:                   release.Name,
-		Body:                   release.Body,
-		Draft:                  release.Draft,
-		Prerelease:             release.Prerelease,
-		MakeLatest:             release.MakeLatest,
-		DiscussionCategoryName: release.DiscussionCategoryName,
-		GenerateReleaseNotes:   release.GenerateReleaseNotes,
-	}
-
-	req, err := s.client.NewRequest("POST", u, releaseReq)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(RepositoryRelease)
-	resp, err := s.client.Do(ctx, req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-	return r, resp, nil
-}
-
-// EditRelease edits a repository release.
-//
-// Note that only a subset of the release fields are used.
-// See RepositoryRelease for more information.
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/releases#update-a-release
-func (s *RepositoriesService) EditRelease(ctx context.Context, owner, repo string, id int64, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
-
-	releaseReq := &repositoryReleaseRequest{
-		TagName:                release.TagName,
-		TargetCommitish:        release.TargetCommitish,
-		Name:                   release.Name,
-		Body:                   release.Body,
-		Draft:                  release.Draft,
-		Prerelease:             release.Prerelease,
-		MakeLatest:             release.MakeLatest,
-		DiscussionCategoryName: release.DiscussionCategoryName,
-	}
-
-	req, err := s.client.NewRequest("PATCH", u, releaseReq)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(RepositoryRelease)
-	resp, err := s.client.Do(ctx, req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-	return r, resp, nil
-}
-
-// DeleteRelease deletes a single release from a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/releases#delete-a-release
-func (s *RepositoriesService) DeleteRelease(ctx context.Context, owner, repo string, id int64) (*Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
-
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	return s.client.Do(ctx, req, nil)
-}
-
-// ListReleaseAssets lists the release's assets.
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/assets#list-release-assets
-func (s *RepositoriesService) ListReleaseAssets(ctx context.Context, owner, repo string, id int64, opts *ListOptions) ([]*ReleaseAsset, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var assets []*ReleaseAsset
-	resp, err := s.client.Do(ctx, req, &assets)
-	if err != nil {
-		return nil, resp, err
-	}
-	return assets, resp, nil
-}
-
-// GetReleaseAsset fetches a single release asset.
-// -// GitHub API docs: https://docs.github.com/en/rest/releases/assets#get-a-release-asset -func (s *RepositoriesService) GetReleaseAsset(ctx context.Context, owner, repo string, id int64) (*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - asset := new(ReleaseAsset) - resp, err := s.client.Do(ctx, req, asset) - if err != nil { - return nil, resp, err - } - return asset, resp, nil -} - -// DownloadReleaseAsset downloads a release asset or returns a redirect URL. -// -// DownloadReleaseAsset returns an io.ReadCloser that reads the contents of the -// specified release asset. It is the caller's responsibility to close the ReadCloser. -// If a redirect is returned, the redirect URL will be returned as a string instead -// of the io.ReadCloser. Exactly one of rc and redirectURL will be zero. -// -// followRedirectsClient can be passed to download the asset from a redirected -// location. Passing http.DefaultClient is recommended unless special circumstances -// exist, but it's possible to pass any http.Client. If nil is passed the -// redirectURL will be returned instead. -// -// GitHub API docs: https://docs.github.com/en/rest/releases/assets#get-a-release-asset -func (s *RepositoriesService) DownloadReleaseAsset(ctx context.Context, owner, repo string, id int64, followRedirectsClient *http.Client) (rc io.ReadCloser, redirectURL string, err error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, "", err - } - req.Header.Set("Accept", defaultMediaType) - - s.client.clientMu.Lock() - defer s.client.clientMu.Unlock() - - var loc string - saveRedirect := s.client.client.CheckRedirect - s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - loc = req.URL.String() - return errors.New("disable redirect") - } - defer func() { s.client.client.CheckRedirect = saveRedirect }() - - req = withContext(ctx, req) - resp, err := s.client.client.Do(req) - if err != nil { - if !strings.Contains(err.Error(), "disable redirect") { - return nil, "", err - } - if followRedirectsClient != nil { - rc, err := s.downloadReleaseAssetFromURL(ctx, followRedirectsClient, loc) - return rc, "", err - } - return nil, loc, nil // Intentionally return no error with valid redirect URL. - } - - if err := CheckResponse(resp); err != nil { - resp.Body.Close() - return nil, "", err - } - - return resp.Body, "", nil -} - -func (s *RepositoriesService) downloadReleaseAssetFromURL(ctx context.Context, followRedirectsClient *http.Client, url string) (rc io.ReadCloser, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - req = withContext(ctx, req) - req.Header.Set("Accept", "*/*") - resp, err := followRedirectsClient.Do(req) - if err != nil { - return nil, err - } - if err := CheckResponse(resp); err != nil { - resp.Body.Close() - return nil, err - } - return resp.Body, nil -} - -// EditReleaseAsset edits a repository release asset. 
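// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// downloading a release asset with DownloadReleaseAsset as deleted above.
// Passing http.DefaultClient makes the method follow any storage redirect
// itself, so rc carries the asset body and the redirect URL stays empty. The
// repo and asset ID are placeholders.
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	rc, redirectURL, err := client.Repositories.DownloadReleaseAsset(ctx, "octo-org", "hello-world", 12345, http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	if rc == nil {
		// Only reachable when a nil follow client is passed instead.
		fmt.Println("fetch manually from:", redirectURL)
		return
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}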
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/assets#update-a-release-asset
-func (s *RepositoriesService) EditReleaseAsset(ctx context.Context, owner, repo string, id int64, release *ReleaseAsset) (*ReleaseAsset, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
-
-	req, err := s.client.NewRequest("PATCH", u, release)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	asset := new(ReleaseAsset)
-	resp, err := s.client.Do(ctx, req, asset)
-	if err != nil {
-		return nil, resp, err
-	}
-	return asset, resp, nil
-}
-
-// DeleteReleaseAsset deletes a single release asset from a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/assets#delete-a-release-asset
-func (s *RepositoriesService) DeleteReleaseAsset(ctx context.Context, owner, repo string, id int64) (*Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
-
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	return s.client.Do(ctx, req, nil)
-}
-
-// UploadReleaseAsset creates an asset by uploading a file to a repository release.
-// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly.
-//
-// GitHub API docs: https://docs.github.com/en/rest/releases/assets#upload-a-release-asset
-func (s *RepositoriesService) UploadReleaseAsset(ctx context.Context, owner, repo string, id int64, opts *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	stat, err := file.Stat()
-	if err != nil {
-		return nil, nil, err
-	}
-	if stat.IsDir() {
-		return nil, nil, errors.New("the asset to upload can't be a directory")
-	}
-
-	mediaType := mime.TypeByExtension(filepath.Ext(file.Name()))
-	if opts.MediaType != "" {
-		mediaType = opts.MediaType
-	}
-
-	req, err := s.client.NewUploadRequest(u, file, stat.Size(), mediaType)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	asset := new(ReleaseAsset)
-	resp, err := s.client.Do(ctx, req, asset)
-	if err != nil {
-		return nil, resp, err
-	}
-	return asset, resp, nil
-}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_rules.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_rules.go deleted file mode 100644 index 9299d3e7f3..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_rules.go +++ /dev/null @@ -1,447 +0,0 @@
-// Copyright 2023 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-)
-
-// BypassActor represents the bypass actors from a ruleset.
-type BypassActor struct {
-	ActorID *int64 `json:"actor_id,omitempty"`
-	// Possible values for ActorType are: Team, Integration
-	ActorType *string `json:"actor_type,omitempty"`
-}
-
-// RulesetLink represents a single link object from GitHub ruleset request _links.
-type RulesetLink struct {
-	HRef *string `json:"href,omitempty"`
-}
-
-// RulesetLinks represents the "_links" object in a Ruleset.
-type RulesetLinks struct {
-	Self *RulesetLink `json:"self,omitempty"`
-}
-
-// RulesetRefConditionParameters represents the conditions object for ref_names.
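// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// uploading a tarball as a release asset with UploadReleaseAsset as deleted
// above. The file path, release ID, and repo are placeholders; the content
// type is inferred from the file extension unless opts.MediaType is set.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	f, err := os.Open("dist/hello-world_1.2.0.tar.gz") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	asset, _, err := client.Repositories.UploadReleaseAsset(ctx, "octo-org", "hello-world", 67890, &github.UploadOptions{
		Name: "hello-world_1.2.0.tar.gz",
	}, f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("uploaded:", asset.GetBrowserDownloadURL())
}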
-type RulesetRefConditionParameters struct {
-	Include []string `json:"include"`
-	Exclude []string `json:"exclude"`
-}
-
-// RulesetRepositoryConditionParameters represents the conditions object for repository_names.
-type RulesetRepositoryConditionParameters struct {
-	Include   []string `json:"include,omitempty"`
-	Exclude   []string `json:"exclude,omitempty"`
-	Protected *bool    `json:"protected,omitempty"`
-}
-
-// RulesetConditions represents the conditions object in a ruleset.
-type RulesetConditions struct {
-	RefName        *RulesetRefConditionParameters        `json:"ref_name,omitempty"`
-	RepositoryName *RulesetRepositoryConditionParameters `json:"repository_name,omitempty"`
-}
-
-// RulePatternParameters represents the rule pattern parameters.
-type RulePatternParameters struct {
-	Name *string `json:"name,omitempty"`
-	// If Negate is true, the rule will fail if the pattern matches.
-	Negate *bool `json:"negate,omitempty"`
-	// Possible values for Operator are: starts_with, ends_with, contains, regex
-	Operator string `json:"operator"`
-	Pattern  string `json:"pattern"`
-}
-
-// UpdateAllowsFetchAndMergeRuleParameters represents the update rule parameters.
-type UpdateAllowsFetchAndMergeRuleParameters struct {
-	UpdateAllowsFetchAndMerge bool `json:"update_allows_fetch_and_merge"`
-}
-
-// RequiredDeploymentEnvironmentsRuleParameters represents the required_deployments rule parameters.
-type RequiredDeploymentEnvironmentsRuleParameters struct {
-	RequiredDeploymentEnvironments []string `json:"required_deployment_environments"`
-}
-
-// PullRequestRuleParameters represents the pull_request rule parameters.
-type PullRequestRuleParameters struct {
-	DismissStaleReviewsOnPush      bool `json:"dismiss_stale_reviews_on_push"`
-	RequireCodeOwnerReview         bool `json:"require_code_owner_review"`
-	RequireLastPushApproval        bool `json:"require_last_push_approval"`
-	RequiredApprovingReviewCount   int  `json:"required_approving_review_count"`
-	RequiredReviewThreadResolution bool `json:"required_review_thread_resolution"`
-}
-
-// RuleRequiredStatusChecks represents the RequiredStatusChecks for the RequiredStatusChecksRuleParameters object.
-type RuleRequiredStatusChecks struct {
-	Context       string `json:"context"`
-	IntegrationID *int64 `json:"integration_id,omitempty"`
-}
-
-// RequiredStatusChecksRuleParameters represents the required_status_checks rule parameters.
-type RequiredStatusChecksRuleParameters struct {
-	RequiredStatusChecks             []RuleRequiredStatusChecks `json:"required_status_checks"`
-	StrictRequiredStatusChecksPolicy bool                       `json:"strict_required_status_checks_policy"`
-}
-
-// RepositoryRule represents a GitHub Rule.
-type RepositoryRule struct {
-	Type       string           `json:"type"`
-	Parameters *json.RawMessage `json:"parameters,omitempty"`
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-// This helps us handle the fact that the RepositoryRule Parameters field can be of numerous types.
-func (r *RepositoryRule) UnmarshalJSON(data []byte) error {
-	type rule RepositoryRule
-	var RepositoryRule rule
-	if err := json.Unmarshal(data, &RepositoryRule); err != nil {
-		return err
-	}
-
-	r.Type = RepositoryRule.Type
-
-	switch RepositoryRule.Type {
-	case "creation", "deletion", "required_linear_history", "required_signatures", "non_fast_forward":
-		r.Parameters = nil
-	case "update":
-		params := UpdateAllowsFetchAndMergeRuleParameters{}
-		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
-			return err
-		}
-
-		bytes, _ := json.Marshal(params)
-		rawParams := json.RawMessage(bytes)
-
-		r.Parameters = &rawParams
-	case "required_deployments":
-		params := RequiredDeploymentEnvironmentsRuleParameters{}
-		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
-			return err
-		}
-
-		bytes, _ := json.Marshal(params)
-		rawParams := json.RawMessage(bytes)
-
-		r.Parameters = &rawParams
-	case "commit_message_pattern", "commit_author_email_pattern", "committer_email_pattern", "branch_name_pattern", "tag_name_pattern":
-		params := RulePatternParameters{}
-		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
-			return err
-		}
-
-		bytes, _ := json.Marshal(params)
-		rawParams := json.RawMessage(bytes)
-
-		r.Parameters = &rawParams
-	case "pull_request":
-		params := PullRequestRuleParameters{}
-		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
-			return err
-		}
-
-		bytes, _ := json.Marshal(params)
-		rawParams := json.RawMessage(bytes)
-
-		r.Parameters = &rawParams
-	case "required_status_checks":
-		params := RequiredStatusChecksRuleParameters{}
-		if err := json.Unmarshal(*RepositoryRule.Parameters, &params); err != nil {
-			return err
-		}
-
-		bytes, _ := json.Marshal(params)
-		rawParams := json.RawMessage(bytes)
-
-		r.Parameters = &rawParams
-	default:
-		r.Type = ""
-		r.Parameters = nil
-		return fmt.Errorf("RepositoryRule.Type %q is not yet implemented, unable to unmarshal", RepositoryRule.Type)
-	}
-
-	return nil
-}
-
-// NewCreationRule creates a rule to only allow users with bypass permission to create matching refs.
-func NewCreationRule() (rule *RepositoryRule) {
-	return &RepositoryRule{
-		Type: "creation",
-	}
-}
-
-// NewUpdateRule creates a rule to only allow users with bypass permission to update matching refs.
-func NewUpdateRule(params *UpdateAllowsFetchAndMergeRuleParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "update",
-		Parameters: &rawParams,
-	}
-}
-
-// NewDeletionRule creates a rule to only allow users with bypass permissions to delete matching refs.
-func NewDeletionRule() (rule *RepositoryRule) {
-	return &RepositoryRule{
-		Type: "deletion",
-	}
-}
-
-// NewRequiredLinearHistoryRule creates a rule to prevent merge commits from being pushed to matching branches.
-func NewRequiredLinearHistoryRule() (rule *RepositoryRule) {
-	return &RepositoryRule{
-		Type: "required_linear_history",
-	}
-}
-
-// NewRequiredDeploymentsRule creates a rule to require environments to be successfully deployed before they can be merged into the matching branches.
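// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// decoding a pull_request rule through the custom UnmarshalJSON above, then
// reading the re-marshalled Parameters back into the typed struct. The JSON
// literal is a hand-written sample.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	raw := []byte(`{
		"type": "pull_request",
		"parameters": {
			"dismiss_stale_reviews_on_push": true,
			"require_code_owner_review": true,
			"require_last_push_approval": false,
			"required_approving_review_count": 2,
			"required_review_thread_resolution": true
		}
	}`)

	var rule github.RepositoryRule
	if err := json.Unmarshal(raw, &rule); err != nil {
		log.Fatal(err)
	}

	var params github.PullRequestRuleParameters
	if err := json.Unmarshal(*rule.Parameters, &params); err != nil {
		log.Fatal(err)
	}
	fmt.Println("required approvals:", params.RequiredApprovingReviewCount)
}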
-func NewRequiredDeploymentsRule(params *RequiredDeploymentEnvironmentsRuleParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "required_deployments",
-		Parameters: &rawParams,
-	}
-}
-
-// NewRequiredSignaturesRule creates a rule to require commits pushed to matching branches to have verified signatures.
-func NewRequiredSignaturesRule() (rule *RepositoryRule) {
-	return &RepositoryRule{
-		Type: "required_signatures",
-	}
-}
-
-// NewPullRequestRule creates a rule to require all commits be made to a non-target branch and submitted via a pull request before they can be merged.
-func NewPullRequestRule(params *PullRequestRuleParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "pull_request",
-		Parameters: &rawParams,
-	}
-}
-
-// NewRequiredStatusChecksRule creates a rule to require status checks to pass before branches can be merged into matching branches.
-func NewRequiredStatusChecksRule(params *RequiredStatusChecksRuleParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "required_status_checks",
-		Parameters: &rawParams,
-	}
-}
-
-// NewNonFastForwardRule creates a rule to prevent users with push access from force pushing to matching branches.
-func NewNonFastForwardRule() (rule *RepositoryRule) {
-	return &RepositoryRule{
-		Type: "non_fast_forward",
-	}
-}
-
-// NewCommitMessagePatternRule creates a rule to restrict commit message patterns being pushed to matching branches.
-func NewCommitMessagePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "commit_message_pattern",
-		Parameters: &rawParams,
-	}
-}
-
-// NewCommitAuthorEmailPatternRule creates a rule to restrict commits with author email patterns being merged into matching branches.
-func NewCommitAuthorEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "commit_author_email_pattern",
-		Parameters: &rawParams,
-	}
-}
-
-// NewCommitterEmailPatternRule creates a rule to restrict commits with committer email patterns being merged into matching branches.
-func NewCommitterEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "committer_email_pattern",
-		Parameters: &rawParams,
-	}
-}
-
-// NewBranchNamePatternRule creates a rule to restrict branch patterns from being merged into matching branches.
-func NewBranchNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
-	bytes, _ := json.Marshal(params)
-
-	rawParams := json.RawMessage(bytes)
-
-	return &RepositoryRule{
-		Type:       "branch_name_pattern",
-		Parameters: &rawParams,
-	}
-}
-
-// NewTagNamePatternRule creates a rule to restrict tag patterns contained in non-target branches from being merged into matching branches.
-func NewTagNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) { - bytes, _ := json.Marshal(params) - - rawParams := json.RawMessage(bytes) - - return &RepositoryRule{ - Type: "tag_name_pattern", - Parameters: &rawParams, - } -} - -// Ruleset represents a GitHub ruleset object. -type Ruleset struct { - ID int64 `json:"id"` - Name string `json:"name"` - // Possible values for Target are branch, tag - Target *string `json:"target,omitempty"` - // Possible values for SourceType are: Repository, Organization - SourceType *string `json:"source_type,omitempty"` - Source string `json:"source"` - // Possible values for Enforcement are: disabled, active, evaluate - Enforcement string `json:"enforcement"` - // Possible values for BypassMode are: none, repository, organization - BypassMode *string `json:"bypass_mode,omitempty"` - BypassActors []*BypassActor `json:"bypass_actors,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Links *RulesetLinks `json:"_links,omitempty"` - Conditions *RulesetConditions `json:"conditions,omitempty"` - Rules []*RepositoryRule `json:"rules,omitempty"` -} - -// GetRulesForBranch gets all the rules that apply to the specified branch. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-rules-for-a-branch -func (s *RepositoriesService) GetRulesForBranch(ctx context.Context, owner, repo, branch string) ([]*RepositoryRule, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/rules/branches/%v", owner, repo, branch) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rules []*RepositoryRule - resp, err := s.client.Do(ctx, req, &rules) - if err != nil { - return nil, resp, err - } - - return rules, resp, nil -} - -// GetAllRulesets gets all the rules that apply to the specified repository. -// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-all-repository-rulesets -func (s *RepositoriesService) GetAllRulesets(ctx context.Context, owner, repo string, includesParents bool) ([]*Ruleset, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/rulesets?includes_parents=%v", owner, repo, includesParents) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var ruleset []*Ruleset - resp, err := s.client.Do(ctx, req, &ruleset) - if err != nil { - return nil, resp, err - } - - return ruleset, resp, nil -} - -// CreateRuleset creates a ruleset for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/rules#create-a-repository-ruleset -func (s *RepositoriesService) CreateRuleset(ctx context.Context, owner, repo string, rs *Ruleset) (*Ruleset, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/rulesets", owner, repo) - - req, err := s.client.NewRequest("POST", u, rs) - if err != nil { - return nil, nil, err - } - - var ruleset *Ruleset - resp, err := s.client.Do(ctx, req, &ruleset) - if err != nil { - return nil, resp, err - } - - return ruleset, resp, nil -} - -// GetRuleset gets a ruleset for the specified repository. -// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned. 
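// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// creating an active branch ruleset on main using CreateRuleset and two of the
// rule constructors deleted above. The repo and ruleset name are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	ruleset, _, err := client.Repositories.CreateRuleset(ctx, "octo-org", "hello-world", &github.Ruleset{
		Name:        "protect-main",
		Target:      github.String("branch"),
		Enforcement: "active",
		Conditions: &github.RulesetConditions{
			RefName: &github.RulesetRefConditionParameters{
				Include: []string{"refs/heads/main"},
				Exclude: []string{},
			},
		},
		Rules: []*github.RepositoryRule{
			github.NewRequiredLinearHistoryRule(),
			github.NewPullRequestRule(&github.PullRequestRuleParameters{
				DismissStaleReviewsOnPush:    true,
				RequiredApprovingReviewCount: 1,
			}),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created ruleset:", ruleset.ID)
}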
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-a-repository-ruleset -func (s *RepositoriesService) GetRuleset(ctx context.Context, owner, repo string, rulesetID int64, includesParents bool) (*Ruleset, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/rulesets/%v?includes_parents=%v", owner, repo, rulesetID, includesParents) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var ruleset *Ruleset - resp, err := s.client.Do(ctx, req, &ruleset) - if err != nil { - return nil, resp, err - } - - return ruleset, resp, nil -} - -// UpdateRuleset updates a ruleset for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/rules#update-a-repository-ruleset -func (s *RepositoriesService) UpdateRuleset(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID) - - req, err := s.client.NewRequest("PUT", u, rs) - if err != nil { - return nil, nil, err - } - - var ruleset *Ruleset - resp, err := s.client.Do(ctx, req, &ruleset) - if err != nil { - return nil, resp, err - } - - return ruleset, resp, nil -} - -// DeleteRuleset deletes a ruleset for the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/rules#delete-a-repository-ruleset -func (s *RepositoriesService) DeleteRuleset(ctx context.Context, owner, repo string, rulesetID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_stats.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_stats.go deleted file mode 100644 index 3df0a8f6de..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_stats.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "time" -) - -// ContributorStats represents a contributor to a repository and their -// weekly contributions to a given repo. -type ContributorStats struct { - Author *Contributor `json:"author,omitempty"` - Total *int `json:"total,omitempty"` - Weeks []*WeeklyStats `json:"weeks,omitempty"` -} - -func (c ContributorStats) String() string { - return Stringify(c) -} - -// WeeklyStats represents the number of additions, deletions and commits -// a Contributor made in a given week. -type WeeklyStats struct { - Week *Timestamp `json:"w,omitempty"` - Additions *int `json:"a,omitempty"` - Deletions *int `json:"d,omitempty"` - Commits *int `json:"c,omitempty"` -} - -func (w WeeklyStats) String() string { - return Stringify(w) -} - -// ListContributorsStats gets a repo's contributor list with additions, -// deletions and commit counts. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. 
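// Illustrative sketch (editor's addition, not part of the deleted vendor file):
// handling the 202/*AcceptedError behavior described above by retrying
// ListContributorsStats after a short delay. The retry policy is arbitrary and
// the repo is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication required in practice

	var stats []*github.ContributorStats
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		stats, _, err = client.Repositories.ListContributorsStats(ctx, "octo-org", "hello-world")
		if _, ok := err.(*github.AcceptedError); !ok {
			break // either success or a real error
		}
		time.Sleep(2 * time.Second) // GitHub is still computing the statistics
	}
	if err != nil {
		log.Fatal(err)
	}
	for _, cs := range stats {
		fmt.Println(cs.Author.GetLogin(), cs.GetTotal())
	}
}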
-// -// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-all-contributor-commit-activity -func (s *RepositoriesService) ListContributorsStats(ctx context.Context, owner, repo string) ([]*ContributorStats, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/contributors", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var contributorStats []*ContributorStats - resp, err := s.client.Do(ctx, req, &contributorStats) - if err != nil { - return nil, resp, err - } - - return contributorStats, resp, nil -} - -// WeeklyCommitActivity represents the weekly commit activity for a repository. -// The days array is a group of commits per day, starting on Sunday. -type WeeklyCommitActivity struct { - Days []int `json:"days,omitempty"` - Total *int `json:"total,omitempty"` - Week *Timestamp `json:"week,omitempty"` -} - -func (w WeeklyCommitActivity) String() string { - return Stringify(w) -} - -// ListCommitActivity returns the last year of commit activity -// grouped by week. The days array is a group of commits per day, -// starting on Sunday. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-last-year-of-commit-activity -func (s *RepositoriesService) ListCommitActivity(ctx context.Context, owner, repo string) ([]*WeeklyCommitActivity, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/commit_activity", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var weeklyCommitActivity []*WeeklyCommitActivity - resp, err := s.client.Do(ctx, req, &weeklyCommitActivity) - if err != nil { - return nil, resp, err - } - - return weeklyCommitActivity, resp, nil -} - -// ListCodeFrequency returns a weekly aggregate of the number of additions and -// deletions pushed to a repository. Returned WeeklyStats will contain -// additions and deletions, but not total commits. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. 
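The 202 behaviour documented above is easy to get wrong in callers; a sketch of the retry pattern, assuming a placeholder repo and go-github's AcceptedError type:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	var stats []*github.ContributorStats
	for attempt := 0; attempt < 5; attempt++ {
		s, _, err := client.Repositories.ListContributorsStats(ctx, "owner", "repo")
		var accepted *github.AcceptedError
		if errors.As(err, &accepted) {
			time.Sleep(2 * time.Second) // GitHub is still computing the stats
			continue
		}
		if err != nil {
			log.Fatal(err)
		}
		stats = s
		break
	}
	for _, cs := range stats {
		fmt.Printf("%s: %d commits\n", cs.GetAuthor().GetLogin(), cs.GetTotal())
	}
}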
-// -// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-weekly-commit-activity -func (s *RepositoriesService) ListCodeFrequency(ctx context.Context, owner, repo string) ([]*WeeklyStats, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var weeks [][]int - resp, err := s.client.Do(ctx, req, &weeks) - if err != nil { - return nil, resp, err - } - - // convert int slices into WeeklyStats - var stats []*WeeklyStats - for _, week := range weeks { - if len(week) != 3 { - continue - } - stat := &WeeklyStats{ - Week: &Timestamp{time.Unix(int64(week[0]), 0)}, - Additions: Int(week[1]), - Deletions: Int(week[2]), - } - stats = append(stats, stat) - } - - return stats, resp, nil -} - -// RepositoryParticipation is the number of commits by everyone -// who has contributed to the repository (including the owner) -// as well as the number of commits by the owner themself. -type RepositoryParticipation struct { - All []int `json:"all,omitempty"` - Owner []int `json:"owner,omitempty"` -} - -func (r RepositoryParticipation) String() string { - return Stringify(r) -} - -// ListParticipation returns the total commit counts for the 'owner' -// and total commit counts in 'all'. 'all' is everyone combined, -// including the 'owner' in the last 52 weeks. If you’d like to get -// the commit counts for non-owners, you can subtract 'all' from 'owner'. -// -// The array order is oldest week (index 0) to most recent week. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-weekly-commit-count -func (s *RepositoriesService) ListParticipation(ctx context.Context, owner, repo string) (*RepositoryParticipation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/participation", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - participation := new(RepositoryParticipation) - resp, err := s.client.Do(ctx, req, participation) - if err != nil { - return nil, resp, err - } - - return participation, resp, nil -} - -// PunchCard represents the number of commits made during a given hour of a -// day of the week. -type PunchCard struct { - Day *int // Day of the week (0-6: =Sunday - Saturday). - Hour *int // Hour of day (0-23). - Commits *int // Number of commits. -} - -// ListPunchCard returns the number of commits per hour in each day. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return an *AcceptedError and a status code of -// 202. This is because this is the status that GitHub returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. 
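One nuance worth flagging for reviewers: the non-owner commit count for a given week is All[i] minus Owner[i]. A brief sketch against a placeholder repo:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	part, _, err := client.Repositories.ListParticipation(ctx, "owner", "repo")
	if err != nil {
		log.Fatal(err)
	}
	// Index 0 is the oldest of the 52 weeks; the last element is the current week.
	for i := range part.All {
		nonOwner := part.All[i] - part.Owner[i]
		fmt.Printf("week %2d: total=%d owner=%d others=%d\n",
			i, part.All[i], part.Owner[i], nonOwner)
	}
}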
-// -// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-hourly-commit-count-for-each-day -func (s *RepositoriesService) ListPunchCard(ctx context.Context, owner, repo string) ([]*PunchCard, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var results [][]int - resp, err := s.client.Do(ctx, req, &results) - if err != nil { - return nil, resp, err - } - - // convert int slices into Punchcards - var cards []*PunchCard - for _, result := range results { - if len(result) != 3 { - continue - } - card := &PunchCard{ - Day: Int(result[0]), - Hour: Int(result[1]), - Commits: Int(result[2]), - } - cards = append(cards, card) - } - - return cards, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_statuses.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_statuses.go deleted file mode 100644 index ea3d166c75..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_statuses.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RepoStatus represents the status of a repository at a particular reference. -type RepoStatus struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - URL *string `json:"url,omitempty"` - - // State is the current state of the repository. Possible values are: - // pending, success, error, or failure. - State *string `json:"state,omitempty"` - - // TargetURL is the URL of the page representing this status. It will be - // linked from the GitHub UI to allow users to see the source of the status. - TargetURL *string `json:"target_url,omitempty"` - - // Description is a short high level summary of the status. - Description *string `json:"description,omitempty"` - - // A string label to differentiate this status from the statuses of other systems. - Context *string `json:"context,omitempty"` - - // AvatarURL is the URL of the avatar of this status. - AvatarURL *string `json:"avatar_url,omitempty"` - - Creator *User `json:"creator,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` -} - -func (r RepoStatus) String() string { - return Stringify(r) -} - -// ListStatuses lists the statuses of a repository at the specified -// reference. ref can be a SHA, a branch name, or a tag name. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/statuses#list-commit-statuses-for-a-reference -func (s *RepositoriesService) ListStatuses(ctx context.Context, owner, repo, ref string, opts *ListOptions) ([]*RepoStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/statuses", owner, repo, refURLEscape(ref)) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var statuses []*RepoStatus - resp, err := s.client.Do(ctx, req, &statuses) - if err != nil { - return nil, resp, err - } - - return statuses, resp, nil -} - -// CreateStatus creates a new status for a repository at the specified -// reference. Ref can be a SHA, a branch name, or a tag name. 
-// -// GitHub API docs: https://docs.github.com/en/rest/commits/statuses#create-a-commit-status -func (s *RepositoriesService) CreateStatus(ctx context.Context, owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/statuses/%v", owner, repo, refURLEscape(ref)) - req, err := s.client.NewRequest("POST", u, status) - if err != nil { - return nil, nil, err - } - - repoStatus := new(RepoStatus) - resp, err := s.client.Do(ctx, req, repoStatus) - if err != nil { - return nil, resp, err - } - - return repoStatus, resp, nil -} - -// CombinedStatus represents the combined status of a repository at a particular reference. -type CombinedStatus struct { - // State is the combined state of the repository. Possible values are: - // failure, pending, or success. - State *string `json:"state,omitempty"` - - Name *string `json:"name,omitempty"` - SHA *string `json:"sha,omitempty"` - TotalCount *int `json:"total_count,omitempty"` - Statuses []*RepoStatus `json:"statuses,omitempty"` - - CommitURL *string `json:"commit_url,omitempty"` - RepositoryURL *string `json:"repository_url,omitempty"` -} - -func (s CombinedStatus) String() string { - return Stringify(s) -} - -// GetCombinedStatus returns the combined status of a repository at the specified -// reference. ref can be a SHA, a branch name, or a tag name. -// -// GitHub API docs: https://docs.github.com/en/rest/commits/statuses#get-the-combined-status-for-a-specific-reference -func (s *RepositoriesService) GetCombinedStatus(ctx context.Context, owner, repo, ref string, opts *ListOptions) (*CombinedStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/status", owner, repo, refURLEscape(ref)) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - status := new(CombinedStatus) - resp, err := s.client.Do(ctx, req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_tags.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_tags.go deleted file mode 100644 index ff46d90c73..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_tags.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// TagProtection represents a repository tag protection. -type TagProtection struct { - ID *int64 `json:"id"` - Pattern *string `json:"pattern"` -} - -// tagProtectionRequest represents a request to create tag protection. -type tagProtectionRequest struct { - // An optional glob pattern to match against when enforcing tag protection. - Pattern string `json:"pattern"` -} - -// ListTagProtection lists tag protection of the specified repository. 
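For completeness, the shape of a typical status round-trip with the API above; the SHA, context name, and target URL are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes need an authenticated client
	sha := "0123456789abcdef0123456789abcdef01234567"

	// Report a CI result against the commit...
	_, _, err := client.Repositories.CreateStatus(ctx, "owner", "repo", sha, &github.RepoStatus{
		State:       github.String("success"),
		Context:     github.String("ci/build"),
		Description: github.String("build passed"),
		TargetURL:   github.String("https://ci.example.com/builds/42"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// ...then read back the rolled-up state across all contexts.
	combined, _, err := client.Repositories.GetCombinedStatus(ctx, "owner", "repo", sha, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(combined.GetState(), combined.GetTotalCount())
}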
-// -// GitHub API docs: https://docs.github.com/en/rest/repos/tags#list-tag-protection-states-for-a-repository -func (s *RepositoriesService) ListTagProtection(ctx context.Context, owner, repo string) ([]*TagProtection, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/tags/protection", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var tagProtections []*TagProtection - resp, err := s.client.Do(ctx, req, &tagProtections) - if err != nil { - return nil, resp, err - } - - return tagProtections, resp, nil -} - -// CreateTagProtection creates the tag protection of the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/tags#create-a-tag-protection-state-for-a-repository -func (s *RepositoriesService) CreateTagProtection(ctx context.Context, owner, repo, pattern string) (*TagProtection, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/tags/protection", owner, repo) - r := &tagProtectionRequest{Pattern: pattern} - req, err := s.client.NewRequest("POST", u, r) - if err != nil { - return nil, nil, err - } - - tagProtection := new(TagProtection) - resp, err := s.client.Do(ctx, req, tagProtection) - if err != nil { - return nil, resp, err - } - - return tagProtection, resp, nil -} - -// DeleteTagProtection deletes a tag protection from the specified repository. -// -// GitHub API docs: https://docs.github.com/en/rest/repos/tags#delete-a-tag-protection-state-for-a-repository -func (s *RepositoriesService) DeleteTagProtection(ctx context.Context, owner, repo string, tagProtectionID int64) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/tags/protection/%v", owner, repo, tagProtectionID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_traffic.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_traffic.go deleted file mode 100644 index bf093c03ea..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/repos_traffic.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// TrafficReferrer represent information about traffic from a referrer . -type TrafficReferrer struct { - Referrer *string `json:"referrer,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficPath represent information about the traffic on a path of the repo. -type TrafficPath struct { - Path *string `json:"path,omitempty"` - Title *string `json:"title,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficData represent information about a specific timestamp in views or clones list. -type TrafficData struct { - Timestamp *Timestamp `json:"timestamp,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficViews represent information about the number of views in the last 14 days. -type TrafficViews struct { - Views []*TrafficData `json:"views,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficClones represent information about the number of clones in the last 14 days. 
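A short sketch of the tag-protection lifecycle removed above, with a placeholder glob pattern and repo:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Protect every tag matching v* from deletion by non-admins.
	tp, _, err := client.Repositories.CreateTagProtection(ctx, "owner", "repo", "v*")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("protection", tp.GetID(), "pattern", tp.GetPattern())

	// Remove it again once no longer needed.
	if _, err := client.Repositories.DeleteTagProtection(ctx, "owner", "repo", tp.GetID()); err != nil {
		log.Fatal(err)
	}
}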
-type TrafficClones struct { - Clones []*TrafficData `json:"clones,omitempty"` - Count *int `json:"count,omitempty"` - Uniques *int `json:"uniques,omitempty"` -} - -// TrafficBreakdownOptions specifies the parameters to methods that support breakdown per day or week. -// Can be one of: day, week. Default: day. -type TrafficBreakdownOptions struct { - Per string `url:"per,omitempty"` -} - -// ListTrafficReferrers list the top 10 referrers over the last 14 days. -// -// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-top-referral-sources -func (s *RepositoriesService) ListTrafficReferrers(ctx context.Context, owner, repo string) ([]*TrafficReferrer, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/popular/referrers", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var trafficReferrers []*TrafficReferrer - resp, err := s.client.Do(ctx, req, &trafficReferrers) - if err != nil { - return nil, resp, err - } - - return trafficReferrers, resp, nil -} - -// ListTrafficPaths list the top 10 popular content over the last 14 days. -// -// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-top-referral-paths -func (s *RepositoriesService) ListTrafficPaths(ctx context.Context, owner, repo string) ([]*TrafficPath, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/popular/paths", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var paths []*TrafficPath - resp, err := s.client.Do(ctx, req, &paths) - if err != nil { - return nil, resp, err - } - - return paths, resp, nil -} - -// ListTrafficViews get total number of views for the last 14 days and breaks it down either per day or week. -// -// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-page-views -func (s *RepositoriesService) ListTrafficViews(ctx context.Context, owner, repo string, opts *TrafficBreakdownOptions) (*TrafficViews, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/views", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - trafficViews := new(TrafficViews) - resp, err := s.client.Do(ctx, req, &trafficViews) - if err != nil { - return nil, resp, err - } - - return trafficViews, resp, nil -} - -// ListTrafficClones get total number of clones for the last 14 days and breaks it down either per day or week for the last 14 days. 
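The per-day/per-week switch in TrafficBreakdownOptions is the only knob these traffic endpoints take; a sketch of a weekly breakdown, with the repo a placeholder (traffic data requires push access):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	views, _, err := client.Repositories.ListTrafficViews(ctx, "owner", "repo",
		&github.TrafficBreakdownOptions{Per: "week"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("last 14 days: %d views, %d unique\n", views.GetCount(), views.GetUniques())
	for _, v := range views.Views {
		fmt.Printf("  %s: %d (%d unique)\n", v.GetTimestamp(), v.GetCount(), v.GetUniques())
	}
}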
-// -// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-repository-clones -func (s *RepositoriesService) ListTrafficClones(ctx context.Context, owner, repo string, opts *TrafficBreakdownOptions) (*TrafficClones, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/traffic/clones", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - trafficClones := new(TrafficClones) - resp, err := s.client.Do(ctx, req, &trafficClones) - if err != nil { - return nil, resp, err - } - - return trafficClones, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/scim.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/scim.go deleted file mode 100644 index 7deee6be4b..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/scim.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "encoding/json" - "fmt" -) - -// SCIMService provides access to SCIM related functions in the -// GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/scim -type SCIMService service - -// SCIMUserAttributes represents supported SCIM User attributes. -// -// GitHub API docs: https://docs.github.com/en/rest/scim#supported-scim-user-attributes -type SCIMUserAttributes struct { - UserName string `json:"userName"` // Configured by the admin. Could be an email, login, or username. (Required.) - Name SCIMUserName `json:"name"` // (Required.) - DisplayName *string `json:"displayName,omitempty"` // The name of the user, suitable for display to end-users. (Optional.) - Emails []*SCIMUserEmail `json:"emails"` // User emails. (Required.) - Schemas []string `json:"schemas,omitempty"` // (Optional.) - ExternalID *string `json:"externalId,omitempty"` // (Optional.) - Groups []string `json:"groups,omitempty"` // (Optional.) - Active *bool `json:"active,omitempty"` // (Optional.) - // Only populated as a result of calling ListSCIMProvisionedIdentitiesOptions or GetSCIMProvisioningInfoForUser: - ID *string `json:"id,omitempty"` - Meta *SCIMMeta `json:"meta,omitempty"` -} - -// SCIMUserName represents SCIM user information. -type SCIMUserName struct { - GivenName string `json:"givenName"` // The first name of the user. (Required.) - FamilyName string `json:"familyName"` // The family name of the user. (Required.) - Formatted *string `json:"formatted,omitempty"` // (Optional.) -} - -// SCIMUserEmail represents SCIM user email. -type SCIMUserEmail struct { - Value string `json:"value"` // (Required.) - Primary *bool `json:"primary,omitempty"` // (Optional.) - Type *string `json:"type,omitempty"` // (Optional.) -} - -// SCIMMeta represents metadata about the SCIM resource. -type SCIMMeta struct { - ResourceType *string `json:"resourceType,omitempty"` - Created *Timestamp `json:"created,omitempty"` - LastModified *Timestamp `json:"lastModified,omitempty"` - Location *string `json:"location,omitempty"` -} - -// SCIMProvisionedIdentities represents the result of calling ListSCIMProvisionedIdentities. 
-type SCIMProvisionedIdentities struct { - Schemas []string `json:"schemas,omitempty"` - TotalResults *int `json:"totalResults,omitempty"` - ItemsPerPage *int `json:"itemsPerPage,omitempty"` - StartIndex *int `json:"startIndex,omitempty"` - Resources []*SCIMUserAttributes `json:"Resources,omitempty"` -} - -// ListSCIMProvisionedIdentitiesOptions represents options for ListSCIMProvisionedIdentities. -// -// Github API docs: https://docs.github.com/en/rest/scim#list-scim-provisioned-identities--parameters -type ListSCIMProvisionedIdentitiesOptions struct { - StartIndex *int `url:"startIndex,omitempty"` // Used for pagination: the index of the first result to return. (Optional.) - Count *int `url:"count,omitempty"` // Used for pagination: the number of results to return. (Optional.) - // Filter results using the equals query parameter operator (eq). - // You can filter results that are equal to id, userName, emails, and external_id. - // For example, to search for an identity with the userName Octocat, you would use this query: ?filter=userName%20eq%20\"Octocat\". - // To filter results for the identity with the email octocat@github.com, you would use this query: ?filter=emails%20eq%20\"octocat@github.com\". - // (Optional.) - Filter *string `url:"filter,omitempty"` -} - -// ListSCIMProvisionedIdentities lists SCIM provisioned identities. -// -// GitHub API docs: https://docs.github.com/en/rest/scim#list-scim-provisioned-identities -func (s *SCIMService) ListSCIMProvisionedIdentities(ctx context.Context, org string, opts *ListSCIMProvisionedIdentitiesOptions) (*SCIMProvisionedIdentities, *Response, error) { - u := fmt.Sprintf("scim/v2/organizations/%v/Users", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - identities := new(SCIMProvisionedIdentities) - resp, err := s.client.Do(ctx, req, identities) - if err != nil { - return nil, resp, err - } - - return identities, resp, nil -} - -// ProvisionAndInviteSCIMUser provisions organization membership for a user, and sends an activation email to the email address. -// -// GitHub API docs: https://docs.github.com/en/rest/scim#provision-and-invite-a-scim-user -func (s *SCIMService) ProvisionAndInviteSCIMUser(ctx context.Context, org string, opts *SCIMUserAttributes) (*Response, error) { - u := fmt.Sprintf("scim/v2/organizations/%v/Users", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, err - } - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// GetSCIMProvisioningInfoForUser returns SCIM provisioning information for a user. -// -// GitHub API docs: https://docs.github.com/en/rest/scim#supported-scim-user-attributes -func (s *SCIMService) GetSCIMProvisioningInfoForUser(ctx context.Context, org, scimUserID string) (*SCIMUserAttributes, *Response, error) { - u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - user := new(SCIMUserAttributes) - resp, err := s.client.Do(ctx, req, &user) - if err != nil { - return nil, resp, err - } - - return user, resp, nil -} - -// UpdateProvisionedOrgMembership updates a provisioned organization membership. 
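The filter syntax called out in the options comment above is easiest to see in code; the organization name and userName are placeholders, and an org-owner token is assumed:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	ids, _, err := client.SCIM.ListSCIMProvisionedIdentities(ctx, "my-org",
		&github.ListSCIMProvisionedIdentitiesOptions{
			Filter: github.String(`userName eq "octocat"`),
		})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range ids.Resources {
		fmt.Println(r.GetID(), r.UserName)
	}
}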
-// -// GitHub API docs: https://docs.github.com/en/rest/scim#update-a-provisioned-organization-membership -func (s *SCIMService) UpdateProvisionedOrgMembership(ctx context.Context, org, scimUserID string, opts *SCIMUserAttributes) (*Response, error) { - u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) - u, err := addOptions(u, opts) - if err != nil { - return nil, err - } - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// UpdateAttributeForSCIMUserOptions represents options for UpdateAttributeForSCIMUser. -// -// GitHub API docs: https://docs.github.com/en/rest/scim#update-an-attribute-for-a-scim-user--parameters -type UpdateAttributeForSCIMUserOptions struct { - Schemas []string `json:"schemas,omitempty"` // (Optional.) - Operations UpdateAttributeForSCIMUserOperations `json:"operations"` // Set of operations to be performed. (Required.) -} - -// UpdateAttributeForSCIMUserOperations represents operations for UpdateAttributeForSCIMUser. -type UpdateAttributeForSCIMUserOperations struct { - Op string `json:"op"` // (Required.) - Path *string `json:"path,omitempty"` // (Optional.) - Value json.RawMessage `json:"value,omitempty"` // (Optional.) -} - -// UpdateAttributeForSCIMUser updates an attribute for an SCIM user. -// -// GitHub API docs: https://docs.github.com/en/rest/scim#update-an-attribute-for-a-scim-user -func (s *SCIMService) UpdateAttributeForSCIMUser(ctx context.Context, org, scimUserID string, opts *UpdateAttributeForSCIMUserOptions) (*Response, error) { - u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) - u, err := addOptions(u, opts) - if err != nil { - return nil, err - } - - req, err := s.client.NewRequest("PATCH", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeleteSCIMUserFromOrg deletes SCIM user from an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/scim#delete-a-scim-user-from-an-organization -func (s *SCIMService) DeleteSCIMUserFromOrg(ctx context.Context, org, scimUserID string) (*Response, error) { - u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/search.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/search.go deleted file mode 100644 index adb832d0d8..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/search.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "strconv" - "strings" - - qs "github.com/google/go-querystring/query" -) - -// SearchService provides access to the search related functions -// in the GitHub API. -// -// Each method takes a query string defining the search keywords and any search qualifiers. -// For example, when searching issues, the query "gopher is:issue language:go" will search -// for issues containing the word "gopher" in Go repositories. 
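Note that the Operations payload for UpdateAttributeForSCIMUser is a single operation, not a list. A hedged sketch of deactivating a user: the PatchOp schema URN follows the SCIM 2.0 spec rather than anything in this diff, and the org and SCIM user ID are placeholders:

package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Soft-deactivate the identity rather than deleting it outright.
	opts := &github.UpdateAttributeForSCIMUserOptions{
		Schemas: []string{"urn:ietf:params:scim:api:messages:2.0:PatchOp"},
		Operations: github.UpdateAttributeForSCIMUserOperations{
			Op:    "replace",
			Path:  github.String("active"),
			Value: json.RawMessage(`false`),
		},
	}
	if _, err := client.SCIM.UpdateAttributeForSCIMUser(ctx, "my-org", "scim-user-id", opts); err != nil {
		log.Fatal(err)
	}
}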
The method call -// -// opts := &github.SearchOptions{Sort: "created", Order: "asc"} -// cl.Search.Issues(ctx, "gopher is:issue language:go", opts) -// -// will search for such issues, sorting by creation date in ascending order -// (i.e., oldest first). -// -// If query includes multiple conditions, it MUST NOT include "+" as the condition separator. -// You have to use " " as the separator instead. -// For example, querying with "language:c++" and "leveldb", then query should be -// "language:c++ leveldb" but not "language:c+++leveldb". -// -// GitHub API docs: https://docs.github.com/en/rest/search/ -type SearchService service - -// SearchOptions specifies optional parameters to the SearchService methods. -type SearchOptions struct { - // How to sort the search results. Possible values are: - // - for repositories: stars, fork, updated - // - for commits: author-date, committer-date - // - for code: indexed - // - for issues: comments, created, updated - // - for users: followers, repositories, joined - // - // Default is to sort by best match. - Sort string `url:"sort,omitempty"` - - // Sort order if sort parameter is provided. Possible values are: asc, - // desc. Default is desc. - Order string `url:"order,omitempty"` - - // Whether to retrieve text match metadata with a query - TextMatch bool `url:"-"` - - ListOptions -} - -// Common search parameters. -type searchParameters struct { - Query string - RepositoryID *int64 // Sent if non-nil. -} - -// RepositoriesSearchResult represents the result of a repositories search. -type RepositoriesSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Repositories []*Repository `json:"items,omitempty"` -} - -// Repositories searches repositories via various criteria. -// -// GitHub API docs: https://docs.github.com/en/rest/search#search-repositories -func (s *SearchService) Repositories(ctx context.Context, query string, opts *SearchOptions) (*RepositoriesSearchResult, *Response, error) { - result := new(RepositoriesSearchResult) - resp, err := s.search(ctx, "repositories", &searchParameters{Query: query}, opts, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// TopicsSearchResult represents the result of a topics search. -type TopicsSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Topics []*TopicResult `json:"items,omitempty"` -} - -type TopicResult struct { - Name *string `json:"name,omitempty"` - DisplayName *string `json:"display_name,omitempty"` - ShortDescription *string `json:"short_description,omitempty"` - Description *string `json:"description,omitempty"` - CreatedBy *string `json:"created_by,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *string `json:"updated_at,omitempty"` - Featured *bool `json:"featured,omitempty"` - Curated *bool `json:"curated,omitempty"` - Score *float64 `json:"score,omitempty"` -} - -// Topics finds topics via various criteria. Results are sorted by best match. -// Please see https://help.github.com/en/articles/searching-topics for more -// information about search qualifiers. 
-// -// GitHub API docs: https://docs.github.com/en/rest/search#search-topics -func (s *SearchService) Topics(ctx context.Context, query string, opts *SearchOptions) (*TopicsSearchResult, *Response, error) { - result := new(TopicsSearchResult) - resp, err := s.search(ctx, "topics", &searchParameters{Query: query}, opts, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// CommitsSearchResult represents the result of a commits search. -type CommitsSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Commits []*CommitResult `json:"items,omitempty"` -} - -// CommitResult represents a commit object as returned in commit search endpoint response. -type CommitResult struct { - SHA *string `json:"sha,omitempty"` - Commit *Commit `json:"commit,omitempty"` - Author *User `json:"author,omitempty"` - Committer *User `json:"committer,omitempty"` - Parents []*Commit `json:"parents,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - - Repository *Repository `json:"repository,omitempty"` - Score *float64 `json:"score,omitempty"` -} - -// Commits searches commits via various criteria. -// -// GitHub API docs: https://docs.github.com/en/rest/search#search-commits -func (s *SearchService) Commits(ctx context.Context, query string, opts *SearchOptions) (*CommitsSearchResult, *Response, error) { - result := new(CommitsSearchResult) - resp, err := s.search(ctx, "commits", &searchParameters{Query: query}, opts, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// IssuesSearchResult represents the result of an issues search. -type IssuesSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Issues []*Issue `json:"items,omitempty"` -} - -// Issues searches issues via various criteria. -// -// GitHub API docs: https://docs.github.com/en/rest/search#search-issues-and-pull-requests -func (s *SearchService) Issues(ctx context.Context, query string, opts *SearchOptions) (*IssuesSearchResult, *Response, error) { - result := new(IssuesSearchResult) - resp, err := s.search(ctx, "issues", &searchParameters{Query: query}, opts, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// UsersSearchResult represents the result of a users search. -type UsersSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Users []*User `json:"items,omitempty"` -} - -// Users searches users via various criteria. -// -// GitHub API docs: https://docs.github.com/en/rest/search#search-users -func (s *SearchService) Users(ctx context.Context, query string, opts *SearchOptions) (*UsersSearchResult, *Response, error) { - result := new(UsersSearchResult) - resp, err := s.search(ctx, "users", &searchParameters{Query: query}, opts, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// Match represents a single text match. 
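The space-not-plus separator rule in the package comment above trips people up; a sketch using the documented query and sort options:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Qualifiers are separated by spaces, never by "+".
	opts := &github.SearchOptions{
		Sort:        "created",
		Order:       "asc",
		ListOptions: github.ListOptions{PerPage: 50},
	}
	res, _, err := client.Search.Issues(ctx, "gopher is:issue language:go", opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.GetTotal(), "matches")
	for _, is := range res.Issues {
		fmt.Println(is.GetNumber(), is.GetTitle())
	}
}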
-type Match struct { - Text *string `json:"text,omitempty"` - Indices []int `json:"indices,omitempty"` -} - -// TextMatch represents a text match for a SearchResult -type TextMatch struct { - ObjectURL *string `json:"object_url,omitempty"` - ObjectType *string `json:"object_type,omitempty"` - Property *string `json:"property,omitempty"` - Fragment *string `json:"fragment,omitempty"` - Matches []*Match `json:"matches,omitempty"` -} - -func (tm TextMatch) String() string { - return Stringify(tm) -} - -// CodeSearchResult represents the result of a code search. -type CodeSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - CodeResults []*CodeResult `json:"items,omitempty"` -} - -// CodeResult represents a single search result. -type CodeResult struct { - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - SHA *string `json:"sha,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Repository *Repository `json:"repository,omitempty"` - TextMatches []*TextMatch `json:"text_matches,omitempty"` -} - -func (c CodeResult) String() string { - return Stringify(c) -} - -// Code searches code via various criteria. -// -// GitHub API docs: https://docs.github.com/en/rest/search#search-code -func (s *SearchService) Code(ctx context.Context, query string, opts *SearchOptions) (*CodeSearchResult, *Response, error) { - result := new(CodeSearchResult) - resp, err := s.search(ctx, "code", &searchParameters{Query: query}, opts, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// LabelsSearchResult represents the result of a code search. -type LabelsSearchResult struct { - Total *int `json:"total_count,omitempty"` - IncompleteResults *bool `json:"incomplete_results,omitempty"` - Labels []*LabelResult `json:"items,omitempty"` -} - -// LabelResult represents a single search result. -type LabelResult struct { - ID *int64 `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Color *string `json:"color,omitempty"` - Default *bool `json:"default,omitempty"` - Description *string `json:"description,omitempty"` - Score *float64 `json:"score,omitempty"` -} - -func (l LabelResult) String() string { - return Stringify(l) -} - -// Labels searches labels in the repository with ID repoID via various criteria. -// -// GitHub API docs: https://docs.github.com/en/rest/search#search-labels -func (s *SearchService) Labels(ctx context.Context, repoID int64, query string, opts *SearchOptions) (*LabelsSearchResult, *Response, error) { - result := new(LabelsSearchResult) - resp, err := s.search(ctx, "labels", &searchParameters{RepositoryID: &repoID, Query: query}, opts, result) - if err != nil { - return nil, resp, err - } - - return result, resp, nil -} - -// Helper function that executes search queries against different -// GitHub search types (repositories, commits, code, issues, users, labels) -// -// If searchParameters.Query includes multiple condition, it MUST NOT include "+" as condition separator. -// For example, querying with "language:c++" and "leveldb", then searchParameters.Query should be "language:c++ leveldb" but not "language:c+++leveldb". 
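TextMatch is the one SearchOptions field that changes the Accept header rather than the query string; a sketch of retrieving match fragments from a code search (the query itself is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	res, _, err := client.Search.Code(ctx, "NewClient repo:google/go-github",
		&github.SearchOptions{TextMatch: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, cr := range res.CodeResults {
		fmt.Println(cr.GetPath())
		for _, tm := range cr.TextMatches {
			fmt.Println("  fragment:", tm.GetFragment())
		}
	}
}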
-func (s *SearchService) search(ctx context.Context, searchType string, parameters *searchParameters, opts *SearchOptions, result interface{}) (*Response, error) { - params, err := qs.Values(opts) - if err != nil { - return nil, err - } - - if parameters.RepositoryID != nil { - params.Set("repository_id", strconv.FormatInt(*parameters.RepositoryID, 10)) - } - params.Set("q", parameters.Query) - u := fmt.Sprintf("search/%s?%s", searchType, params.Encode()) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - var acceptHeaders []string - switch { - case searchType == "commits": - // Accept header for search commits preview endpoint - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders = append(acceptHeaders, mediaTypeCommitSearchPreview) - case searchType == "topics": - // Accept header for search repositories based on topics preview endpoint - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders = append(acceptHeaders, mediaTypeTopicsPreview) - case searchType == "repositories": - // Accept header for search repositories based on topics preview endpoint - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders = append(acceptHeaders, mediaTypeTopicsPreview) - case searchType == "issues": - // Accept header for search issues based on reactions preview endpoint - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders = append(acceptHeaders, mediaTypeReactionsPreview) - } - // https://docs.github.com/en/rest/search#search-repositories - // Accept header defaults to "application/vnd.github.v3+json" - // We change it here to fetch back text-match metadata - if opts != nil && opts.TextMatch { - acceptHeaders = append(acceptHeaders, "application/vnd.github.v3.text-match+json") - } - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - return s.client.Do(ctx, req, result) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/secret_scanning.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/secret_scanning.go deleted file mode 100644 index d512560d9f..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/secret_scanning.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// SecretScanningService handles communication with the secret scanning related -// methods of the GitHub API. -type SecretScanningService service - -// SecretScanningAlert represents a GitHub secret scanning alert. -type SecretScanningAlert struct { - Number *int `json:"number,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - LocationsURL *string `json:"locations_url,omitempty"` - State *string `json:"state,omitempty"` - Resolution *string `json:"resolution,omitempty"` - ResolvedAt *Timestamp `json:"resolved_at,omitempty"` - ResolvedBy *User `json:"resolved_by,omitempty"` - SecretType *string `json:"secret_type,omitempty"` - Secret *string `json:"secret,omitempty"` -} - -// SecretScanningAlertLocation represents the location for a secret scanning alert. 
-type SecretScanningAlertLocation struct { - Type *string `json:"type,omitempty"` - Details *SecretScanningAlertLocationDetails `json:"details,omitempty"` -} - -// SecretScanningAlertLocationDetails represents the location details for a secret scanning alert. -type SecretScanningAlertLocationDetails struct { - Path *string `json:"path,omitempty"` - Startline *int `json:"start_line,omitempty"` - EndLine *int `json:"end_line,omitempty"` - StartColumn *int `json:"start_column,omitempty"` - EndColumn *int `json:"end_column,omitempty"` - BlobSHA *string `json:"blob_sha,omitempty"` - BlobURL *string `json:"blob_url,omitempty"` - CommitSHA *string `json:"commit_sha,omitempty"` - CommitURL *string `json:"commit_url,omitempty"` -} - -// SecretScanningAlertListOptions specifies optional parameters to the SecretScanningService.ListAlertsForEnterprise method. -type SecretScanningAlertListOptions struct { - // State of the secret scanning alerts to list. Set to open or resolved to only list secret scanning alerts in a specific state. - State string `url:"state,omitempty"` - - // A comma-separated list of secret types to return. By default all secret types are returned. - SecretType string `url:"secret_type,omitempty"` - - // A comma-separated list of resolutions. Only secret scanning alerts with one of these resolutions are listed. - // Valid resolutions are false_positive, wont_fix, revoked, pattern_edited, pattern_deleted or used_in_tests. - Resolution string `url:"resolution,omitempty"` - - ListCursorOptions - - // List options can vary on the Enterprise type. - // On Enterprise Cloud, Secret Scan alerts support requesting by page number - // along with providing a cursor for an "after" param. - // See: https://docs.github.com/en/enterprise-cloud@latest/rest/secret-scanning#list-secret-scanning-alerts-for-an-organization - // Whereas on Enterprise Server, pagination is by index. - // See: https://docs.github.com/en/enterprise-server@3.6/rest/secret-scanning#list-secret-scanning-alerts-for-an-organization - ListOptions -} - -// SecretScanningAlertUpdateOptions specifies optional parameters to the SecretScanningService.UpdateAlert method. -type SecretScanningAlertUpdateOptions struct { - // Required. Sets the state of the secret scanning alert. Can be either open or resolved. - // You must provide resolution when you set the state to resolved. - State *string `url:"state,omitempty"` - - // A comma-separated list of secret types to return. By default all secret types are returned. - SecretType *string `url:"secret_type,omitempty"` - - // Required when the state is resolved. The reason for resolving the alert. Can be one of false_positive, - // wont_fix, revoked, or used_in_tests. - Resolution *string `url:"resolution,omitempty"` -} - -// Lists secret scanning alerts for eligible repositories in an enterprise, from newest to oldest. -// -// To use this endpoint, you must be a member of the enterprise, and you must use an access token with the repo scope or -// security_events scope. Alerts are only returned for organizations in the enterprise for which you are an organization owner or a security manager. 
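A sketch of listing open alerts with the filter options above; owner/repo are placeholders and the token is assumed to carry the security_events scope:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	alerts, _, err := client.SecretScanning.ListAlertsForRepo(ctx, "owner", "repo",
		&github.SecretScanningAlertListOptions{State: "open"})
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range alerts {
		fmt.Println(a.GetNumber(), a.GetSecretType(), a.GetState())
	}
}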
-// -// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-secret-scanning-alerts-for-an-enterprise -func (s *SecretScanningService) ListAlertsForEnterprise(ctx context.Context, enterprise string, opts *SecretScanningAlertListOptions) ([]*SecretScanningAlert, *Response, error) { - u := fmt.Sprintf("enterprises/%v/secret-scanning/alerts", enterprise) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var alerts []*SecretScanningAlert - resp, err := s.client.Do(ctx, req, &alerts) - if err != nil { - return nil, resp, err - } - - return alerts, resp, nil -} - -// Lists secret scanning alerts for eligible repositories in an organization, from newest to oldest. -// -// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with -// the repo scope or security_events scope. -// -// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-secret-scanning-alerts-for-an-organization -func (s *SecretScanningService) ListAlertsForOrg(ctx context.Context, org string, opts *SecretScanningAlertListOptions) ([]*SecretScanningAlert, *Response, error) { - u := fmt.Sprintf("orgs/%v/secret-scanning/alerts", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var alerts []*SecretScanningAlert - resp, err := s.client.Do(ctx, req, &alerts) - if err != nil { - return nil, resp, err - } - - return alerts, resp, nil -} - -// Lists secret scanning alerts for a private repository, from newest to oldest. -// -// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with -// the repo scope or security_events scope. -// -// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-secret-scanning-alerts-for-a-repository -func (s *SecretScanningService) ListAlertsForRepo(ctx context.Context, owner, repo string, opts *SecretScanningAlertListOptions) ([]*SecretScanningAlert, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var alerts []*SecretScanningAlert - resp, err := s.client.Do(ctx, req, &alerts) - if err != nil { - return nil, resp, err - } - - return alerts, resp, nil -} - -// Gets a single secret scanning alert detected in a private repository. -// -// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with -// the repo scope or security_events scope. 
-// -// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#get-a-secret-scanning-alert -func (s *SecretScanningService) GetAlert(ctx context.Context, owner, repo string, number int64) (*SecretScanningAlert, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts/%v", owner, repo, number) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var alert *SecretScanningAlert - resp, err := s.client.Do(ctx, req, &alert) - if err != nil { - return nil, resp, err - } - - return alert, resp, nil -} - -// Updates the status of a secret scanning alert in a private repository. -// -// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with -// the repo scope or security_events scope. -// -// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#update-a-secret-scanning-alert -func (s *SecretScanningService) UpdateAlert(ctx context.Context, owner, repo string, number int64, opts *SecretScanningAlertUpdateOptions) (*SecretScanningAlert, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts/%v", owner, repo, number) - - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - var alert *SecretScanningAlert - resp, err := s.client.Do(ctx, req, &alert) - if err != nil { - return nil, resp, err - } - - return alert, resp, nil -} - -// Lists all locations for a given secret scanning alert for a private repository. -// -// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with -// the repo scope or security_events scope. -// -// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-locations-for-a-secret-scanning-alert -func (s *SecretScanningService) ListLocationsForAlert(ctx context.Context, owner, repo string, number int64, opts *ListOptions) ([]*SecretScanningAlertLocation, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts/%v/locations", owner, repo, number) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var locations []*SecretScanningAlertLocation - resp, err := s.client.Do(ctx, req, &locations) - if err != nil { - return nil, resp, err - } - - return locations, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/strings.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/strings.go deleted file mode 100644 index 5611b96a88..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/strings.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "fmt" - "io" - - "reflect" -) - -var timestampType = reflect.TypeOf(Timestamp{}) - -// Stringify attempts to create a reasonable string representation of types in -// the GitHub library. It does things like resolve pointers to their values -// and omits struct fields with nil values. 
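Resolving an alert requires both State and Resolution, as the update options above note; a sketch with a placeholder repo and alert number:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	alert, _, err := client.SecretScanning.UpdateAlert(ctx, "owner", "repo", 42,
		&github.SecretScanningAlertUpdateOptions{
			State:      github.String("resolved"),
			Resolution: github.String("revoked"),
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(alert.GetState(), alert.GetResolution())
}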
-func Stringify(message interface{}) string { - var buf bytes.Buffer - v := reflect.ValueOf(message) - stringifyValue(&buf, v) - return buf.String() -} - -// stringifyValue was heavily inspired by the goprotobuf library. - -func stringifyValue(w io.Writer, val reflect.Value) { - if val.Kind() == reflect.Ptr && val.IsNil() { - w.Write([]byte("")) - return - } - - v := reflect.Indirect(val) - - switch v.Kind() { - case reflect.String: - fmt.Fprintf(w, `"%s"`, v) - case reflect.Slice: - w.Write([]byte{'['}) - for i := 0; i < v.Len(); i++ { - if i > 0 { - w.Write([]byte{' '}) - } - - stringifyValue(w, v.Index(i)) - } - - w.Write([]byte{']'}) - return - case reflect.Struct: - if v.Type().Name() != "" { - w.Write([]byte(v.Type().String())) - } - - // special handling of Timestamp values - if v.Type() == timestampType { - fmt.Fprintf(w, "{%s}", v.Interface()) - return - } - - w.Write([]byte{'{'}) - - var sep bool - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - if fv.Kind() == reflect.Ptr && fv.IsNil() { - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - continue - } - if fv.Kind() == reflect.Map && fv.IsNil() { - continue - } - - if sep { - w.Write([]byte(", ")) - } else { - sep = true - } - - w.Write([]byte(v.Type().Field(i).Name)) - w.Write([]byte{':'}) - stringifyValue(w, fv) - } - - w.Write([]byte{'}'}) - default: - if v.CanInterface() { - fmt.Fprint(w, v.Interface()) - } - } -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/teams.go deleted file mode 100644 index 0ee7c20079..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams.go +++ /dev/null @@ -1,992 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/http" - "strings" -) - -// TeamsService provides access to the team-related functions -// in the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/ -type TeamsService service - -// Team represents a team within a GitHub organization. Teams are used to -// manage access to an organization's repositories. -type Team struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - URL *string `json:"url,omitempty"` - Slug *string `json:"slug,omitempty"` - - // Permission specifies the default permission for repositories owned by the team. - Permission *string `json:"permission,omitempty"` - - // Permissions identifies the permissions that a team has on a given - // repository. This is only populated when calling Repositories.ListTeams. - Permissions map[string]bool `json:"permissions,omitempty"` - - // Privacy identifies the level of privacy this team should have. - // Possible values are: - // secret - only visible to organization owners and members of this team - // closed - visible to all members of this organization - // Default is "secret". 
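Stringify's pointer-resolving behaviour is easier to show than describe; the expected output in the comment below is inferred from the implementation above, not verified output:

package main

import (
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	team := &github.Team{
		ID:   github.Int64(1),
		Name: github.String("platform"),
	}
	// Pointers are dereferenced and nil fields dropped, roughly:
	// github.Team{ID:1, Name:"platform"}
	fmt.Println(github.Stringify(team))
}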
- Privacy *string `json:"privacy,omitempty"` - - MembersCount *int `json:"members_count,omitempty"` - ReposCount *int `json:"repos_count,omitempty"` - Organization *Organization `json:"organization,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - MembersURL *string `json:"members_url,omitempty"` - RepositoriesURL *string `json:"repositories_url,omitempty"` - Parent *Team `json:"parent,omitempty"` - - // LDAPDN is only available in GitHub Enterprise and when the team - // membership is synchronized with LDAP. - LDAPDN *string `json:"ldap_dn,omitempty"` -} - -func (t Team) String() string { - return Stringify(t) -} - -// Invitation represents a team member's invitation status. -type Invitation struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Login *string `json:"login,omitempty"` - Email *string `json:"email,omitempty"` - // Role can be one of the values - 'direct_member', 'admin', 'billing_manager', 'hiring_manager', or 'reinstate'. - Role *string `json:"role,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - Inviter *User `json:"inviter,omitempty"` - TeamCount *int `json:"team_count,omitempty"` - InvitationTeamURL *string `json:"invitation_team_url,omitempty"` - FailedAt *Timestamp `json:"failed_at,omitempty"` - FailedReason *string `json:"failed_reason,omitempty"` -} - -func (i Invitation) String() string { - return Stringify(i) -} - -// ListTeams lists all of the teams for an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-teams -func (s *TeamsService) ListTeams(ctx context.Context, org string, opts *ListOptions) ([]*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} - -// GetTeamByID fetches a team, given a specified organization ID, by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#get-a-team-by-name -func (s *TeamsService) GetTeamByID(ctx context.Context, orgID, teamID int64) (*Team, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v", orgID, teamID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - t := new(Team) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// GetTeamBySlug fetches a team, given a specified organization name, by slug. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#get-a-team-by-name -func (s *TeamsService) GetTeamBySlug(ctx context.Context, org, slug string) (*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v", org, slug) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - t := new(Team) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// NewTeam represents a team to be created or modified. -type NewTeam struct { - Name string `json:"name"` // Name of the team. (Required.) 
- Description *string `json:"description,omitempty"` - Maintainers []string `json:"maintainers,omitempty"` - RepoNames []string `json:"repo_names,omitempty"` - ParentTeamID *int64 `json:"parent_team_id,omitempty"` - - // Deprecated: Permission is deprecated when creating or editing a team in an org - // using the new GitHub permission model. It no longer identifies the - // permission a team has on its repos, but only specifies the default - // permission a repo is initially added with. Avoid confusion by - // specifying a permission value when calling AddTeamRepo. - Permission *string `json:"permission,omitempty"` - - // Privacy identifies the level of privacy this team should have. - // Possible values are: - // secret - only visible to organization owners and members of this team - // closed - visible to all members of this organization - // Default is "secret". - Privacy *string `json:"privacy,omitempty"` - - // LDAPDN may be used in GitHub Enterprise when the team membership - // is synchronized with LDAP. - LDAPDN *string `json:"ldap_dn,omitempty"` -} - -func (s NewTeam) String() string { - return Stringify(s) -} - -// CreateTeam creates a new team within an organization. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#create-a-team -func (s *TeamsService) CreateTeam(ctx context.Context, org string, team NewTeam) (*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams", org) - req, err := s.client.NewRequest("POST", u, team) - if err != nil { - return nil, nil, err - } - - t := new(Team) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// newTeamNoParent is the same as NewTeam but ensures that the -// "parent_team_id" field will be null. It is for internal use -// only and should not be exported. -type newTeamNoParent struct { - Name string `json:"name"` - Description *string `json:"description,omitempty"` - Maintainers []string `json:"maintainers,omitempty"` - RepoNames []string `json:"repo_names,omitempty"` - ParentTeamID *int64 `json:"parent_team_id"` // This will be "null" - Privacy *string `json:"privacy,omitempty"` - LDAPDN *string `json:"ldap_dn,omitempty"` -} - -// copyNewTeamWithoutParent is used to set the "parent_team_id" -// field to "null" after copying the other fields from a NewTeam. -// It is for internal use only and should not be exported. -func copyNewTeamWithoutParent(team *NewTeam) *newTeamNoParent { - return &newTeamNoParent{ - Name: team.Name, - Description: team.Description, - Maintainers: team.Maintainers, - RepoNames: team.RepoNames, - Privacy: team.Privacy, - LDAPDN: team.LDAPDN, - } -} - -// EditTeamByID edits a team, given an organization ID, selected by ID. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#update-a-team -func (s *TeamsService) EditTeamByID(ctx context.Context, orgID, teamID int64, team NewTeam, removeParent bool) (*Team, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v", orgID, teamID) - - var req *http.Request - var err error - if removeParent { - teamRemoveParent := copyNewTeamWithoutParent(&team) - req, err = s.client.NewRequest("PATCH", u, teamRemoveParent) - } else { - req, err = s.client.NewRequest("PATCH", u, team) - } - if err != nil { - return nil, nil, err - } - - t := new(Team) - resp, err := s.client.Do(ctx, req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// EditTeamBySlug edits a team, given an organization name, by slug. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#update-a-team
-func (s *TeamsService) EditTeamBySlug(ctx context.Context, org, slug string, team NewTeam, removeParent bool) (*Team, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v", org, slug)
-
-	var req *http.Request
-	var err error
-	if removeParent {
-		teamRemoveParent := copyNewTeamWithoutParent(&team)
-		req, err = s.client.NewRequest("PATCH", u, teamRemoveParent)
-	} else {
-		req, err = s.client.NewRequest("PATCH", u, team)
-	}
-	if err != nil {
-		return nil, nil, err
-	}
-
-	t := new(Team)
-	resp, err := s.client.Do(ctx, req, t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// DeleteTeamByID deletes a team referenced by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#delete-a-team
-func (s *TeamsService) DeleteTeamByID(ctx context.Context, orgID, teamID int64) (*Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v", orgID, teamID)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// DeleteTeamBySlug deletes a team referenced by slug.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#delete-a-team
-func (s *TeamsService) DeleteTeamBySlug(ctx context.Context, org, slug string) (*Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v", org, slug)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ListChildTeamsByParentID lists child teams for a parent team given parent ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-child-teams
-func (s *TeamsService) ListChildTeamsByParentID(ctx context.Context, orgID, teamID int64, opts *ListOptions) ([]*Team, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/teams", orgID, teamID)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var teams []*Team
-	resp, err := s.client.Do(ctx, req, &teams)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return teams, resp, nil
-}
-
-// ListChildTeamsByParentSlug lists child teams for a parent team given parent slug.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-child-teams
-func (s *TeamsService) ListChildTeamsByParentSlug(ctx context.Context, org, slug string, opts *ListOptions) ([]*Team, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/teams", org, slug)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var teams []*Team
-	resp, err := s.client.Do(ctx, req, &teams)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return teams, resp, nil
-}
-
-// ListTeamReposByID lists the repositories given a team ID that the specified team has access to.
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-repositories -func (s *TeamsService) ListTeamReposByID(ctx context.Context, orgID, teamID int64, opts *ListOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/repos", orgID, teamID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when topics API fully launches. - headers := []string{mediaTypeTopicsPreview} - req.Header.Set("Accept", strings.Join(headers, ", ")) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// ListTeamReposBySlug lists the repositories given a team slug that the specified team has access to. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-repositories -func (s *TeamsService) ListTeamReposBySlug(ctx context.Context, org, slug string, opts *ListOptions) ([]*Repository, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/repos", org, slug) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when topics API fully launches. - headers := []string{mediaTypeTopicsPreview} - req.Header.Set("Accept", strings.Join(headers, ", ")) - - var repos []*Repository - resp, err := s.client.Do(ctx, req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// IsTeamRepoByID checks if a team, given its ID, manages the specified repository. If the -// repository is managed by team, a Repository is returned which includes the -// permissions team has for that repo. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-repository -func (s *TeamsService) IsTeamRepoByID(ctx context.Context, orgID, teamID int64, owner, repo string) (*Repository, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/repos/%v/%v", orgID, teamID, owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - headers := []string{mediaTypeOrgPermissionRepo} - req.Header.Set("Accept", strings.Join(headers, ", ")) - - repository := new(Repository) - resp, err := s.client.Do(ctx, req, repository) - if err != nil { - return nil, resp, err - } - - return repository, resp, nil -} - -// IsTeamRepoBySlug checks if a team, given its slug, manages the specified repository. If the -// repository is managed by team, a Repository is returned which includes the -// permissions team has for that repo. 
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-repository -func (s *TeamsService) IsTeamRepoBySlug(ctx context.Context, org, slug, owner, repo string) (*Repository, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/repos/%v/%v", org, slug, owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - headers := []string{mediaTypeOrgPermissionRepo} - req.Header.Set("Accept", strings.Join(headers, ", ")) - - repository := new(Repository) - resp, err := s.client.Do(ctx, req, repository) - if err != nil { - return nil, resp, err - } - - return repository, resp, nil -} - -// TeamAddTeamRepoOptions specifies the optional parameters to the -// TeamsService.AddTeamRepoByID and TeamsService.AddTeamRepoBySlug methods. -type TeamAddTeamRepoOptions struct { - // Permission specifies the permission to grant the team on this repository. - // Possible values are: - // pull - team members can pull, but not push to or administer this repository - // push - team members can pull and push, but not administer this repository - // admin - team members can pull, push and administer this repository - // maintain - team members can manage the repository without access to sensitive or destructive actions. - // triage - team members can proactively manage issues and pull requests without write access. - // - // If not specified, the team's permission attribute will be used. - Permission string `json:"permission,omitempty"` -} - -// AddTeamRepoByID adds a repository to be managed by the specified team given the team ID. -// The specified repository must be owned by the organization to which the team -// belongs, or a direct fork of a repository owned by the organization. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-repository-permissions -func (s *TeamsService) AddTeamRepoByID(ctx context.Context, orgID, teamID int64, owner, repo string, opts *TeamAddTeamRepoOptions) (*Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/repos/%v/%v", orgID, teamID, owner, repo) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// AddTeamRepoBySlug adds a repository to be managed by the specified team given the team slug. -// The specified repository must be owned by the organization to which the team -// belongs, or a direct fork of a repository owned by the organization. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-repository-permissions -func (s *TeamsService) AddTeamRepoBySlug(ctx context.Context, org, slug, owner, repo string, opts *TeamAddTeamRepoOptions) (*Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/repos/%v/%v", org, slug, owner, repo) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// RemoveTeamRepoByID removes a repository from being managed by the specified -// team given the team ID. Note that this does not delete the repository, it -// just removes it from the team. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-repository-from-a-team
-func (s *TeamsService) RemoveTeamRepoByID(ctx context.Context, orgID, teamID int64, owner, repo string) (*Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/repos/%v/%v", orgID, teamID, owner, repo)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// RemoveTeamRepoBySlug removes a repository from being managed by the specified
-// team given the team slug. Note that this does not delete the repository, it
-// just removes it from the team.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-repository-from-a-team
-func (s *TeamsService) RemoveTeamRepoBySlug(ctx context.Context, org, slug, owner, repo string) (*Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/repos/%v/%v", org, slug, owner, repo)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ListUserTeams lists a user's teams.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-teams-for-the-authenticated-user
-func (s *TeamsService) ListUserTeams(ctx context.Context, opts *ListOptions) ([]*Team, *Response, error) {
-	u := "user/teams"
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var teams []*Team
-	resp, err := s.client.Do(ctx, req, &teams)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return teams, resp, nil
-}
-
-// ListTeamProjectsByID lists the organization projects for a team given the team ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-projects
-func (s *TeamsService) ListTeamProjectsByID(ctx context.Context, orgID, teamID int64) ([]*Project, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/projects", orgID, teamID)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	acceptHeaders := []string{mediaTypeProjectsPreview}
-	req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
-
-	var projects []*Project
-	resp, err := s.client.Do(ctx, req, &projects)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return projects, resp, nil
-}
-
-// ListTeamProjectsBySlug lists the organization projects for a team given the team slug.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-projects
-func (s *TeamsService) ListTeamProjectsBySlug(ctx context.Context, org, slug string) ([]*Project, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/projects", org, slug)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	acceptHeaders := []string{mediaTypeProjectsPreview}
-	req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
-
-	var projects []*Project
-	resp, err := s.client.Do(ctx, req, &projects)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return projects, resp, nil
-}
-
-// ReviewTeamProjectsByID checks whether a team, given its ID, has read, write, or admin
-// permissions for an organization project.
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-project -func (s *TeamsService) ReviewTeamProjectsByID(ctx context.Context, orgID, teamID, projectID int64) (*Project, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/projects/%v", orgID, teamID, projectID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeProjectsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - projects := &Project{} - resp, err := s.client.Do(ctx, req, &projects) - if err != nil { - return nil, resp, err - } - - return projects, resp, nil -} - -// ReviewTeamProjectsBySlug checks whether a team, given its slug, has read, write, or admin -// permissions for an organization project. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-project -func (s *TeamsService) ReviewTeamProjectsBySlug(ctx context.Context, org, slug string, projectID int64) (*Project, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/projects/%v", org, slug, projectID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeProjectsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - projects := &Project{} - resp, err := s.client.Do(ctx, req, &projects) - if err != nil { - return nil, resp, err - } - - return projects, resp, nil -} - -// TeamProjectOptions specifies the optional parameters to the -// TeamsService.AddTeamProject method. -type TeamProjectOptions struct { - // Permission specifies the permission to grant to the team for this project. - // Possible values are: - // "read" - team members can read, but not write to or administer this project. - // "write" - team members can read and write, but not administer this project. - // "admin" - team members can read, write and administer this project. - // - Permission *string `json:"permission,omitempty"` -} - -// AddTeamProjectByID adds an organization project to a team given the team ID. -// To add a project to a team or update the team's permission on a project, the -// authenticated user must have admin permissions for the project. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-project-permissions -func (s *TeamsService) AddTeamProjectByID(ctx context.Context, orgID, teamID, projectID int64, opts *TeamProjectOptions) (*Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/projects/%v", orgID, teamID, projectID) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeProjectsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - return s.client.Do(ctx, req, nil) -} - -// AddTeamProjectBySlug adds an organization project to a team given the team slug. -// To add a project to a team or update the team's permission on a project, the -// authenticated user must have admin permissions for the project. 
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-project-permissions -func (s *TeamsService) AddTeamProjectBySlug(ctx context.Context, org, slug string, projectID int64, opts *TeamProjectOptions) (*Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/projects/%v", org, slug, projectID) - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeProjectsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - return s.client.Do(ctx, req, nil) -} - -// RemoveTeamProjectByID removes an organization project from a team given team ID. -// An organization owner or a team maintainer can remove any project from the team. -// To remove a project from a team as an organization member, the authenticated user -// must have "read" access to both the team and project, or "admin" access to the team -// or project. -// Note: This endpoint removes the project from the team, but does not delete it. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-project-from-a-team -func (s *TeamsService) RemoveTeamProjectByID(ctx context.Context, orgID, teamID, projectID int64) (*Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/projects/%v", orgID, teamID, projectID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeProjectsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - return s.client.Do(ctx, req, nil) -} - -// RemoveTeamProjectBySlug removes an organization project from a team given team slug. -// An organization owner or a team maintainer can remove any project from the team. -// To remove a project from a team as an organization member, the authenticated user -// must have "read" access to both the team and project, or "admin" access to the team -// or project. -// Note: This endpoint removes the project from the team, but does not delete it. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-project-from-a-team -func (s *TeamsService) RemoveTeamProjectBySlug(ctx context.Context, org, slug string, projectID int64) (*Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/projects/%v", org, slug, projectID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - acceptHeaders := []string{mediaTypeProjectsPreview} - req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) - - return s.client.Do(ctx, req, nil) -} - -// IDPGroupList represents a list of external identity provider (IDP) groups. -type IDPGroupList struct { - Groups []*IDPGroup `json:"groups"` -} - -// IDPGroup represents an external identity provider (IDP) group. -type IDPGroup struct { - GroupID *string `json:"group_id,omitempty"` - GroupName *string `json:"group_name,omitempty"` - GroupDescription *string `json:"group_description,omitempty"` -} - -// ListIDPGroupsInOrganization lists IDP groups available in an organization. 
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#list-idp-groups-for-an-organization -func (s *TeamsService) ListIDPGroupsInOrganization(ctx context.Context, org string, opts *ListCursorOptions) (*IDPGroupList, *Response, error) { - u := fmt.Sprintf("orgs/%v/team-sync/groups", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - groups := new(IDPGroupList) - resp, err := s.client.Do(ctx, req, groups) - if err != nil { - return nil, resp, err - } - - return groups, resp, nil -} - -// ListIDPGroupsForTeamByID lists IDP groups connected to a team on GitHub -// given organization and team IDs. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#list-idp-groups-for-a-team -func (s *TeamsService) ListIDPGroupsForTeamByID(ctx context.Context, orgID, teamID int64) (*IDPGroupList, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/team-sync/group-mappings", orgID, teamID) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - groups := new(IDPGroupList) - resp, err := s.client.Do(ctx, req, groups) - if err != nil { - return nil, resp, err - } - - return groups, resp, nil -} - -// ListIDPGroupsForTeamBySlug lists IDP groups connected to a team on GitHub -// given organization name and team slug. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#list-idp-groups-for-a-team -func (s *TeamsService) ListIDPGroupsForTeamBySlug(ctx context.Context, org, slug string) (*IDPGroupList, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/team-sync/group-mappings", org, slug) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - groups := new(IDPGroupList) - resp, err := s.client.Do(ctx, req, groups) - if err != nil { - return nil, resp, err - } - - return groups, resp, nil -} - -// CreateOrUpdateIDPGroupConnectionsByID creates, updates, or removes a connection -// between a team and an IDP group given organization and team IDs. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#create-or-update-idp-group-connections -func (s *TeamsService) CreateOrUpdateIDPGroupConnectionsByID(ctx context.Context, orgID, teamID int64, opts IDPGroupList) (*IDPGroupList, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/team-sync/group-mappings", orgID, teamID) - - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - groups := new(IDPGroupList) - resp, err := s.client.Do(ctx, req, groups) - if err != nil { - return nil, resp, err - } - - return groups, resp, nil -} - -// CreateOrUpdateIDPGroupConnectionsBySlug creates, updates, or removes a connection -// between a team and an IDP group given organization name and team slug. 
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#create-or-update-idp-group-connections -func (s *TeamsService) CreateOrUpdateIDPGroupConnectionsBySlug(ctx context.Context, org, slug string, opts IDPGroupList) (*IDPGroupList, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/team-sync/group-mappings", org, slug) - - req, err := s.client.NewRequest("PATCH", u, opts) - if err != nil { - return nil, nil, err - } - - groups := new(IDPGroupList) - resp, err := s.client.Do(ctx, req, groups) - if err != nil { - return nil, resp, err - } - - return groups, resp, nil -} - -// ExternalGroupMember represents a member of an external group. -type ExternalGroupMember struct { - MemberID *int64 `json:"member_id,omitempty"` - MemberLogin *string `json:"member_login,omitempty"` - MemberName *string `json:"member_name,omitempty"` - MemberEmail *string `json:"member_email,omitempty"` -} - -// ExternalGroupTeam represents a team connected to an external group. -type ExternalGroupTeam struct { - TeamID *int64 `json:"team_id,omitempty"` - TeamName *string `json:"team_name,omitempty"` -} - -// ExternalGroup represents an external group. -type ExternalGroup struct { - GroupID *int64 `json:"group_id,omitempty"` - GroupName *string `json:"group_name,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Teams []*ExternalGroupTeam `json:"teams,omitempty"` - Members []*ExternalGroupMember `json:"members,omitempty"` -} - -// ExternalGroupList represents a list of external groups. -type ExternalGroupList struct { - Groups []*ExternalGroup `json:"groups"` -} - -// GetExternalGroup fetches an external group. -// -// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#get-an-external-group -func (s *TeamsService) GetExternalGroup(ctx context.Context, org string, groupID int64) (*ExternalGroup, *Response, error) { - u := fmt.Sprintf("orgs/%v/external-group/%v", org, groupID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - externalGroup := new(ExternalGroup) - resp, err := s.client.Do(ctx, req, externalGroup) - if err != nil { - return nil, resp, err - } - - return externalGroup, resp, nil -} - -// ListExternalGroupsOptions specifies the optional parameters to the -// TeamsService.ListExternalGroups method. -type ListExternalGroupsOptions struct { - DisplayName *string `url:"display_name,omitempty"` - - ListOptions -} - -// ListExternalGroups lists external groups in an organization on GitHub. -// -// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#list-external-groups-in-an-organization -func (s *TeamsService) ListExternalGroups(ctx context.Context, org string, opts *ListExternalGroupsOptions) (*ExternalGroupList, *Response, error) { - u := fmt.Sprintf("orgs/%v/external-groups", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - externalGroups := new(ExternalGroupList) - resp, err := s.client.Do(ctx, req, externalGroups) - if err != nil { - return nil, resp, err - } - - return externalGroups, resp, nil -} - -// ListExternalGroupsForTeamBySlug lists external groups connected to a team on GitHub. 
-//
-// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#list-a-connection-between-an-external-group-and-a-team
-func (s *TeamsService) ListExternalGroupsForTeamBySlug(ctx context.Context, org, slug string) (*ExternalGroupList, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/external-groups", org, slug)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	externalGroups := new(ExternalGroupList)
-	resp, err := s.client.Do(ctx, req, externalGroups)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return externalGroups, resp, nil
-}
-
-// UpdateConnectedExternalGroup updates the connection between an external group and a team.
-//
-// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#update-the-connection-between-an-external-group-and-a-team
-func (s *TeamsService) UpdateConnectedExternalGroup(ctx context.Context, org, slug string, eg *ExternalGroup) (*ExternalGroup, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/external-groups", org, slug)
-
-	req, err := s.client.NewRequest("PATCH", u, eg)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	externalGroup := new(ExternalGroup)
-	resp, err := s.client.Do(ctx, req, externalGroup)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return externalGroup, resp, nil
-}
-
-// RemoveConnectedExternalGroup removes the connection between an external group and a team.
-//
-// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#remove-the-connection-between-an-external-group-and-a-team
-func (s *TeamsService) RemoveConnectedExternalGroup(ctx context.Context, org, slug string) (*Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/external-groups", org, slug)
-
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_discussion_comments.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_discussion_comments.go
deleted file mode 100644
index f3a1cc4dc0..0000000000
--- a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_discussion_comments.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2018 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// DiscussionComment represents a GitHub discussion comment in a team.
-type DiscussionComment struct {
-	Author        *User      `json:"author,omitempty"`
-	Body          *string    `json:"body,omitempty"`
-	BodyHTML      *string    `json:"body_html,omitempty"`
-	BodyVersion   *string    `json:"body_version,omitempty"`
-	CreatedAt     *Timestamp `json:"created_at,omitempty"`
-	LastEditedAt  *Timestamp `json:"last_edited_at,omitempty"`
-	DiscussionURL *string    `json:"discussion_url,omitempty"`
-	HTMLURL       *string    `json:"html_url,omitempty"`
-	NodeID        *string    `json:"node_id,omitempty"`
-	Number        *int       `json:"number,omitempty"`
-	UpdatedAt     *Timestamp `json:"updated_at,omitempty"`
-	URL           *string    `json:"url,omitempty"`
-	Reactions     *Reactions `json:"reactions,omitempty"`
-}
-
-func (c DiscussionComment) String() string {
-	return Stringify(c)
-}
-
-// DiscussionCommentListOptions specifies optional parameters to the
-// TeamsService.ListComments method.
-type DiscussionCommentListOptions struct { - // Sorts the discussion comments by the date they were created. - // Accepted values are asc and desc. Default is desc. - Direction string `url:"direction,omitempty"` - ListOptions -} - -// ListCommentsByID lists all comments on a team discussion by team ID. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#list-discussion-comments -func (s *TeamsService) ListCommentsByID(ctx context.Context, orgID, teamID int64, discussionNumber int, options *DiscussionCommentListOptions) ([]*DiscussionComment, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments", orgID, teamID, discussionNumber) - u, err := addOptions(u, options) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var comments []*DiscussionComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// ListCommentsBySlug lists all comments on a team discussion by team slug. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#list-discussion-comments -func (s *TeamsService) ListCommentsBySlug(ctx context.Context, org, slug string, discussionNumber int, options *DiscussionCommentListOptions) ([]*DiscussionComment, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments", org, slug, discussionNumber) - u, err := addOptions(u, options) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var comments []*DiscussionComment - resp, err := s.client.Do(ctx, req, &comments) - if err != nil { - return nil, resp, err - } - - return comments, resp, nil -} - -// GetCommentByID gets a specific comment on a team discussion by team ID. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#get-a-discussion-comment -func (s *TeamsService) GetCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber, commentNumber int) (*DiscussionComment, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v", orgID, teamID, discussionNumber, commentNumber) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - discussionComment := &DiscussionComment{} - resp, err := s.client.Do(ctx, req, discussionComment) - if err != nil { - return nil, resp, err - } - - return discussionComment, resp, nil -} - -// GetCommentBySlug gets a specific comment on a team discussion by team slug. -// Authenticated user must grant read:discussion scope. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#get-a-discussion-comment
-func (s *TeamsService) GetCommentBySlug(ctx context.Context, org, slug string, discussionNumber, commentNumber int) (*DiscussionComment, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v", org, slug, discussionNumber, commentNumber)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	discussionComment := &DiscussionComment{}
-	resp, err := s.client.Do(ctx, req, discussionComment)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return discussionComment, resp, nil
-}
-
-// CreateCommentByID creates a new comment on a team discussion by team ID.
-// Authenticated user must grant write:discussion scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#create-a-discussion-comment
-func (s *TeamsService) CreateCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments", orgID, teamID, discussionNumber)
-	req, err := s.client.NewRequest("POST", u, comment)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	discussionComment := &DiscussionComment{}
-	resp, err := s.client.Do(ctx, req, discussionComment)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return discussionComment, resp, nil
-}
-
-// CreateCommentBySlug creates a new comment on a team discussion by team slug.
-// Authenticated user must grant write:discussion scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#create-a-discussion-comment
-func (s *TeamsService) CreateCommentBySlug(ctx context.Context, org, slug string, discussionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments", org, slug, discussionNumber)
-	req, err := s.client.NewRequest("POST", u, comment)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	discussionComment := &DiscussionComment{}
-	resp, err := s.client.Do(ctx, req, discussionComment)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return discussionComment, resp, nil
-}
-
-// EditCommentByID edits the body text of a discussion comment by team ID.
-// Authenticated user must grant write:discussion scope.
-// User is allowed to edit body of a comment only.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#update-a-discussion-comment
-func (s *TeamsService) EditCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber, commentNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v", orgID, teamID, discussionNumber, commentNumber)
-	req, err := s.client.NewRequest("PATCH", u, comment)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	discussionComment := &DiscussionComment{}
-	resp, err := s.client.Do(ctx, req, discussionComment)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return discussionComment, resp, nil
-}
-
-// EditCommentBySlug edits the body text of a discussion comment by team slug.
-// Authenticated user must grant write:discussion scope.
-// User is allowed to edit body of a comment only.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#update-a-discussion-comment
-func (s *TeamsService) EditCommentBySlug(ctx context.Context, org, slug string, discussionNumber, commentNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v", org, slug, discussionNumber, commentNumber)
-	req, err := s.client.NewRequest("PATCH", u, comment)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	discussionComment := &DiscussionComment{}
-	resp, err := s.client.Do(ctx, req, discussionComment)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return discussionComment, resp, nil
-}
-
-// DeleteCommentByID deletes a comment on a team discussion by team ID.
-// Authenticated user must grant write:discussion scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#delete-a-discussion-comment
-func (s *TeamsService) DeleteCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber, commentNumber int) (*Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v", orgID, teamID, discussionNumber, commentNumber)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// DeleteCommentBySlug deletes a comment on a team discussion by team slug.
-// Authenticated user must grant write:discussion scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#delete-a-discussion-comment
-func (s *TeamsService) DeleteCommentBySlug(ctx context.Context, org, slug string, discussionNumber, commentNumber int) (*Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v", org, slug, discussionNumber, commentNumber)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_discussions.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_discussions.go
deleted file mode 100644
index 69a3ebd51f..0000000000
--- a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_discussions.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2018 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// TeamDiscussion represents a GitHub discussion in a team.
-type TeamDiscussion struct {
-	Author        *User      `json:"author,omitempty"`
-	Body          *string    `json:"body,omitempty"`
-	BodyHTML      *string    `json:"body_html,omitempty"`
-	BodyVersion   *string    `json:"body_version,omitempty"`
-	CommentsCount *int       `json:"comments_count,omitempty"`
-	CommentsURL   *string    `json:"comments_url,omitempty"`
-	CreatedAt     *Timestamp `json:"created_at,omitempty"`
-	LastEditedAt  *Timestamp `json:"last_edited_at,omitempty"`
-	HTMLURL       *string    `json:"html_url,omitempty"`
-	NodeID        *string    `json:"node_id,omitempty"`
-	Number        *int       `json:"number,omitempty"`
-	Pinned        *bool      `json:"pinned,omitempty"`
-	Private       *bool      `json:"private,omitempty"`
-	TeamURL       *string    `json:"team_url,omitempty"`
-	Title         *string    `json:"title,omitempty"`
-	UpdatedAt     *Timestamp `json:"updated_at,omitempty"`
-	URL           *string    `json:"url,omitempty"`
-	Reactions     *Reactions `json:"reactions,omitempty"`
-}
-
-func (d TeamDiscussion) String() string {
-	return Stringify(d)
-}
-
-// DiscussionListOptions specifies optional parameters to the
-// TeamsService.ListDiscussions method.
-type DiscussionListOptions struct {
-	// Sorts the discussions by the date they were created.
-	// Accepted values are asc and desc. Default is desc.
-	Direction string `url:"direction,omitempty"`
-
-	ListOptions
-}
-
-// ListDiscussionsByID lists all discussions on team's page given Organization and Team ID.
-// Authenticated user must grant read:discussion scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#list-discussions
-func (s *TeamsService) ListDiscussionsByID(ctx context.Context, orgID, teamID int64, opts *DiscussionListOptions) ([]*TeamDiscussion, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/discussions", orgID, teamID)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var teamDiscussions []*TeamDiscussion
-	resp, err := s.client.Do(ctx, req, &teamDiscussions)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return teamDiscussions, resp, nil
-}
-
-// ListDiscussionsBySlug lists all discussions on team's page given Organization name and Team's slug.
-// Authenticated user must grant read:discussion scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#list-discussions
-func (s *TeamsService) ListDiscussionsBySlug(ctx context.Context, org, slug string, opts *DiscussionListOptions) ([]*TeamDiscussion, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/discussions", org, slug)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var teamDiscussions []*TeamDiscussion
-	resp, err := s.client.Do(ctx, req, &teamDiscussions)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return teamDiscussions, resp, nil
-}
-
-// GetDiscussionByID gets a specific discussion on a team's page given Organization and Team ID.
-// Authenticated user must grant read:discussion scope.
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#get-a-discussion -func (s *TeamsService) GetDiscussionByID(ctx context.Context, orgID, teamID int64, discussionNumber int) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v", orgID, teamID, discussionNumber) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// GetDiscussionBySlug gets a specific discussion on a team's page given Organization name and Team's slug. -// Authenticated user must grant read:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#get-a-discussion -func (s *TeamsService) GetDiscussionBySlug(ctx context.Context, org, slug string, discussionNumber int) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v", org, slug, discussionNumber) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// CreateDiscussionByID creates a new discussion post on a team's page given Organization and Team ID. -// Authenticated user must grant write:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#create-a-discussion -func (s *TeamsService) CreateDiscussionByID(ctx context.Context, orgID, teamID int64, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/discussions", orgID, teamID) - req, err := s.client.NewRequest("POST", u, discussion) - if err != nil { - return nil, nil, err - } - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// CreateDiscussionBySlug creates a new discussion post on a team's page given Organization name and Team's slug. -// Authenticated user must grant write:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#create-a-discussion -func (s *TeamsService) CreateDiscussionBySlug(ctx context.Context, org, slug string, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/discussions", org, slug) - req, err := s.client.NewRequest("POST", u, discussion) - if err != nil { - return nil, nil, err - } - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// EditDiscussionByID edits the title and body text of a discussion post given Organization and Team ID. -// Authenticated user must grant write:discussion scope. -// User is allowed to change Title and Body of a discussion only. 
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#update-a-discussion -func (s *TeamsService) EditDiscussionByID(ctx context.Context, orgID, teamID int64, discussionNumber int, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v", orgID, teamID, discussionNumber) - req, err := s.client.NewRequest("PATCH", u, discussion) - if err != nil { - return nil, nil, err - } - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// EditDiscussionBySlug edits the title and body text of a discussion post given Organization name and Team's slug. -// Authenticated user must grant write:discussion scope. -// User is allowed to change Title and Body of a discussion only. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#update-a-discussion -func (s *TeamsService) EditDiscussionBySlug(ctx context.Context, org, slug string, discussionNumber int, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v", org, slug, discussionNumber) - req, err := s.client.NewRequest("PATCH", u, discussion) - if err != nil { - return nil, nil, err - } - - teamDiscussion := &TeamDiscussion{} - resp, err := s.client.Do(ctx, req, teamDiscussion) - if err != nil { - return nil, resp, err - } - - return teamDiscussion, resp, nil -} - -// DeleteDiscussionByID deletes a discussion from team's page given Organization and Team ID. -// Authenticated user must grant write:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#delete-a-discussion -func (s *TeamsService) DeleteDiscussionByID(ctx context.Context, orgID, teamID int64, discussionNumber int) (*Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v", orgID, teamID, discussionNumber) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeleteDiscussionBySlug deletes a discussion from team's page given Organization name and Team's slug. -// Authenticated user must grant write:discussion scope. -// -// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#delete-a-discussion -func (s *TeamsService) DeleteDiscussionBySlug(ctx context.Context, org, slug string, discussionNumber int) (*Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v", org, slug, discussionNumber) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_members.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_members.go deleted file mode 100644 index 58cb79744e..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/teams_members.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2018 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// TeamListTeamMembersOptions specifies the optional parameters to the -// TeamsService.ListTeamMembers method. -type TeamListTeamMembersOptions struct { - // Role filters members returned by their role in the team. 
Possible
-	// values are "all", "member", "maintainer". Default is "all".
-	Role string `url:"role,omitempty"`
-
-	ListOptions
-}
-
-// ListTeamMembersByID lists all of the users who are members of a team, given a specified
-// organization ID, by team ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-team-members
-func (s *TeamsService) ListTeamMembersByID(ctx context.Context, orgID, teamID int64, opts *TeamListTeamMembersOptions) ([]*User, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/members", orgID, teamID)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var members []*User
-	resp, err := s.client.Do(ctx, req, &members)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return members, resp, nil
-}
-
-// ListTeamMembersBySlug lists all of the users who are members of a team, given a specified
-// organization name, by team slug.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-team-members
-func (s *TeamsService) ListTeamMembersBySlug(ctx context.Context, org, slug string, opts *TeamListTeamMembersOptions) ([]*User, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/members", org, slug)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var members []*User
-	resp, err := s.client.Do(ctx, req, &members)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return members, resp, nil
-}
-
-// GetTeamMembershipByID returns the membership status for a user in a team, given a specified
-// organization ID, by team ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#get-team-membership-for-a-user
-func (s *TeamsService) GetTeamMembershipByID(ctx context.Context, orgID, teamID int64, user string) (*Membership, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/memberships/%v", orgID, teamID, user)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	t := new(Membership)
-	resp, err := s.client.Do(ctx, req, t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// GetTeamMembershipBySlug returns the membership status for a user in a team, given a specified
-// organization name, by team slug.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#get-team-membership-for-a-user
-func (s *TeamsService) GetTeamMembershipBySlug(ctx context.Context, org, slug, user string) (*Membership, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/memberships/%v", org, slug, user)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	t := new(Membership)
-	resp, err := s.client.Do(ctx, req, t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// TeamAddTeamMembershipOptions specifies the optional
-// parameters to the TeamsService.AddTeamMembership method.
-type TeamAddTeamMembershipOptions struct {
-	// Role specifies the role the user should have in the team. Possible
-	// values are:
-	//     member - a normal member of the team
-	//     maintainer - a team maintainer. Able to add/remove other team
-	//                  members, promote other team members to team
-	//                  maintainer, and edit the team’s name and description
-	//
-	// Default value is "member".
-	Role string `json:"role,omitempty"`
-}
-
-// AddTeamMembershipByID adds or invites a user to a team, given a specified
-// organization ID, by team ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#add-or-update-team-membership-for-a-user
-func (s *TeamsService) AddTeamMembershipByID(ctx context.Context, orgID, teamID int64, user string, opts *TeamAddTeamMembershipOptions) (*Membership, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/memberships/%v", orgID, teamID, user)
-	req, err := s.client.NewRequest("PUT", u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	t := new(Membership)
-	resp, err := s.client.Do(ctx, req, t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// AddTeamMembershipBySlug adds or invites a user to a team, given a specified
-// organization name, by team slug.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#add-or-update-team-membership-for-a-user
-func (s *TeamsService) AddTeamMembershipBySlug(ctx context.Context, org, slug, user string, opts *TeamAddTeamMembershipOptions) (*Membership, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/memberships/%v", org, slug, user)
-	req, err := s.client.NewRequest("PUT", u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	t := new(Membership)
-	resp, err := s.client.Do(ctx, req, t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// RemoveTeamMembershipByID removes a user from a team, given a specified
-// organization ID, by team ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#remove-team-membership-for-a-user
-func (s *TeamsService) RemoveTeamMembershipByID(ctx context.Context, orgID, teamID int64, user string) (*Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/memberships/%v", orgID, teamID, user)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// RemoveTeamMembershipBySlug removes a user from a team, given a specified
-// organization name, by team slug.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#remove-team-membership-for-a-user
-func (s *TeamsService) RemoveTeamMembershipBySlug(ctx context.Context, org, slug, user string) (*Response, error) {
-	u := fmt.Sprintf("orgs/%v/teams/%v/memberships/%v", org, slug, user)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// ListPendingTeamInvitationsByID gets pending invitation list of a team, given a specified
-// organization ID, by team ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-pending-team-invitations
-func (s *TeamsService) ListPendingTeamInvitationsByID(ctx context.Context, orgID, teamID int64, opts *ListOptions) ([]*Invitation, *Response, error) {
-	u := fmt.Sprintf("organizations/%v/team/%v/invitations", orgID, teamID)
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var pendingInvitations []*Invitation
-	resp, err := s.client.Do(ctx, req, &pendingInvitations)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pendingInvitations, resp, nil
-}
-
-// ListPendingTeamInvitationsBySlug gets pending invitation list of a team, given a specified
-// organization name, by team slug.
-// -// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-pending-team-invitations -func (s *TeamsService) ListPendingTeamInvitationsBySlug(ctx context.Context, org, slug string, opts *ListOptions) ([]*Invitation, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams/%v/invitations", org, slug) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pendingInvitations []*Invitation - resp, err := s.client.Do(ctx, req, &pendingInvitations) - if err != nil { - return nil, resp, err - } - - return pendingInvitations, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/timestamp.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/timestamp.go deleted file mode 100644 index 00c1235e9d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/timestamp.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "strconv" - "time" -) - -// Timestamp represents a time that can be unmarshalled from a JSON string -// formatted as either an RFC3339 or Unix timestamp. This is necessary for some -// fields since the GitHub API is inconsistent in how it represents times. All -// exported methods of time.Time can be called on Timestamp. -type Timestamp struct { - time.Time -} - -func (t Timestamp) String() string { - return t.Time.String() -} - -// GetTime returns std time.Time. -func (t *Timestamp) GetTime() *time.Time { - if t == nil { - return nil - } - return &t.Time -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Time is expected in RFC3339 or Unix format. -func (t *Timestamp) UnmarshalJSON(data []byte) (err error) { - str := string(data) - i, err := strconv.ParseInt(str, 10, 64) - if err == nil { - t.Time = time.Unix(i, 0) - if t.Time.Year() > 3000 { - t.Time = time.Unix(0, i*1e6) - } - } else { - t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str) - } - return -} - -// Equal reports whether t and u are equal based on time.Equal -func (t Timestamp) Equal(u Timestamp) bool { - return t.Time.Equal(u.Time) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users.go deleted file mode 100644 index 1b0670103b..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// UsersService handles communication with the user related -// methods of the GitHub API. -// -// GitHub API docs: https://docs.github.com/en/rest/users/ -type UsersService service - -// User represents a GitHub user. 
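Aside (illustrative, not from the vendored file): the Timestamp type deleted above is what lets one struct field absorb both time encodings GitHub emits. A self-contained check, exercising both branches of its UnmarshalJSON:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	var fromUnix, fromRFC3339 struct {
		CreatedAt github.Timestamp `json:"created_at"`
	}
	// Unix-seconds form: handled by the strconv.ParseInt branch.
	if err := json.Unmarshal([]byte(`{"created_at": 1257894000}`), &fromUnix); err != nil {
		log.Fatal(err)
	}
	// RFC3339 form: handled by the time.Parse fallback.
	if err := json.Unmarshal([]byte(`{"created_at": "2009-11-10T23:00:00Z"}`), &fromRFC3339); err != nil {
		log.Fatal(err)
	}
	fmt.Println(fromUnix.CreatedAt.Equal(fromRFC3339.CreatedAt)) // true
}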
-type User struct { - Login *string `json:"login,omitempty"` - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - Name *string `json:"name,omitempty"` - Company *string `json:"company,omitempty"` - Blog *string `json:"blog,omitempty"` - Location *string `json:"location,omitempty"` - Email *string `json:"email,omitempty"` - Hireable *bool `json:"hireable,omitempty"` - Bio *string `json:"bio,omitempty"` - TwitterUsername *string `json:"twitter_username,omitempty"` - PublicRepos *int `json:"public_repos,omitempty"` - PublicGists *int `json:"public_gists,omitempty"` - Followers *int `json:"followers,omitempty"` - Following *int `json:"following,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - SuspendedAt *Timestamp `json:"suspended_at,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin,omitempty"` - TotalPrivateRepos *int64 `json:"total_private_repos,omitempty"` - OwnedPrivateRepos *int64 `json:"owned_private_repos,omitempty"` - PrivateGists *int `json:"private_gists,omitempty"` - DiskUsage *int `json:"disk_usage,omitempty"` - Collaborators *int `json:"collaborators,omitempty"` - TwoFactorAuthentication *bool `json:"two_factor_authentication,omitempty"` - Plan *Plan `json:"plan,omitempty"` - LdapDn *string `json:"ldap_dn,omitempty"` - - // API URLs - URL *string `json:"url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://docs.github.com/en/rest/search/#text-match-metadata - TextMatches []*TextMatch `json:"text_matches,omitempty"` - - // Permissions and RoleName identify the permissions and role that a user has on a given - // repository. These are only populated when calling Repositories.ListCollaborators. - Permissions map[string]bool `json:"permissions,omitempty"` - RoleName *string `json:"role_name,omitempty"` -} - -func (u User) String() string { - return Stringify(u) -} - -// Get fetches a user. Passing the empty string will fetch the authenticated -// user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/users#get-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/users/users#get-a-user -func (s *UsersService) Get(ctx context.Context, user string) (*User, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v", user) - } else { - u = "user" - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - uResp := new(User) - resp, err := s.client.Do(ctx, req, uResp) - if err != nil { - return nil, resp, err - } - - return uResp, resp, nil -} - -// GetByID fetches a user. -// -// Note: GetByID uses the undocumented GitHub API endpoint /user/:id. 
-func (s *UsersService) GetByID(ctx context.Context, id int64) (*User, *Response, error) { - u := fmt.Sprintf("user/%d", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - user := new(User) - resp, err := s.client.Do(ctx, req, user) - if err != nil { - return nil, resp, err - } - - return user, resp, nil -} - -// Edit the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/users#update-the-authenticated-user -func (s *UsersService) Edit(ctx context.Context, user *User) (*User, *Response, error) { - u := "user" - req, err := s.client.NewRequest("PATCH", u, user) - if err != nil { - return nil, nil, err - } - - uResp := new(User) - resp, err := s.client.Do(ctx, req, uResp) - if err != nil { - return nil, resp, err - } - - return uResp, resp, nil -} - -// HovercardOptions specifies optional parameters to the UsersService.GetHovercard -// method. -type HovercardOptions struct { - // SubjectType specifies the additional information to be received about the hovercard. - // Possible values are: organization, repository, issue, pull_request. (Required when using subject_id.) - SubjectType string `url:"subject_type"` - - // SubjectID specifies the ID for the SubjectType. (Required when using subject_type.) - SubjectID string `url:"subject_id"` -} - -// Hovercard represents hovercard information about a user. -type Hovercard struct { - Contexts []*UserContext `json:"contexts,omitempty"` -} - -// UserContext represents the contextual information about user. -type UserContext struct { - Message *string `json:"message,omitempty"` - Octicon *string `json:"octicon,omitempty"` -} - -// GetHovercard fetches contextual information about user. It requires authentication -// via Basic Auth or via OAuth with the repo scope. -// -// GitHub API docs: https://docs.github.com/en/rest/users/users#get-contextual-information-for-a-user -func (s *UsersService) GetHovercard(ctx context.Context, user string, opts *HovercardOptions) (*Hovercard, *Response, error) { - u := fmt.Sprintf("users/%v/hovercard", user) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - hc := new(Hovercard) - resp, err := s.client.Do(ctx, req, hc) - if err != nil { - return nil, resp, err - } - - return hc, resp, nil -} - -// UserListOptions specifies optional parameters to the UsersService.ListAll -// method. -type UserListOptions struct { - // ID of the last user seen - Since int64 `url:"since,omitempty"` - - // Note: Pagination is powered exclusively by the Since parameter, - // ListOptions.Page has no effect. - // ListOptions.PerPage controls an undocumented GitHub API parameter. - ListOptions -} - -// ListAll lists all GitHub users. -// -// To paginate through all users, populate 'Since' with the ID of the last user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/users#list-users -func (s *UsersService) ListAll(ctx context.Context, opts *UserListOptions) ([]*User, *Response, error) { - u, err := addOptions("users", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} - -// ListInvitations lists all currently-open repository invitations for the -// authenticated user. 
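Aside (illustrative, not from the vendored file): per the UserListOptions comment above, this endpoint paginates on Since (the last user ID seen) rather than ListOptions.Page. A sketch of the resulting loop, with authentication elided:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opt := &github.UserListOptions{ListOptions: github.ListOptions{PerPage: 100}}
	for {
		users, _, err := client.Users.ListAll(ctx, opt)
		if err != nil {
			log.Fatal(err)
		}
		if len(users) == 0 {
			break // no more pages
		}
		for _, u := range users {
			fmt.Println(u.GetLogin())
		}
		// Advance the cursor to the last ID seen; Page would be ignored here.
		opt.Since = users[len(users)-1].GetID()
	}
}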
-// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#list-repository-invitations-for-the-authenticated-user -func (s *UsersService) ListInvitations(ctx context.Context, opts *ListOptions) ([]*RepositoryInvitation, *Response, error) { - u, err := addOptions("user/repository_invitations", opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - invites := []*RepositoryInvitation{} - resp, err := s.client.Do(ctx, req, &invites) - if err != nil { - return nil, resp, err - } - - return invites, resp, nil -} - -// AcceptInvitation accepts the currently-open repository invitation for the -// authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#accept-a-repository-invitation -func (s *UsersService) AcceptInvitation(ctx context.Context, invitationID int64) (*Response, error) { - u := fmt.Sprintf("user/repository_invitations/%v", invitationID) - req, err := s.client.NewRequest("PATCH", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DeclineInvitation declines the currently-open repository invitation for the -// authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#decline-a-repository-invitation -func (s *UsersService) DeclineInvitation(ctx context.Context, invitationID int64) (*Response, error) { - u := fmt.Sprintf("user/repository_invitations/%v", invitationID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_administration.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_administration.go deleted file mode 100644 index aef947ec45..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_administration.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// PromoteSiteAdmin promotes a user to a site administrator of a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#promote-an-ordinary-user-to-a-site-administrator -func (s *UsersService) PromoteSiteAdmin(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("users/%v/site_admin", user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// DemoteSiteAdmin demotes a user from site administrator of a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#demote-a-site-administrator-to-an-ordinary-user -func (s *UsersService) DemoteSiteAdmin(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("users/%v/site_admin", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// UserSuspendOptions represents the reason a user is being suspended. -type UserSuspendOptions struct { - Reason *string `json:"reason,omitempty"` -} - -// Suspend a user on a GitHub Enterprise instance. 
-// -// GitHub API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#suspend-a-user -func (s *UsersService) Suspend(ctx context.Context, user string, opts *UserSuspendOptions) (*Response, error) { - u := fmt.Sprintf("users/%v/suspended", user) - - req, err := s.client.NewRequest("PUT", u, opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Unsuspend a user on a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/enterprise/v3/enterprise-admin/users/#unsuspend-a-user -func (s *UsersService) Unsuspend(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("users/%v/suspended", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_blocking.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_blocking.go deleted file mode 100644 index 3d38d94789..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_blocking.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListBlockedUsers lists all the blocked users by the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/blocking#list-users-blocked-by-the-authenticated-user -func (s *UsersService) ListBlockedUsers(ctx context.Context, opts *ListOptions) ([]*User, *Response, error) { - u := "user/blocks" - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - var blockedUsers []*User - resp, err := s.client.Do(ctx, req, &blockedUsers) - if err != nil { - return nil, resp, err - } - - return blockedUsers, resp, nil -} - -// IsBlocked reports whether specified user is blocked by the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/blocking#check-if-a-user-is-blocked-by-the-authenticated-user -func (s *UsersService) IsBlocked(ctx context.Context, user string) (bool, *Response, error) { - u := fmt.Sprintf("user/blocks/%v", user) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - resp, err := s.client.Do(ctx, req, nil) - isBlocked, err := parseBoolResponse(err) - return isBlocked, resp, err -} - -// BlockUser blocks specified user for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/blocking#block-a-user -func (s *UsersService) BlockUser(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/blocks/%v", user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} - -// UnblockUser unblocks specified user for the authenticated user. 
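Aside (illustrative, not from the vendored file): IsBlocked above collapses the check endpoint's found/not-found answer into a bool via parseBoolResponse, so callers never inspect status codes. A sketch with a placeholder username and authentication elided:

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	blocked, _, err := client.Users.IsBlocked(ctx, "someuser")
	if err != nil {
		log.Fatal(err)
	}
	if !blocked {
		if _, err := client.Users.BlockUser(ctx, "someuser"); err != nil {
			log.Fatal(err)
		}
	}
}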
-// -// GitHub API docs: https://docs.github.com/en/rest/users/blocking#unblock-a-user -func (s *UsersService) UnblockUser(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/blocks/%v", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeBlockUsersPreview) - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_emails.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_emails.go deleted file mode 100644 index 67bd210e8d..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_emails.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "context" - -// UserEmail represents user's email address -type UserEmail struct { - Email *string `json:"email,omitempty"` - Primary *bool `json:"primary,omitempty"` - Verified *bool `json:"verified,omitempty"` - Visibility *string `json:"visibility,omitempty"` -} - -// ListEmails lists all email addresses for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/emails#list-email-addresses-for-the-authenticated-user -func (s *UsersService) ListEmails(ctx context.Context, opts *ListOptions) ([]*UserEmail, *Response, error) { - u := "user/emails" - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var emails []*UserEmail - resp, err := s.client.Do(ctx, req, &emails) - if err != nil { - return nil, resp, err - } - - return emails, resp, nil -} - -// AddEmails adds email addresses of the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/emails#add-an-email-address-for-the-authenticated-user -func (s *UsersService) AddEmails(ctx context.Context, emails []string) ([]*UserEmail, *Response, error) { - u := "user/emails" - req, err := s.client.NewRequest("POST", u, emails) - if err != nil { - return nil, nil, err - } - - var e []*UserEmail - resp, err := s.client.Do(ctx, req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// DeleteEmails deletes email addresses from authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/emails#delete-an-email-address-for-the-authenticated-user -func (s *UsersService) DeleteEmails(ctx context.Context, emails []string) (*Response, error) { - u := "user/emails" - req, err := s.client.NewRequest("DELETE", u, emails) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// SetEmailVisibility sets the visibility for the primary email address of the authenticated user. -// `visibility` can be "private" or "public". 
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/emails#set-primary-email-visibility-for-the-authenticated-user
-func (s *UsersService) SetEmailVisibility(ctx context.Context, visibility string) ([]*UserEmail, *Response, error) {
-	u := "user/email/visibility"
-
-	updateVisibilityReq := &UserEmail{
-		Visibility: &visibility,
-	}
-
-	req, err := s.client.NewRequest("PATCH", u, updateVisibilityReq)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var e []*UserEmail
-	resp, err := s.client.Do(ctx, req, &e)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return e, resp, nil
-}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_followers.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_followers.go
deleted file mode 100644
index 1266e0e9ee..0000000000
--- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_followers.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// ListFollowers lists the followers for a user. Passing the empty string will
-// fetch followers for the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-followers-of-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-followers-of-a-user
-func (s *UsersService) ListFollowers(ctx context.Context, user string, opts *ListOptions) ([]*User, *Response, error) {
-	var u string
-	if user != "" {
-		u = fmt.Sprintf("users/%v/followers", user)
-	} else {
-		u = "user/followers"
-	}
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var users []*User
-	resp, err := s.client.Do(ctx, req, &users)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return users, resp, nil
-}
-
-// ListFollowing lists the people that a user is following. Passing the empty
-// string will list people the authenticated user is following.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-the-people-the-authenticated-user-follows
-// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-the-people-a-user-follows
-func (s *UsersService) ListFollowing(ctx context.Context, user string, opts *ListOptions) ([]*User, *Response, error) {
-	var u string
-	if user != "" {
-		u = fmt.Sprintf("users/%v/following", user)
-	} else {
-		u = "user/following"
-	}
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var users []*User
-	resp, err := s.client.Do(ctx, req, &users)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return users, resp, nil
-}
-
-// IsFollowing checks if "user" is following "target". Passing the empty
-// string for "user" will check if the authenticated user is following "target".
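Aside (illustrative, not from the vendored file): SetEmailVisibility above PATCHes a one-field change and gets the full, updated address list back; "private" could equally be "public". A sketch with authentication elided:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	emails, _, err := client.Users.SetEmailVisibility(ctx, "private")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range emails {
		fmt.Printf("%s visibility=%s\n", e.GetEmail(), e.GetVisibility())
	}
}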
-// -// GitHub API docs: https://docs.github.com/en/rest/users/followers#check-if-a-person-is-followed-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/users/followers#check-if-a-user-follows-another-user -func (s *UsersService) IsFollowing(ctx context.Context, user, target string) (bool, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/following/%v", user, target) - } else { - u = fmt.Sprintf("user/following/%v", target) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - following, err := parseBoolResponse(err) - return following, resp, err -} - -// Follow will cause the authenticated user to follow the specified user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/followers#follow-a-user -func (s *UsersService) Follow(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/following/%v", user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Unfollow will cause the authenticated user to unfollow the specified user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/followers#unfollow-a-user -func (s *UsersService) Unfollow(ctx context.Context, user string) (*Response, error) { - u := fmt.Sprintf("user/following/%v", user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_gpg_keys.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_gpg_keys.go deleted file mode 100644 index 54189b8307..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_gpg_keys.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2016 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GPGKey represents a GitHub user's public GPG key used to verify GPG signed commits and tags. -// -// https://developer.github.com/changes/2016-04-04-git-signing-api-preview/ -type GPGKey struct { - ID *int64 `json:"id,omitempty"` - PrimaryKeyID *int64 `json:"primary_key_id,omitempty"` - KeyID *string `json:"key_id,omitempty"` - RawKey *string `json:"raw_key,omitempty"` - PublicKey *string `json:"public_key,omitempty"` - Emails []*GPGEmail `json:"emails,omitempty"` - Subkeys []*GPGKey `json:"subkeys,omitempty"` - CanSign *bool `json:"can_sign,omitempty"` - CanEncryptComms *bool `json:"can_encrypt_comms,omitempty"` - CanEncryptStorage *bool `json:"can_encrypt_storage,omitempty"` - CanCertify *bool `json:"can_certify,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - ExpiresAt *Timestamp `json:"expires_at,omitempty"` -} - -// String stringifies a GPGKey. -func (k GPGKey) String() string { - return Stringify(k) -} - -// GPGEmail represents an email address associated to a GPG key. -type GPGEmail struct { - Email *string `json:"email,omitempty"` - Verified *bool `json:"verified,omitempty"` -} - -// ListGPGKeys lists the public GPG keys for a user. Passing the empty -// string will fetch keys for the authenticated user. It requires authentication -// via Basic Auth or via OAuth with at least read:gpg_key scope. 
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#list-gpg-keys-for-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#list-gpg-keys-for-a-user
-func (s *UsersService) ListGPGKeys(ctx context.Context, user string, opts *ListOptions) ([]*GPGKey, *Response, error) {
-	var u string
-	if user != "" {
-		u = fmt.Sprintf("users/%v/gpg_keys", user)
-	} else {
-		u = "user/gpg_keys"
-	}
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var keys []*GPGKey
-	resp, err := s.client.Do(ctx, req, &keys)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return keys, resp, nil
-}
-
-// GetGPGKey gets extended details for a single GPG key. It requires authentication
-// via Basic Auth or via OAuth with at least read:gpg_key scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#get-a-gpg-key-for-the-authenticated-user
-func (s *UsersService) GetGPGKey(ctx context.Context, id int64) (*GPGKey, *Response, error) {
-	u := fmt.Sprintf("user/gpg_keys/%v", id)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	key := &GPGKey{}
-	resp, err := s.client.Do(ctx, req, key)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return key, resp, nil
-}
-
-// CreateGPGKey creates a GPG key. It requires authentication via Basic Auth
-// or OAuth with at least write:gpg_key scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#create-a-gpg-key
-func (s *UsersService) CreateGPGKey(ctx context.Context, armoredPublicKey string) (*GPGKey, *Response, error) {
-	gpgKey := &struct {
-		ArmoredPublicKey string `json:"armored_public_key"`
-	}{ArmoredPublicKey: armoredPublicKey}
-	req, err := s.client.NewRequest("POST", "user/gpg_keys", gpgKey)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	key := &GPGKey{}
-	resp, err := s.client.Do(ctx, req, key)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return key, resp, nil
-}
-
-// DeleteGPGKey deletes a GPG key. It requires authentication via Basic Auth or
-// via OAuth with at least admin:gpg_key scope.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#delete-a-gpg-key-for-the-authenticated-user
-func (s *UsersService) DeleteGPGKey(ctx context.Context, id int64) (*Response, error) {
-	u := fmt.Sprintf("user/gpg_keys/%v", id)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
}
diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_keys.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_keys.go
deleted file mode 100644
index b49b8e4b4e..0000000000
--- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_keys.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// Key represents a public SSH key used to authenticate a user or deploy script.
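Aside (illustrative, not from the vendored file): CreateGPGKey above takes the raw armored key text rather than a struct, and wraps it in the expected JSON body itself. A sketch with a placeholder key block; per the comment above, a real call needs OAuth with the write:gpg_key scope:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Placeholder armored key; substitute real exported key material.
	armored := "-----BEGIN PGP PUBLIC KEY BLOCK-----\n...\n-----END PGP PUBLIC KEY BLOCK-----"
	key, _, err := client.Users.CreateGPGKey(ctx, armored)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.GetKeyID())
}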
-type Key struct { - ID *int64 `json:"id,omitempty"` - Key *string `json:"key,omitempty"` - URL *string `json:"url,omitempty"` - Title *string `json:"title,omitempty"` - ReadOnly *bool `json:"read_only,omitempty"` - Verified *bool `json:"verified,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - AddedBy *string `json:"added_by,omitempty"` - LastUsed *Timestamp `json:"last_used,omitempty"` -} - -func (k Key) String() string { - return Stringify(k) -} - -// ListKeys lists the verified public keys for a user. Passing the empty -// string will fetch keys for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/keys#list-public-ssh-keys-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/users/keys#list-public-keys-for-a-user -func (s *UsersService) ListKeys(ctx context.Context, user string, opts *ListOptions) ([]*Key, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/keys", user) - } else { - u = "user/keys" - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var keys []*Key - resp, err := s.client.Do(ctx, req, &keys) - if err != nil { - return nil, resp, err - } - - return keys, resp, nil -} - -// GetKey fetches a single public key. -// -// GitHub API docs: https://docs.github.com/en/rest/users/keys#get-a-public-ssh-key-for-the-authenticated-user -func (s *UsersService) GetKey(ctx context.Context, id int64) (*Key, *Response, error) { - u := fmt.Sprintf("user/keys/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - key := new(Key) - resp, err := s.client.Do(ctx, req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, nil -} - -// CreateKey adds a public key for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/users/keys#create-a-public-ssh-key-for-the-authenticated-user -func (s *UsersService) CreateKey(ctx context.Context, key *Key) (*Key, *Response, error) { - u := "user/keys" - - req, err := s.client.NewRequest("POST", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(ctx, req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteKey deletes a public key. -// -// GitHub API docs: https://docs.github.com/en/rest/users/keys#delete-a-public-ssh-key-for-the-authenticated-user -func (s *UsersService) DeleteKey(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("user/keys/%v", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_packages.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_packages.go deleted file mode 100644 index da04919ecc..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_packages.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// List the packages for a user. Passing the empty string for "user" will -// list packages for the authenticated user. 
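Aside (illustrative, not from the vendored file): unlike CreateGPGKey, CreateKey in users_keys.go above accepts the *Key struct directly. A sketch with placeholder title and key material, authentication elided:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	key := &github.Key{
		Title: github.String("work laptop"),
		Key:   github.String("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... user@host"), // placeholder
	}
	created, _, err := client.Users.CreateKey(ctx, key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(created.GetID())
}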
-// -// GitHub API docs: https://docs.github.com/en/rest/packages#list-packages-for-the-authenticated-users-namespace -// GitHub API docs: https://docs.github.com/en/rest/packages#list-packages-for-a-user -func (s *UsersService) ListPackages(ctx context.Context, user string, opts *PackageListOptions) ([]*Package, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages", user) - } else { - u = "user/packages" - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var packages []*Package - resp, err := s.client.Do(ctx, req, &packages) - if err != nil { - return nil, resp, err - } - - return packages, resp, nil -} - -// Get a package by name for a user. Passing the empty string for "user" will -// get the package for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-for-a-user -func (s *UsersService) GetPackage(ctx context.Context, user, packageType, packageName string) (*Package, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages/%v/%v", user, packageType, packageName) - } else { - u = fmt.Sprintf("user/packages/%v/%v", packageType, packageName) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pack *Package - resp, err := s.client.Do(ctx, req, &pack) - if err != nil { - return nil, resp, err - } - - return pack, resp, nil -} - -// Delete a package from a user. Passing the empty string for "user" will -// delete the package for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-for-a-user -func (s *UsersService) DeletePackage(ctx context.Context, user, packageType, packageName string) (*Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages/%v/%v", user, packageType, packageName) - } else { - u = fmt.Sprintf("user/packages/%v/%v", packageType, packageName) - } - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Restore a package to a user. Passing the empty string for "user" will -// restore the package for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-for-a-user -func (s *UsersService) RestorePackage(ctx context.Context, user, packageType, packageName string) (*Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages/%v/%v/restore", user, packageType, packageName) - } else { - u = fmt.Sprintf("user/packages/%v/%v/restore", packageType, packageName) - } - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Get all versions of a package for a user. Passing the empty string for "user" will -// get versions for the authenticated user. 
-// -// GitHub API docs: https://docs.github.com/en/rest/packages#get-all-package-versions-for-a-package-owned-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/packages#get-all-package-versions-for-a-package-owned-by-a-user -func (s *UsersService) PackageGetAllVersions(ctx context.Context, user, packageType, packageName string, opts *PackageListOptions) ([]*PackageVersion, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages/%v/%v/versions", user, packageType, packageName) - } else { - u = fmt.Sprintf("user/packages/%v/%v/versions", packageType, packageName) - } - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var versions []*PackageVersion - resp, err := s.client.Do(ctx, req, &versions) - if err != nil { - return nil, resp, err - } - - return versions, resp, nil -} - -// Get a specific version of a package for a user. Passing the empty string for "user" will -// get the version for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-version-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-version-for-a-user -func (s *UsersService) PackageGetVersion(ctx context.Context, user, packageType, packageName string, packageVersionID int64) (*PackageVersion, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages/%v/%v/versions/%v", user, packageType, packageName, packageVersionID) - } else { - u = fmt.Sprintf("user/packages/%v/%v/versions/%v", packageType, packageName, packageVersionID) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var version *PackageVersion - resp, err := s.client.Do(ctx, req, &version) - if err != nil { - return nil, resp, err - } - - return version, resp, nil -} - -// Delete a package version for a user. Passing the empty string for "user" will -// delete the version for the authenticated user. -// -// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-version-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/packages#delete-package-version-for-a-user -func (s *UsersService) PackageDeleteVersion(ctx context.Context, user, packageType, packageName string, packageVersionID int64) (*Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages/%v/%v/versions/%v", user, packageType, packageName, packageVersionID) - } else { - u = fmt.Sprintf("user/packages/%v/%v/versions/%v", packageType, packageName, packageVersionID) - } - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} - -// Restore a package version to a user. Passing the empty string for "user" will -// restore the version for the authenticated user. 
-// -// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-version-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/packages#restore-package-version-for-a-user -func (s *UsersService) PackageRestoreVersion(ctx context.Context, user, packageType, packageName string, packageVersionID int64) (*Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/packages/%v/%v/versions/%v/restore", user, packageType, packageName, packageVersionID) - } else { - u = fmt.Sprintf("user/packages/%v/%v/versions/%v/restore", packageType, packageName, packageVersionID) - } - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_projects.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_projects.go deleted file mode 100644 index 0cbd61f923..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_projects.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2019 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// ListProjects lists the projects for the specified user. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-user-projects -func (s *UsersService) ListProjects(ctx context.Context, user string, opts *ProjectListOptions) ([]*Project, *Response, error) { - u := fmt.Sprintf("users/%v/projects", user) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - var projects []*Project - resp, err := s.client.Do(ctx, req, &projects) - if err != nil { - return nil, resp, err - } - - return projects, resp, nil -} - -// CreateUserProjectOptions specifies the parameters to the UsersService.CreateProject method. -type CreateUserProjectOptions struct { - // The name of the project. (Required.) - Name string `json:"name"` - // The description of the project. (Optional.) - Body *string `json:"body,omitempty"` -} - -// CreateProject creates a GitHub Project for the current user. -// -// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-a-user-project -func (s *UsersService) CreateProject(ctx context.Context, opts *CreateUserProjectOptions) (*Project, *Response, error) { - u := "user/projects" - req, err := s.client.NewRequest("POST", u, opts) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeProjectsPreview) - - project := &Project{} - resp, err := s.client.Do(ctx, req, project) - if err != nil { - return nil, resp, err - } - - return project, resp, nil -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go deleted file mode 100644 index 567623f887..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. 
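Aside (illustrative, not from the vendored file): the package-version helpers above all share the "empty user means the authenticated user" convention. A sketch that lists the versions of a hypothetical container package and restores the first version returned:

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Empty user targets the authenticated user's own namespace.
	versions, _, err := client.Users.PackageGetAllVersions(ctx, "", "container", "my-image", nil)
	if err != nil {
		log.Fatal(err)
	}
	if len(versions) > 0 {
		if _, err := client.Users.PackageRestoreVersion(ctx, "", "container", "my-image", versions[0].GetID()); err != nil {
			log.Fatal(err)
		}
	}
}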
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// SSHSigningKey represents a public SSH key used to sign git commits.
-type SSHSigningKey struct {
-	ID        *int64     `json:"id,omitempty"`
-	Key       *string    `json:"key,omitempty"`
-	Title     *string    `json:"title,omitempty"`
-	CreatedAt *Timestamp `json:"created_at,omitempty"`
-}
-
-func (k SSHSigningKey) String() string {
-	return Stringify(k)
-}
-
-// ListSSHSigningKeys lists the SSH signing keys for a user. Passing an empty
-// username string will fetch SSH signing keys for the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#list-ssh-signing-keys-for-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#list-ssh-signing-keys-for-a-user
-func (s *UsersService) ListSSHSigningKeys(ctx context.Context, user string, opts *ListOptions) ([]*SSHSigningKey, *Response, error) {
-	var u string
-	if user != "" {
-		u = fmt.Sprintf("users/%v/ssh_signing_keys", user)
-	} else {
-		u = "user/ssh_signing_keys"
-	}
-	u, err := addOptions(u, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var keys []*SSHSigningKey
-	resp, err := s.client.Do(ctx, req, &keys)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return keys, resp, nil
-}
-
-// GetSSHSigningKey fetches a single SSH signing key for the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#get-an-ssh-signing-key-for-the-authenticated-user
-func (s *UsersService) GetSSHSigningKey(ctx context.Context, id int64) (*SSHSigningKey, *Response, error) {
-	u := fmt.Sprintf("user/ssh_signing_keys/%v", id)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	key := new(SSHSigningKey)
-	resp, err := s.client.Do(ctx, req, key)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return key, resp, nil
-}
-
-// CreateSSHSigningKey adds an SSH signing key for the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#create-a-ssh-signing-key-for-the-authenticated-user
-func (s *UsersService) CreateSSHSigningKey(ctx context.Context, key *Key) (*SSHSigningKey, *Response, error) {
-	u := "user/ssh_signing_keys"
-
-	req, err := s.client.NewRequest("POST", u, key)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	k := new(SSHSigningKey)
-	resp, err := s.client.Do(ctx, req, k)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return k, resp, nil
-}
-
-// DeleteSSHSigningKey deletes an SSH signing key for the authenticated user.
-// -// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#delete-an-ssh-signing-key-for-the-authenticated-user -func (s *UsersService) DeleteSSHSigningKey(ctx context.Context, id int64) (*Response, error) { - u := fmt.Sprintf("user/ssh_signing_keys/%v", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/with_appengine.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/with_appengine.go deleted file mode 100644 index 9053ce1059..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/with_appengine.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -// This file provides glue for making github work on App Engine. - -package github - -import ( - "context" - "net/http" -) - -func withContext(ctx context.Context, req *http.Request) *http.Request { - // No-op because App Engine adds context to a request differently. - return req -} diff --git a/openshift/tools/vendor/github.com/google/go-github/v53/github/without_appengine.go b/openshift/tools/vendor/github.com/google/go-github/v53/github/without_appengine.go deleted file mode 100644 index 0024ae4157..0000000000 --- a/openshift/tools/vendor/github.com/google/go-github/v53/github/without_appengine.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !appengine -// +build !appengine - -// This file provides glue for making github work without App Engine. - -package github - -import ( - "context" - "net/http" -) - -func withContext(ctx context.Context, req *http.Request) *http.Request { - return req.WithContext(ctx) -} diff --git a/openshift/tools/vendor/github.com/google/go-querystring/LICENSE b/openshift/tools/vendor/github.com/google/go-querystring/LICENSE deleted file mode 100644 index ae121a1e46..0000000000 --- a/openshift/tools/vendor/github.com/google/go-querystring/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 Google. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/google/go-querystring/query/encode.go b/openshift/tools/vendor/github.com/google/go-querystring/query/encode.go deleted file mode 100644 index 91198f819a..0000000000 --- a/openshift/tools/vendor/github.com/google/go-querystring/query/encode.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package query implements encoding of structs into URL query parameters. -// -// As a simple example: -// -// type Options struct { -// Query string `url:"q"` -// ShowAll bool `url:"all"` -// Page int `url:"page"` -// } -// -// opt := Options{ "foo", true, 2 } -// v, _ := query.Values(opt) -// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" -// -// The exact mapping between Go values and url.Values is described in the -// documentation for the Values() function. -package query - -import ( - "bytes" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -var timeType = reflect.TypeOf(time.Time{}) - -var encoderType = reflect.TypeOf(new(Encoder)).Elem() - -// Encoder is an interface implemented by any type that wishes to encode -// itself into URL values in a non-standard way. -type Encoder interface { - EncodeValues(key string, v *url.Values) error -} - -// Values returns the url.Values encoding of v. -// -// Values expects to be passed a struct, and traverses it recursively using the -// following encoding rules. -// -// Each exported struct field is encoded as a URL parameter unless -// -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option -// -// The empty values are false, 0, any nil pointer or interface value, any array -// slice, map, or string of length zero, and any type (such as time.Time) that -// returns true for IsZero(). -// -// The URL parameter name defaults to the struct field name but can be -// specified in the struct field's tag value. The "url" key in the struct -// field's tag value is the key name, followed by an optional comma and -// options. For example: -// -// // Field is ignored by this package. -// Field int `url:"-"` -// -// // Field appears as URL parameter "myName". -// Field int `url:"myName"` -// -// // Field appears as URL parameter "myName" and the field is omitted if -// // its value is empty -// Field int `url:"myName,omitempty"` -// -// // Field appears as URL parameter "Field" (the default), but the field -// // is skipped if empty. Note the leading comma. -// Field int `url:",omitempty"` -// -// For encoding individual field values, the following type-dependent rules -// apply: -// -// Boolean values default to encoding as the strings "true" or "false". -// Including the "int" option signals that the field should be encoded as the -// strings "1" or "0". -// -// time.Time values default to encoding as RFC3339 timestamps. 
Including the -// "unix" option signals that the field should be encoded as a Unix time (see -// time.Unix()). The "unixmilli" and "unixnano" options will encode the number -// of milliseconds and nanoseconds, respectively, since January 1, 1970 (see -// time.UnixNano()). Including the "layout" struct tag (separate from the -// "url" tag) will use the value of the "layout" tag as a layout passed to -// time.Format. For example: -// -// // Encode a time.Time as YYYY-MM-DD -// Field time.Time `layout:"2006-01-02"` -// -// Slice and Array values default to encoding as multiple URL values of the -// same name. Including the "comma" option signals that the field should be -// encoded as a single comma-delimited value. Including the "space" option -// similarly encodes the value as a single space-delimited string. Including -// the "semicolon" option will encode the value as a semicolon-delimited string. -// Including the "brackets" option signals that the multiple URL values should -// have "[]" appended to the value name. "numbered" will append a number to -// the end of each incidence of the value name, example: -// name0=value0&name1=value1, etc. Including the "del" struct tag (separate -// from the "url" tag) will use the value of the "del" tag as the delimiter. -// For example: -// -// // Encode a slice of bools as ints ("1" for true, "0" for false), -// // separated by exclamation points "!". -// Field []bool `url:",int" del:"!"` -// -// Anonymous struct fields are usually encoded as if their inner exported -// fields were fields in the outer struct, subject to the standard Go -// visibility rules. An anonymous struct field with a name given in its URL -// tag is treated as having that name, rather than being anonymous. -// -// Non-nil pointer values are encoded as the value pointed to. -// -// Nested structs are encoded including parent fields in value names for -// scoping. e.g: -// -// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" -// -// All other values are encoded using their default string representation. -// -// Multiple fields that encode to the same URL parameter name will be included -// as multiple URL values of the same name. -func Values(v interface{}) (url.Values, error) { - values := make(url.Values) - val := reflect.ValueOf(v) - for val.Kind() == reflect.Ptr { - if val.IsNil() { - return values, nil - } - val = val.Elem() - } - - if v == nil { - return values, nil - } - - if val.Kind() != reflect.Struct { - return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) - } - - err := reflectValue(values, val, "") - return values, err -} - -// reflectValue populates the values parameter from the struct fields in val. -// Embedded structs are followed recursively (using the rules defined in the -// Values function documentation) breadth-first. 
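Aside (illustrative, not from the vendored file): the tag grammar documented above is easiest to see in one round trip. A minimal sketch combining the "comma", "unix", and "omitempty" options:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/go-querystring/query"
)

type SearchOptions struct {
	Labels []string  `url:"labels,comma"`   // one comma-joined value instead of repeated keys
	Since  time.Time `url:"since,unix"`     // encoded as seconds since the epoch
	Page   int       `url:"page,omitempty"` // dropped entirely when zero
}

func main() {
	v, err := query.Values(SearchOptions{
		Labels: []string{"bug", "help wanted"},
		Since:  time.Unix(1257894000, 0),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Encode()) // labels=bug%2Chelp+wanted&since=1257894000
}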
-func reflectValue(values url.Values, val reflect.Value, scope string) error { - var embedded []reflect.Value - - typ := val.Type() - for i := 0; i < typ.NumField(); i++ { - sf := typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - - sv := val.Field(i) - tag := sf.Tag.Get("url") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - - if name == "" { - if sf.Anonymous { - v := reflect.Indirect(sv) - if v.IsValid() && v.Kind() == reflect.Struct { - // save embedded struct for later processing - embedded = append(embedded, v) - continue - } - } - - name = sf.Name - } - - if scope != "" { - name = scope + "[" + name + "]" - } - - if opts.Contains("omitempty") && isEmptyValue(sv) { - continue - } - - if sv.Type().Implements(encoderType) { - // if sv is a nil pointer and the custom encoder is defined on a non-pointer - // method receiver, set sv to the zero value of the underlying type - if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(encoderType) { - sv = reflect.New(sv.Type().Elem()) - } - - m := sv.Interface().(Encoder) - if err := m.EncodeValues(name, &values); err != nil { - return err - } - continue - } - - // recursively dereference pointers. break on nil pointers - for sv.Kind() == reflect.Ptr { - if sv.IsNil() { - break - } - sv = sv.Elem() - } - - if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { - var del string - if opts.Contains("comma") { - del = "," - } else if opts.Contains("space") { - del = " " - } else if opts.Contains("semicolon") { - del = ";" - } else if opts.Contains("brackets") { - name = name + "[]" - } else { - del = sf.Tag.Get("del") - } - - if del != "" { - s := new(bytes.Buffer) - first := true - for i := 0; i < sv.Len(); i++ { - if first { - first = false - } else { - s.WriteString(del) - } - s.WriteString(valueString(sv.Index(i), opts, sf)) - } - values.Add(name, s.String()) - } else { - for i := 0; i < sv.Len(); i++ { - k := name - if opts.Contains("numbered") { - k = fmt.Sprintf("%s%d", name, i) - } - values.Add(k, valueString(sv.Index(i), opts, sf)) - } - } - continue - } - - if sv.Type() == timeType { - values.Add(name, valueString(sv, opts, sf)) - continue - } - - if sv.Kind() == reflect.Struct { - if err := reflectValue(values, sv, name); err != nil { - return err - } - continue - } - - values.Add(name, valueString(sv, opts, sf)) - } - - for _, f := range embedded { - if err := reflectValue(values, f, scope); err != nil { - return err - } - } - - return nil -} - -// valueString returns the string representation of a value. -func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return "" - } - v = v.Elem() - } - - if v.Kind() == reflect.Bool && opts.Contains("int") { - if v.Bool() { - return "1" - } - return "0" - } - - if v.Type() == timeType { - t := v.Interface().(time.Time) - if opts.Contains("unix") { - return strconv.FormatInt(t.Unix(), 10) - } - if opts.Contains("unixmilli") { - return strconv.FormatInt((t.UnixNano() / 1e6), 10) - } - if opts.Contains("unixnano") { - return strconv.FormatInt(t.UnixNano(), 10) - } - if layout := sf.Tag.Get("layout"); layout != "" { - return t.Format(layout) - } - return t.Format(time.RFC3339) - } - - return fmt.Sprint(v.Interface()) -} - -// isEmptyValue checks if a value should be considered empty for the purposes -// of omitting fields with the "omitempty" option. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - type zeroable interface { - IsZero() bool - } - - if z, ok := v.Interface().(zeroable); ok { - return z.IsZero() - } - - return false -} - -// tagOptions is the string following a comma in a struct field's "url" tag, or -// the empty string. It does not include the leading comma. -type tagOptions []string - -// parseTag splits a struct field's url tag into its name and comma-separated -// options. -func parseTag(tag string) (string, tagOptions) { - s := strings.Split(tag, ",") - return s[0], s[1:] -} - -// Contains checks whether the tagOptions contains the specified option. -func (o tagOptions) Contains(option string) bool { - for _, s := range o { - if s == option { - return true - } - } - return false -} diff --git a/openshift/tools/vendor/github.com/google/shlex/COPYING b/openshift/tools/vendor/github.com/google/shlex/COPYING deleted file mode 100644 index d645695673..0000000000 --- a/openshift/tools/vendor/github.com/google/shlex/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/openshift/tools/vendor/github.com/google/shlex/README b/openshift/tools/vendor/github.com/google/shlex/README deleted file mode 100644 index c86bcc066f..0000000000 --- a/openshift/tools/vendor/github.com/google/shlex/README +++ /dev/null @@ -1,2 +0,0 @@ -go-shlex is a simple lexer for go that supports shell-style quoting, -commenting, and escaping. diff --git a/openshift/tools/vendor/github.com/google/shlex/shlex.go b/openshift/tools/vendor/github.com/google/shlex/shlex.go deleted file mode 100644 index d98308bce3..0000000000 --- a/openshift/tools/vendor/github.com/google/shlex/shlex.go +++ /dev/null @@ -1,416 +0,0 @@ -/* -Copyright 2012 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package shlex implements a simple lexer which splits input into tokens using
-shell-style rules for quoting and commenting.
-
-The basic use case uses the default ASCII lexer to split a string into sub-strings:
-
-	shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
-
-To process a stream of strings:
-
-	l := NewLexer(os.Stdin)
-	for {
-		token, err := l.Next()
-		if err != nil {
-			break // err is io.EOF once the input is exhausted
-		}
-		// process token
-	}
-
-To access the raw token stream (which includes tokens for comments):
-
-	t := NewTokenizer(os.Stdin)
-	for {
-		token, err := t.Next()
-		if err != nil {
-			break // err is io.EOF once the input is exhausted
-		}
-		// process token
-	}
-*/
-package shlex
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"strings"
-)
-
-// TokenType is a top-level token classification: A word, space, comment, unknown.
-type TokenType int
-
-// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
-type runeTokenClass int
-
-// the internal state used by the lexer state machine
-type lexerState int
-
-// Token is a (type, value) pair representing a lexical token.
-type Token struct {
-	tokenType TokenType
-	value     string
-}
-
-// Equal reports whether tokens a and b are equal.
-// Two tokens are equal if both their types and values are equal. A nil token can
-// never be equal to another token.
-func (a *Token) Equal(b *Token) bool {
-	if a == nil || b == nil {
-		return false
-	}
-	if a.tokenType != b.tokenType {
-		return false
-	}
-	return a.value == b.value
-}
-
-// Named classes of UTF-8 runes
-const (
-	spaceRunes            = " \t\r\n"
-	escapingQuoteRunes    = `"`
-	nonEscapingQuoteRunes = "'"
-	escapeRunes           = `\`
-	commentRunes          = "#"
-)
-
-// Classes of rune token
-const (
-	unknownRuneClass runeTokenClass = iota
-	spaceRuneClass
-	escapingQuoteRuneClass
-	nonEscapingQuoteRuneClass
-	escapeRuneClass
-	commentRuneClass
-	eofRuneClass
-)
-
-// Classes of lexical token
-const (
-	UnknownToken TokenType = iota
-	WordToken
-	SpaceToken
-	CommentToken
-)
-
-// Lexer state machine states
-const (
-	startState           lexerState = iota // no runes have been seen
-	inWordState                            // processing regular runes in a word
-	escapingState                          // we have just consumed an escape rune; the next rune is literal
-	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
-	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
-	quotingState                           // we are within a string that does not support escaping ('...')
-	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
-)
-
-// tokenClassifier is used for classifying rune characters.
-type tokenClassifier map[rune]runeTokenClass
-
-func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
-	for _, runeChar := range runes {
-		typeMap[runeChar] = tokenType
-	}
-}
-
-// newDefaultClassifier creates a new classifier for ASCII characters.
-func newDefaultClassifier() tokenClassifier {
-	t := tokenClassifier{}
-	t.addRuneClass(spaceRunes, spaceRuneClass)
-	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
-	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
-	t.addRuneClass(escapeRunes, escapeRuneClass)
-	t.addRuneClass(commentRunes, commentRuneClass)
-	return t
-}
-
-// ClassifyRune classifies a rune
-func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
-	return t[runeVal]
-}
-
-// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
-type Lexer Tokenizer
-
-// NewLexer creates a new lexer from an input stream.
-func NewLexer(r io.Reader) *Lexer {
-	return (*Lexer)(NewTokenizer(r))
-}
-
-// Next returns the next word, or an error. If there are no more words,
-// the error will be io.EOF.
-func (l *Lexer) Next() (string, error) {
-	for {
-		token, err := (*Tokenizer)(l).Next()
-		if err != nil {
-			return "", err
-		}
-		switch token.tokenType {
-		case WordToken:
-			return token.value, nil
-		case CommentToken:
-			// skip comments
-		default:
-			return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
-		}
-	}
-}
-
-// Tokenizer turns an input stream into a sequence of typed tokens
-type Tokenizer struct {
-	input      bufio.Reader
-	classifier tokenClassifier
-}
-
-// NewTokenizer creates a new tokenizer from an input stream.
-func NewTokenizer(r io.Reader) *Tokenizer {
-	input := bufio.NewReader(r)
-	classifier := newDefaultClassifier()
-	return &Tokenizer{
-		input:      *input,
-		classifier: classifier}
-}
-
-// scanStream scans the stream for the next token using the internal state machine.
-// It will panic if it encounters a rune which it does not know how to handle.
-func (t *Tokenizer) scanStream() (*Token, error) {
-	state := startState
-	var tokenType TokenType
-	var value []rune
-	var nextRune rune
-	var nextRuneType runeTokenClass
-	var err error
-
-	for {
-		nextRune, _, err = t.input.ReadRune()
-		nextRuneType = t.classifier.ClassifyRune(nextRune)
-
-		if err == io.EOF {
-			nextRuneType = eofRuneClass
-			err = nil
-		} else if err != nil {
-			return nil, err
-		}
-
-		switch state {
-		case startState: // no runes read yet
-			{
-				switch nextRuneType {
-				case eofRuneClass:
-					{
-						return nil, io.EOF
-					}
-				case spaceRuneClass:
-					{
-					}
-				case escapingQuoteRuneClass:
-					{
-						tokenType = WordToken
-						state = quotingEscapingState
-					}
-				case nonEscapingQuoteRuneClass:
-					{
-						tokenType = WordToken
-						state = quotingState
-					}
-				case escapeRuneClass:
-					{
-						tokenType = WordToken
-						state = escapingState
-					}
-				case commentRuneClass:
-					{
-						tokenType = CommentToken
-						state = commentState
-					}
-				default:
-					{
-						tokenType = WordToken
-						value = append(value, nextRune)
-						state = inWordState
-					}
-				}
-			}
-		case inWordState: // in a regular word
-			{
-				switch nextRuneType {
-				case eofRuneClass:
-					{
-						token := &Token{
-							tokenType: tokenType,
-							value:     string(value)}
-						return token, err
-					}
-				case spaceRuneClass:
-					{
-						token := &Token{
-							tokenType: tokenType,
-							value:     string(value)}
-						return token, err
-					}
-				case escapingQuoteRuneClass:
-					{
-						state = quotingEscapingState
-					}
-				case nonEscapingQuoteRuneClass:
-					{
-						state = quotingState
-					}
-				case escapeRuneClass:
-					{
-						state = escapingState
-					}
-				default:
-					{
-						value = append(value, nextRune)
-					}
-				}
-			}
-		case escapingState: // the rune after an escape character
-			{
-				switch nextRuneType {
-				case eofRuneClass:
-					{
-						err = fmt.Errorf("EOF found after escape character")
-						token := &Token{
-							tokenType: tokenType,
-							value:     string(value)}
-
return token, err - } - default: - { - state = inWordState - value = append(value, nextRune) - } - } - } - case escapingQuotedState: // the next rune after an escape character, in double quotes - { - switch nextRuneType { - case eofRuneClass: - { - err = fmt.Errorf("EOF found after escape character") - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - default: - { - state = quotingEscapingState - value = append(value, nextRune) - } - } - } - case quotingEscapingState: // in escaping double quotes - { - switch nextRuneType { - case eofRuneClass: - { - err = fmt.Errorf("EOF found when expecting closing quote") - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case escapingQuoteRuneClass: - { - state = inWordState - } - case escapeRuneClass: - { - state = escapingQuotedState - } - default: - { - value = append(value, nextRune) - } - } - } - case quotingState: // in non-escaping single quotes - { - switch nextRuneType { - case eofRuneClass: - { - err = fmt.Errorf("EOF found when expecting closing quote") - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case nonEscapingQuoteRuneClass: - { - state = inWordState - } - default: - { - value = append(value, nextRune) - } - } - } - case commentState: // in a comment - { - switch nextRuneType { - case eofRuneClass: - { - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case spaceRuneClass: - { - if nextRune == '\n' { - state = startState - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } else { - value = append(value, nextRune) - } - } - default: - { - value = append(value, nextRune) - } - } - } - default: - { - return nil, fmt.Errorf("Unexpected state: %v", state) - } - } - } -} - -// Next returns the next token in the stream. -func (t *Tokenizer) Next() (*Token, error) { - return t.scanStream() -} - -// Split partitions a string into a slice of strings. -func Split(s string) ([]string, error) { - l := NewLexer(strings.NewReader(s)) - subStrings := make([]string, 0) - for { - word, err := l.Next() - if err != nil { - if err == io.EOF { - return subStrings, nil - } - return subStrings, err - } - subStrings = append(subStrings, word) - } -} diff --git a/openshift/tools/vendor/github.com/klauspost/compress/fse/bitwriter.go b/openshift/tools/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7b..d58b3fe423 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -143,7 +143,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. 
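Stepping back to the shlex deletion above: before this change, callers vendored the package for exactly the behavior its package comment documents. A minimal usage sketch against the upstream github.com/google/shlex module (the input string is illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/google/shlex"
    )

    func main() {
        // Quotes group words; an unquoted # starts a comment, which Split drops.
        words, err := shlex.Split(`cp "my file.txt" /tmp # trailing comment`)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(words) // [cp my file.txt /tmp]
    }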
func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/openshift/tools/vendor/github.com/klauspost/compress/fse/compress.go b/openshift/tools/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f9..8c8baa4fc2 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/fse/compress.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/fse/compress.go @@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/openshift/tools/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/openshift/tools/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac7..41db94cded 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -85,7 +85,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/openshift/tools/vendor/github.com/klauspost/compress/huff0/compress.go b/openshift/tools/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f0..a97cf1b5d3 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/huff0/compress.go @@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { offsetIdx := len(s.Out) s.Out = append(s.Out, sixZeros[:]...) 
- for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup wg.Add(4) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { }(i) } wg.Wait() - for i := 0; i < 4; i++ { + for i := range 4 { o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress.go b/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress.go index 0f56b02d74..7d0efa8818 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { @@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index ba7e8e6b02..99ddd4af97 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { var br [4]bitReaderShifted // Decode "jump table" start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git 
a/openshift/tools/vendor/github.com/klauspost/compress/huff0/huff0.go b/openshift/tools/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0a..67d9e05b6c 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ @@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ diff --git a/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go index 0cfb5c0e27..4f2a0d8c58 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) { } // Store64 will store v at b. -func Store64(b []byte, v uint64) { - binary.LittleEndian.PutUint64(b, v) +func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) } diff --git a/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go index ada45cd909..218a38bc4a 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 { // Store16 will store v at b. func Store16(b []byte, v uint16) { - //binary.LittleEndian.PutUint16(b, v) *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v } // Store32 will store v at b. func Store32(b []byte, v uint32) { - //binary.LittleEndian.PutUint32(b, v) *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v } -// Store64 will store v at b. -func Store64(b []byte, v uint64) { - //binary.LittleEndian.PutUint64(b, v) - *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +// Store64 will store v at b[i:]. 
+func Store64[I Indexer](b []byte, i I, v uint64) { + *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v } diff --git a/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/decode.go index 40796a49d6..a2c82fcd22 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -209,7 +209,7 @@ func (r *Reader) fill() error { if !r.readFull(r.buf[:len(magicBody)], false) { return r.err } - for i := 0; i < len(magicBody); i++ { + for i := range len(magicBody) { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt return r.err diff --git a/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/encode.go index 13c6040a5d..860a994167 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -20,8 +20,10 @@ import ( func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 1952f175b0..b22b297e62 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -88,7 +88,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. 
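The Store64 change above threads a generic offset into the store instead of reslicing at every call site. A standalone sketch of the same pattern using the safe (non-unsafe) variant; the integer constraint below is a stand-in, not the package's actual Indexer definition:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // offset approximates an integer-index constraint like the package's Indexer.
    type offset interface {
        ~int | ~int64 | ~uint | ~uint32 | ~uint64
    }

    // store64 writes v little-endian at b[i:], mirroring the new signature.
    func store64[I offset](b []byte, i I, v uint64) {
        binary.LittleEndian.PutUint64(b[i:], v)
    }

    func main() {
        buf := make([]byte, 16)
        store64(buf, 8, 0x1122334455667788)
        fmt.Printf("% x\n", buf)
    }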
func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/blockdec.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/blockdec.go index 0dd742fd2a..2329e996f8 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -54,11 +54,11 @@ const ( ) var ( - huffDecoderPool = sync.Pool{New: func() interface{} { + huffDecoderPool = sync.Pool{New: func() any { return &huff0.Scratch{} }} - fseDecoderPool = sync.Pool{New: func() interface{} { + fseDecoderPool = sync.Pool{New: func() any { return &fseDecoder{} }} ) @@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if compMode&3 != 0 { return errors.New("corrupt block: reserved bits not zero") } - for i := uint(0); i < 3; i++ { + for i := range uint(3) { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { println("Table", tableIndex(i), "is", mode) diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/decoder.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/decoder.go index ea2a19376c..30df5513d5 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } + size := min( + // Cap to 1 MB. + len(input)*2, 1<<20) if uint64(size) > d.o.maxDecodedSize { size = int(d.o.maxDecodedSize) } diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/dict.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/dict.go index b7b83164bc..2ffbfdf379 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/dict.go @@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { hist := o.History contents := o.Contents debug := o.DebugOut != nil - println := func(args ...interface{}) { + println := func(args ...any) { if o.DebugOut != nil { fmt.Fprintln(o.DebugOut, args...) } } - printf := func(s string, args ...interface{}) { + printf := func(s string, args ...any) { if o.DebugOut != nil { fmt.Fprintf(o.DebugOut, s, args...) } } - print := func(args ...interface{}) { + print := func(args ...any) { if o.DebugOut != nil { fmt.Fprint(o.DebugOut, args...) 
} @@ -424,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { } // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } + avgSize := min(litTotal, huff0.BlockSizeMax/2) huffBuff := make([]byte, 0, avgSize) // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } + div := max(litTotal/avgSize, 1) if debug { println("Huffman weights:") } @@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { huffBuff = append(huffBuff, 255) } scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { + for tries := range 255 { scratch = &huff0.Scratch{TableLog: 11} _, _, err = huff0.Compress1X(huffBuff, scratch) if err == nil { @@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { // Bail out.... Just generate something huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { + for i := range 128 { huffBuff = append(huffBuff, byte(i)) } continue diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_base.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_base.go index 7d250c67f5..c1192ec38f 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -8,7 +8,7 @@ import ( ) const ( - dictShardBits = 6 + dictShardBits = 7 ) type fastBase struct { @@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // or a window size small enough to contain the input size, if > 0. func (e *fastBase) WindowSize(size int64) int32 { if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } + b := max( + // Keep minimum window. + int32(1)< tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { s-- offset-- @@ -382,10 +377,7 @@ encodeLoop: nextEmit = s // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } + end := min(s, sLimit+4) off := index0 + e.cur for index0 < end { cv0 := load6432(src, index0) @@ -444,10 +436,7 @@ encodeLoop: nextEmit = s // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } + end := min(s, sLimit-4) off := index0 + e.cur for index0 < end { diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_better.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_better.go index 84a79fde76..85dcd28c32 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -190,10 +190,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -252,10 +249,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -480,10 +474,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -719,10 +710,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -783,10 +771,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -1005,10 +990,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d36be7bd8c..cf8cad00dc 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -13,7 +13,7 @@ const ( dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table @@ -149,10 +149,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -266,10 +263,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -462,10 +456,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -576,10 +567,7 @@ encodeLoop: l := int32(matchLen(src[s+4:], src[t+4:])) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -809,10 +797,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -927,10 +912,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f45a3da7da..9180a3a582 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -143,10 +143,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -223,10 +220,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -387,10 +381,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -469,10 +460,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -655,10 +643,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -735,10 +720,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/framedec.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/framedec.go index e47af66e7c..d88f067e5c 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error { if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. 
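As an aside on the recurring pattern in these hunks: the clamp-with-if code is being replaced by the min/max builtins available since Go 1.21. A small equivalence sketch with stand-in values, not the package's actual fields:

    package main

    import "fmt"

    func main() {
        frameContentSize, minWindowSize := 4096, 1024

        // Old pattern, as deleted in the surrounding hunks:
        windowSize := frameContentSize
        if windowSize < minWindowSize {
            windowSize = minWindowSize
        }

        // Go 1.21+ builtin equivalent, as added:
        fmt.Println(windowSize == max(frameContentSize, minWindowSize)) // true
    }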
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } + d.WindowSize = max(d.FrameContentSize, MinWindowSize) if d.WindowSize > d.o.maxDecodedSize { if debugDecoder { printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index ab26326a8f..3a0f4e7fbe 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec.go index 9a7de82f9e..0bfb0e43c7 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) if debugDecoder { println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index c59f17e07a..1f8c3cec28 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeSyncAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], @@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC func (s *sequenceDecs) decode(seqs []seqVals) error { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/simple_go124.go new file mode 100644 index 0000000000..2efc0497bf --- /dev/null +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/simple_go124.go @@ -0,0 +1,56 @@ +// Copyright 2025+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +//go:build go1.24 + +package zstd + +import ( + "errors" + "runtime" + "sync" + "weak" +) + +var weakMu sync.Mutex +var simpleEnc weak.Pointer[Encoder] +var simpleDec weak.Pointer[Decoder] + +// EncodeTo appends the encoded data from src to dst. +func EncodeTo(dst []byte, src []byte) []byte { + weakMu.Lock() + enc := simpleEnc.Value() + if enc == nil { + var err error + enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true)) + if err != nil { + panic("failed to create simple encoder: " + err.Error()) + } + simpleEnc = weak.Make(enc) + } + weakMu.Unlock() + + return enc.EncodeAll(src, dst) +} + +// DecodeTo appends the decoded data from src to dst. +// The maximum decoded size is 1GiB, +// not including what may already be in dst. +func DecodeTo(dst []byte, src []byte) ([]byte, error) { + weakMu.Lock() + dec := simpleDec.Value() + if dec == nil { + var err error + dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30)) + if err != nil { + weakMu.Unlock() + return nil, errors.New("failed to create simple decoder: " + err.Error()) + } + runtime.SetFinalizer(dec, func(d *Decoder) { + d.Close() + }) + simpleDec = weak.Make(dec) + } + weakMu.Unlock() + return dec.DecodeAll(src, dst) +} diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/snappy.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/snappy.go index a17381b8f8..336c288930 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { if !r.readFull(r.buf[:len(snappyMagicBody)], false) { return written, r.err } - for i := 0; i < len(snappyMagicBody); i++ { + for i := range len(snappyMagicBody) { if r.buf[i] != snappyMagicBody[i] { println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) r.err = ErrSnappyCorrupt diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/zip.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/zip.go index 29c15c8c4e..3198d71892 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/zip.go @@ -19,7 +19,7 @@ const ZipMethodWinZip = 93 const ZipMethodPKWare = 20 // zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { +var zipReaderPool = sync.Pool{New: func() any { z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) if err != nil { panic(err) diff --git a/openshift/tools/vendor/github.com/klauspost/compress/zstd/zstd.go b/openshift/tools/vendor/github.com/klauspost/compress/zstd/zstd.go index 6252b46ae6..1a869710d2 100644 --- a/openshift/tools/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/openshift/tools/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -98,13 +98,13 @@ var ( ErrDecoderNilInput = errors.New("nil input provided as reader") ) -func println(a ...interface{}) { +func println(a ...any) { if debug || debugDecoder || debugEncoder { log.Println(a...) } } -func printf(format string, a ...interface{}) { +func printf(format string, a ...any) { if debug || debugDecoder || debugEncoder { log.Printf(format, a...) 
} diff --git a/openshift/tools/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/openshift/tools/vendor/github.com/mitchellh/go-homedir/LICENSE similarity index 100% rename from openshift/tools/vendor/github.com/go-viper/mapstructure/v2/LICENSE rename to openshift/tools/vendor/github.com/mitchellh/go-homedir/LICENSE diff --git a/openshift/tools/vendor/github.com/mitchellh/go-homedir/README.md b/openshift/tools/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000000..d70706d5b3 --- /dev/null +++ b/openshift/tools/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/openshift/tools/vendor/github.com/mitchellh/go-homedir/homedir.go b/openshift/tools/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000000..25378537ea --- /dev/null +++ b/openshift/tools/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. 
+func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/openshift/tools/vendor/github.com/modern-go/reflect2/safe_type.go b/openshift/tools/vendor/github.com/modern-go/reflect2/safe_type.go index ee4e7bb6ed..5646309e09 100644 --- a/openshift/tools/vendor/github.com/modern-go/reflect2/safe_type.go +++ b/openshift/tools/vendor/github.com/modern-go/reflect2/safe_type.go @@ -6,10 +6,12 @@ import ( ) type safeType struct { - reflect.Type - cfg *frozenConfig + Type reflect.Type + cfg *frozenConfig } +var _ Type = &safeType{} + func (type2 *safeType) New() interface{} { return reflect.New(type2.Type).Interface() } @@ -18,6 +20,22 @@ func (type2 *safeType) UnsafeNew() unsafe.Pointer { panic("does not support unsafe operation") } +func (type2 *safeType) Kind() reflect.Kind { + return type2.Type.Kind() +} + +func (type2 *safeType) Len() int { + return type2.Type.Len() +} + +func (type2 *safeType) NumField() int { + return type2.Type.NumField() +} + +func (type2 *safeType) String() string { + return type2.Type.String() +} + func (type2 *safeType) Elem() Type { return type2.cfg.Type2(type2.Type.Elem()) } diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/LICENSE b/openshift/tools/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 0000000000..9fdc20fdb6 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + 
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
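Also from earlier in this diff: the dirUnix helper resolves the home directory by trying $HOME first, then an OS-specific lookup (dscl on darwin, getent passwd <uid> elsewhere), and finally a `cd && pwd` shell fallback. A condensed, runnable sketch of the non-darwin path, assuming a POSIX userland where getent and sh are available:

package main

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"strings"
)

func homeDir() (string, error) {
	// 1. The environment variable wins.
	if home := os.Getenv("HOME"); home != "" {
		return home, nil
	}
	// 2. Ask the user database; format is username:password:uid:gid:gecos:home:shell.
	if out, err := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())).Output(); err == nil {
		if parts := strings.SplitN(strings.TrimSpace(string(out)), ":", 7); len(parts) > 5 && parts[5] != "" {
			return parts[5], nil
		}
	}
	// 3. Last resort: let the shell resolve it.
	out, err := exec.Command("sh", "-c", "cd && pwd").Output()
	if err != nil {
		return "", err
	}
	if home := strings.TrimSpace(string(out)); home != "" {
		return home, nil
	}
	return "", errors.New("could not determine home directory")
}

func main() {
	home, err := homeDir()
	fmt.Println(home, err)
}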
diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 0000000000..581cf7cdfa --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,62 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. 
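+	//
+	// Editorial sketch, not part of the upstream file: build tooling commonly
+	// stamps a handful of these keys onto an image and attaches them through
+	// the Annotations field of a Manifest or Index, e.g.
+	//
+	//	annotations := map[string]string{
+	//		AnnotationCreated:  time.Now().UTC().Format(time.RFC3339),
+	//		AnnotationRevision: gitCommit, // gitCommit is a hypothetical variable
+	//		AnnotationSource:   "https://github.com/example/repo",
+	//	}
+	//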
+ AnnotationBaseImageName = "org.opencontainers.image.base.name" +) diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go new file mode 100644 index 0000000000..36b0aeb8f1 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -0,0 +1,111 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. + Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories describing where the process is likely write data specific to a container instance. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. + Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` + + // ArgsEscaped + // + // Deprecated: This field is present only for legacy compatibility with + // Docker and should not be used by new image builders. It is used by Docker + // for Windows images to indicate that the `Entrypoint` or `Cmd` or both, + // contains only a single element array, that is a pre-escaped, and combined + // into a single string `CommandLine`. If `true` the value in `Entrypoint` or + // `Cmd` should be used as-is to avoid double escaping. + // https://github.com/opencontainers/image-spec/pull/892 + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` +} + +// RootFS describes a layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. + DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. 
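+// Editorial sketch, not part of the upstream file: ImageConfig above keeps
+// Docker-compatible capitalized JSON keys, so a marshalled config looks like
+//
+//	{"User":"65532","Env":["PATH=/usr/bin"],"Entrypoint":["/manager"]}
+//
+// while the Image type below uses lowercase keys such as "created" and
+// "rootfs".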
+type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + Platform + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 0000000000..1881b11814 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,80 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. 
This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64le`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} + +// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. +var DescriptorEmptyJSON = Descriptor{ + MediaType: MediaTypeEmptyJSON, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 0000000000..e2bed9d4e4 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. 
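+	//
+	// Editorial sketch, not part of the upstream file: DescriptorEmptyJSON in
+	// descriptor.go above is self-checking, since its digest is simply the
+	// SHA-256 of the two-byte payload `{}` (crypto/sha256):
+	//
+	//	sum := sha256.Sum256([]byte("{}"))
+	//	fmt.Printf("sha256:%x\n", sum)
+	//	// prints sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
+	//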
+ Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 0000000000..c5503cb305 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // ImageLayoutFile is the file name containing ImageLayout in an OCI Image Layout + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" + // ImageIndexFile is the file name of the entry point for references and descriptors in an OCI Image Layout + ImageIndexFile = "index.json" + // ImageBlobsDir is the directory name containing content addressable blobs in an OCI Image Layout + ImageBlobsDir = "blobs" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 0000000000..26fec52a6b --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,41 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. 
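+	//
+	// Editorial sketch, not part of the upstream file: the layout.go constants
+	// above describe an on-disk tree of the form
+	//
+	//	oci-layout            -> {"imageLayoutVersion":"1.0.0"}
+	//	index.json            -> an Index whose Manifests descriptors point into blobs/
+	//	blobs/sha256/<hex>    -> content-addressed manifests, configs and layers
+	//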
+ Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 0000000000..ce8313e796 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,85 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value "{}". + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" +) + +const ( + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" +) + +// Non-distributable layer media-types. +// +// Deprecated: Non-distributable layers are deprecated, and not recommended +// for future use. Implementations SHOULD NOT produce new non-distributable +// layers. +// https://github.com/opencontainers/image-spec/pull/965 +const ( + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by + // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. 
+ // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + // MediaTypeImageLayerNonDistributableGzip is the media type for + // gzipped layers referenced by the manifest but with distribution + // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" +) diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/version.go new file mode 100644 index 0000000000..c3897c7ca0 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +import "fmt" + +const ( + // VersionMajor is for an API incompatible changes + VersionMajor = 1 + // VersionMinor is for functionality in a backwards-compatible manner + VersionMinor = 1 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 1 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go new file mode 100644 index 0000000000..58a1510f33 --- /dev/null +++ b/openshift/tools/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/LICENSE b/openshift/tools/vendor/github.com/openshift/api/LICENSE new file mode 100644 index 0000000000..5c389317ec --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2020 Red Hat, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/Makefile b/openshift/tools/vendor/github.com/openshift/api/config/v1/Makefile new file mode 100644 index 0000000000..66bf636305 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1" diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/doc.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/doc.go new file mode 100644 index 0000000000..f994547583 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +openshift:featuregated-schema-gen=true + +// +kubebuilder:validation:Optional +// +groupName=config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/register.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/register.go new file mode 100644 index 0000000000..222c7f0cc7 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/register.go @@ -0,0 +1,84 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
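+//
+// Editorial sketch, not part of the upstream file: consumers typically wire
+// these types into a scheme through the exported Install variable, e.g.
+//
+//	scheme := runtime.NewScheme()
+//	if err := configv1.Install(scheme); err != nil { // configv1: this package
+//		panic(err)
+//	}
+//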
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &APIServer{}, + &APIServerList{}, + &Authentication{}, + &AuthenticationList{}, + &Build{}, + &BuildList{}, + &ClusterOperator{}, + &ClusterOperatorList{}, + &ClusterVersion{}, + &ClusterVersionList{}, + &Console{}, + &ConsoleList{}, + &DNS{}, + &DNSList{}, + &FeatureGate{}, + &FeatureGateList{}, + &Image{}, + &ImageList{}, + &Infrastructure{}, + &InfrastructureList{}, + &Ingress{}, + &IngressList{}, + &Node{}, + &NodeList{}, + &Network{}, + &NetworkList{}, + &OAuth{}, + &OAuthList{}, + &OperatorHub{}, + &OperatorHubList{}, + &Project{}, + &ProjectList{}, + &Proxy{}, + &ProxyList{}, + &Scheduler{}, + &SchedulerList{}, + &ImageContentPolicy{}, + &ImageContentPolicyList{}, + &ImageDigestMirrorSet{}, + &ImageDigestMirrorSetList{}, + &ImageTagMirrorSet{}, + &ImageTagMirrorSetList{}, + &ImagePolicy{}, + &ImagePolicyList{}, + &ClusterImagePolicy{}, + &ClusterImagePolicyList{}, + &InsightsDataGather{}, + &InsightsDataGatherList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/stringsource.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/stringsource.go new file mode 100644 index 0000000000..6a5718c1db --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/stringsource.go @@ -0,0 +1,31 @@ +package v1 + +import "encoding/json" + +// UnmarshalJSON implements the json.Unmarshaller interface. +// If the value is a string, it sets the Value field of the StringSource. +// Otherwise, it is unmarshaled into the StringSourceSpec struct +func (s *StringSource) UnmarshalJSON(value []byte) error { + // If we can unmarshal to a simple string, just set the value + var simpleValue string + if err := json.Unmarshal(value, &simpleValue); err == nil { + s.Value = simpleValue + return nil + } + + // Otherwise do the full struct unmarshal + return json.Unmarshal(value, &s.StringSourceSpec) +} + +// MarshalJSON implements the json.Marshaller interface. +// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string. +// Otherwise, the StringSourceSpec struct is marshaled as a JSON object. +func (s *StringSource) MarshalJSON() ([]byte, error) { + // If we have only a cleartext value set, do a simple string marshal + if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) { + return json.Marshal(s.Value) + } + + // Otherwise do the full struct marshal of the externalized bits + return json.Marshal(s.StringSourceSpec) +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types.go new file mode 100644 index 0000000000..3e17ca0ccb --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types.go @@ -0,0 +1,431 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ConfigMapFileReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. +type ConfigMapFileReference struct { + Name string `json:"name"` + // key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + Key string `json:"key,omitempty"` +} + +// ConfigMapNameReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. 
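+// Editorial sketch, not part of the upstream file: the StringSource JSON
+// methods in stringsource.go above accept either encoding, e.g.
+//
+//	var s StringSource
+//	_ = json.Unmarshal([]byte(`"plaintext"`), &s)      // sets s.Value
+//	_ = json.Unmarshal([]byte(`{"env":"TOKEN"}`), &s)  // sets s.Env
+//
+// and MarshalJSON emits the bare string form whenever only Value is set.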
+type ConfigMapNameReference struct { + // name is the metadata.name of the referenced config map + // +required + Name string `json:"name"` +} + +// SecretNameReference references a secret in a specific namespace. +// The namespace must be specified at the point of use. +type SecretNameReference struct { + // name is the metadata.name of the referenced secret + // +required + Name string `json:"name"` +} + +// HTTPServingInfo holds configuration for serving HTTP +type HTTPServingInfo struct { + // ServingInfo is the HTTP serving information + ServingInfo `json:",inline"` + // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` + // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // -1 there is no limit on requests. + RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` +} + +// ServingInfo holds information about serving web pages +type ServingInfo struct { + // bindAddress is the ip:port to serve on + BindAddress string `json:"bindAddress"` + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // CertInfo is the TLS cert info for serving secure traffic. + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` + // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // +optional + ClientCA string `json:"clientCA,omitempty"` + // namedCertificates is a list of certificates to use to secure requests to specific hostnames + NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` + // minTLSVersion is the minimum TLS version supported. + // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + MinTLSVersion string `json:"minTLSVersion,omitempty"` + // cipherSuites contains an overridden list of ciphers for the server to support. + // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// CertInfo relates a certificate with a private key +type CertInfo struct { + // certFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` + // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string `json:"keyFile"` +} + +// NamedCertificate specifies a certificate/key, and the names it should be served for +type NamedCertificate struct { + // names is a list of DNS names this certificate should be used to secure + // A name can be a normal DNS name, or can contain leading wildcard segments. + Names []string `json:"names,omitempty"` + // CertInfo is the TLS cert info for serving secure traffic + CertInfo `json:",inline"` +} + +// LeaderElection provides information to elect a leader +type LeaderElection struct { + // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case. 
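+	//
+	// Editorial sketch, not part of the upstream file: because CertInfo is
+	// embedded with `json:",inline"`, a serialized ServingInfo flattens the
+	// certificate fields into the same object, e.g.
+	//
+	//	{"bindAddress":"0.0.0.0:8443","bindNetwork":"tcp4",
+	//	 "certFile":"/etc/tls/tls.crt","keyFile":"/etc/tls/tls.key"}
+	//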
+ Disable bool `json:"disable,omitempty"` + // namespace indicates which namespace the resource is in + Namespace string `json:"namespace,omitempty"` + // name indicates what name to use for the resource + Name string `json:"name,omitempty"` + + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + // +nullable + LeaseDuration metav1.Duration `json:"leaseDuration"` + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + // +nullable + RenewDeadline metav1.Duration `json:"renewDeadline"` + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + // +nullable + RetryPeriod metav1.Duration `json:"retryPeriod"` +} + +// StringSource allows specifying a string inline, or externally via env var or file. +// When it contains only a string value, it marshals to a simple JSON string. +type StringSource struct { + // StringSourceSpec specifies the string value, or external location + StringSourceSpec `json:",inline"` +} + +// StringSourceSpec specifies a string value, or external location +type StringSourceSpec struct { + // value specifies the cleartext value, or an encrypted value if keyFile is specified. + Value string `json:"value"` + + // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + Env string `json:"env"` + + // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + File string `json:"file"` + + // keyFile references a file containing the key to use to decrypt the value. + KeyFile string `json:"keyFile"` +} + +// RemoteConnectionInfo holds information necessary for establishing a remote connection +type RemoteConnectionInfo struct { + // url is the remote URL to connect to + URL string `json:"url"` + // ca is the CA for verifying TLS connections + CA string `json:"ca"` + // CertInfo is the TLS client cert information to present + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +type AdmissionConfig struct { + PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"` + + // enabledPlugins is a list of admission plugins that must be on in addition to the default list. + // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon + // and can result in performance penalties and unexpected behavior. + EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"` + + // disabledPlugins is a list of admission plugins that must be off. Putting something in this list + // is almost always a mistake and likely to result in cluster instability. 
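+	//
+	// Editorial sketch, not part of the upstream file: per the LeaderElection
+	// comments above, renewDeadline must not exceed leaseDuration, and the
+	// retry period is expected to be shorter still, so a purely illustrative
+	// configuration is
+	//
+	//	leaseDuration: 137s
+	//	renewDeadline: 107s
+	//	retryPeriod:   26s
+	//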
+ DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"`
+}
+
+// AdmissionPluginConfig holds the necessary configuration options for admission plugins
+type AdmissionPluginConfig struct {
+ // location is the path to a configuration file that contains the plugin's
+ // configuration
+ Location string `json:"location"`
+
+ // configuration is an embedded configuration object to be used as the plugin's
+ // configuration. If present, it will be used instead of the path to the configuration file.
+ // +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Configuration runtime.RawExtension `json:"configuration"`
+}
+
+type LogFormatType string
+
+type WebHookModeType string
+
+const (
+ // LogFormatLegacy saves events in 1-line text format.
+ LogFormatLegacy LogFormatType = "legacy"
+ // LogFormatJson saves events in structured json format.
+ LogFormatJson LogFormatType = "json"
+
+ // WebHookModeBatch indicates that the webhook should buffer audit events
+ // internally, sending batch updates either once a certain number of
+ // events have been received or a certain amount of time has passed.
+ WebHookModeBatch WebHookModeType = "batch"
+ // WebHookModeBlocking causes the webhook to block on every attempt to process
+ // a set of events. This causes requests to the API server to wait for a
+ // round trip to the external audit service before sending a response.
+ WebHookModeBlocking WebHookModeType = "blocking"
+)
+
+// AuditConfig holds configuration for the audit capabilities
+type AuditConfig struct {
+ // If this flag is set, the audit log will be printed in the logs.
+ // The log contains the method, user, and requested URL.
+ Enabled bool `json:"enabled"`
+ // All requests coming to the apiserver will be logged to this file.
+ AuditFilePath string `json:"auditFilePath"`
+ // Maximum number of days to retain old log files based on the timestamp encoded in their filename.
+ MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"`
+ // Maximum number of old log files to retain.
+ MaximumRetainedFiles int32 `json:"maximumRetainedFiles"`
+ // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
+ MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"`
+
+ // policyFile is a path to the file that defines the audit policy configuration.
+ PolicyFile string `json:"policyFile"`
+ // policyConfiguration is an embedded policy configuration object to be used
+ // as the audit policy configuration. If present, it will be used instead of
+ // the path to the policy file.
+ // +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
+ PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
+
+ // Format of saved audits (legacy or json).
+ LogFormat LogFormatType `json:"logFormat"`
+
+ // Path to a .kubeconfig formatted file that defines the audit webhook configuration.
+ WebHookKubeConfig string `json:"webHookKubeConfig"`
+ // Strategy for sending audit events (batch or blocking).
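+ //
+ // A hedged sketch of the enclosing AuditConfig (paths and limits are
+ // illustrative only):
+ //
+ //   audit := AuditConfig{
+ //       Enabled:                  true,
+ //       AuditFilePath:            "/var/log/openshift/audit.log",
+ //       MaximumFileRetentionDays: 10,
+ //       MaximumRetainedFiles:     10,
+ //       MaximumFileSizeMegabytes: 100,
+ //       LogFormat:                LogFormatJson,
+ //       WebHookKubeConfig:        "/etc/audit/webhook.kubeconfig",
+ //       WebHookMode:              WebHookModeBatch,
+ //   }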
+ WebHookMode WebHookModeType `json:"webHookMode"`
+}
+
+// EtcdConnectionInfo holds information necessary for connecting to an etcd server
+type EtcdConnectionInfo struct {
+ // urls are the URLs for etcd
+ URLs []string `json:"urls,omitempty"`
+ // ca is a file containing trusted roots for the etcd server certificates
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information for securing communication to etcd
+ // This is anonymous so that we can inline it for serialization.
+ CertInfo `json:",inline"`
+}
+
+type EtcdStorageConfig struct {
+ EtcdConnectionInfo `json:",inline"`
+
+ // storagePrefix is the path within etcd that the OpenShift resources will
+ // be rooted under. This value, if changed, will mean existing objects in etcd will
+ // no longer be located.
+ StoragePrefix string `json:"storagePrefix"`
+}
+
+// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd
+type GenericAPIServerConfig struct {
+ // servingInfo describes how to start serving
+ ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+ // corsAllowedOrigins is the list of origins allowed for CORS requests
+ CORSAllowedOrigins []string `json:"corsAllowedOrigins"`
+
+ // auditConfig describes how to configure audit information
+ AuditConfig AuditConfig `json:"auditConfig"`
+
+ // storageConfig contains information about how to use etcd for storage
+ StorageConfig EtcdStorageConfig `json:"storageConfig"`
+
+ // admissionConfig holds information about how to configure admission.
+ AdmissionConfig AdmissionConfig `json:"admission"`
+
+ KubeClientConfig KubeClientConfig `json:"kubeClientConfig"`
+}
+
+type KubeClientConfig struct {
+ // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config
+ KubeConfig string `json:"kubeConfig"`
+
+ // connectionOverrides specifies client overrides for system components to loop back to this master.
+ ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"`
+}
+
+type ClientConnectionOverrides struct {
+ // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
+ // default value of 'application/json'. This field will control all connections to the server used by a particular
+ // client.
+ AcceptContentTypes string `json:"acceptContentTypes"`
+ // contentType is the content type used when sending data to the server from this client.
+ ContentType string `json:"contentType"`
+
+ // qps controls the number of queries per second allowed for this connection.
+ QPS float32 `json:"qps"`
+ // burst allows extra queries to accumulate when a client is exceeding its rate.
+ Burst int32 `json:"burst"`
+}
+
+// GenericControllerConfig provides information to configure a controller
+type GenericControllerConfig struct {
+ // servingInfo is the HTTP serving information for the controller's endpoints
+ ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+ // leaderElection provides information to elect a leader. Only override this if you have a specific need.
+ LeaderElection LeaderElection `json:"leaderElection"`
+
+ // authentication allows configuration of authentication for the endpoints
+ Authentication DelegatedAuthentication `json:"authentication"`
+ // authorization allows configuration of authorization for the endpoints
+ Authorization DelegatedAuthorization `json:"authorization"`
+}
+
+// DelegatedAuthentication allows authentication to be disabled.
+type DelegatedAuthentication struct {
+ // disabled indicates that authentication should be disabled. By default it will use delegated authentication.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+// DelegatedAuthorization allows authorization to be disabled.
+type DelegatedAuthorization struct {
+ // disabled indicates that authorization should be disabled. By default it will use delegated authorization.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+type RequiredHSTSPolicy struct {
+ // namespaceSelector specifies a label selector such that the policy applies only to those routes that
+ // are in namespaces with labels that match the selector, and are in one of the DomainPatterns.
+ // Defaults to the empty LabelSelector, which matches everything.
+ // +optional
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+ // domainPatterns is a list of domains for which the desired HSTS annotations are required.
+ // If domainPatterns is specified and a route is created with a spec.host matching one of the domains,
+ // the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.
+ //
+ // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com.
+ // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.
+ // +kubebuilder:validation:MinItems=1
+ // +required
+ DomainPatterns []string `json:"domainPatterns"`
+
+ // maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts.
+ // If set to 0, it negates the effect, and hosts are removed as HSTS hosts.
+ // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts.
+ // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS
+ // policy will eventually expire on that client.
+ MaxAge MaxAgePolicy `json:"maxAge"`
+
+ // preloadPolicy directs the client to include hosts in its host preload list so that
+ // it never needs to do an initial load to get the HSTS header (note that this is not defined
+ // in RFC 6797 and is therefore client implementation-dependent).
+ // +optional
+ PreloadPolicy PreloadPolicy `json:"preloadPolicy,omitempty"`
+
+ // includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's
+ // domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains:
+ // - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com
+ // - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com
+ // - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com
+ // - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com
+ // +optional
+ IncludeSubDomainsPolicy IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"`
+}
+
+// MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy
+type MaxAgePolicy struct {
+ // The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age.
+ // This value can be left unspecified, in which case no upper limit is enforced.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=2147483647
+ LargestMaxAge *int32 `json:"largestMaxAge,omitempty"`
+
+ // The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age.
+ // Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary
+ // tool for administrators to quickly correct mistakes.
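+ //
+ // For illustration only (ptr32 is a hypothetical *int32 helper), a policy
+ // permitting max-age values between 0 and one year might look like:
+ //
+ //   MaxAgePolicy{
+ //       SmallestMaxAge: ptr32(0),
+ //       LargestMaxAge:  ptr32(31536000),
+ //   }
+ //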
+ // This value can be left unspecified, in which case no lower limit is enforced.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=2147483647
+ SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"`
+}
+
+// PreloadPolicy contains a value for specifying a compliant HSTS preload policy for the enclosing RequiredHSTSPolicy
+// +kubebuilder:validation:Enum=RequirePreload;RequireNoPreload;NoOpinion
+type PreloadPolicy string
+
+const (
+ // RequirePreloadPolicy means HSTS "preload" is required by the RequiredHSTSPolicy
+ RequirePreloadPolicy PreloadPolicy = "RequirePreload"
+
+ // RequireNoPreloadPolicy means HSTS "preload" is forbidden by the RequiredHSTSPolicy
+ RequireNoPreloadPolicy PreloadPolicy = "RequireNoPreload"
+
+ // NoOpinionPreloadPolicy means HSTS "preload" doesn't matter to the RequiredHSTSPolicy
+ NoOpinionPreloadPolicy PreloadPolicy = "NoOpinion"
+)
+
+// IncludeSubDomainsPolicy contains a value for specifying a compliant HSTS includeSubdomains policy
+// for the enclosing RequiredHSTSPolicy
+// +kubebuilder:validation:Enum=RequireIncludeSubDomains;RequireNoIncludeSubDomains;NoOpinion
+type IncludeSubDomainsPolicy string
+
+const (
+ // RequireIncludeSubDomains means HSTS "includeSubDomains" is required by the RequiredHSTSPolicy
+ RequireIncludeSubDomains IncludeSubDomainsPolicy = "RequireIncludeSubDomains"
+
+ // RequireNoIncludeSubDomains means HSTS "includeSubDomains" is forbidden by the RequiredHSTSPolicy
+ RequireNoIncludeSubDomains IncludeSubDomainsPolicy = "RequireNoIncludeSubDomains"
+
+ // NoOpinionIncludeSubDomains means HSTS "includeSubDomains" doesn't matter to the RequiredHSTSPolicy
+ NoOpinionIncludeSubDomains IncludeSubDomainsPolicy = "NoOpinion"
+)
+
+// IBMCloudServiceName contains a value specifying the name of an IBM Cloud Service,
+// which is used by MAPI, CIRO, CIO, Installer, etc.
+// +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;ResourceController;ResourceManager;VPC
+type IBMCloudServiceName string
+
+const (
+ // IBMCloudServiceCIS is the name for IBM Cloud CIS.
+ IBMCloudServiceCIS IBMCloudServiceName = "CIS"
+ // IBMCloudServiceCOS is the name for IBM Cloud COS.
+ IBMCloudServiceCOS IBMCloudServiceName = "COS"
+ // IBMCloudServiceCOSConfig is the name for IBM Cloud COS Config service.
+ IBMCloudServiceCOSConfig IBMCloudServiceName = "COSConfig"
+ // IBMCloudServiceDNSServices is the name for IBM Cloud DNS Services.
+ IBMCloudServiceDNSServices IBMCloudServiceName = "DNSServices"
+ // IBMCloudServiceGlobalCatalog is the name for IBM Cloud Global Catalog service.
+ IBMCloudServiceGlobalCatalog IBMCloudServiceName = "GlobalCatalog"
+ // IBMCloudServiceGlobalSearch is the name for IBM Cloud Global Search.
+ IBMCloudServiceGlobalSearch IBMCloudServiceName = "GlobalSearch"
+ // IBMCloudServiceGlobalTagging is the name for IBM Cloud Global Tagging.
+ IBMCloudServiceGlobalTagging IBMCloudServiceName = "GlobalTagging"
+ // IBMCloudServiceHyperProtect is the name for IBM Cloud Hyper Protect.
+ IBMCloudServiceHyperProtect IBMCloudServiceName = "HyperProtect"
+ // IBMCloudServiceIAM is the name for IBM Cloud IAM.
+ IBMCloudServiceIAM IBMCloudServiceName = "IAM"
+ // IBMCloudServiceKeyProtect is the name for IBM Cloud Key Protect.
+ IBMCloudServiceKeyProtect IBMCloudServiceName = "KeyProtect"
+ // IBMCloudServiceResourceController is the name for IBM Cloud Resource Controller.
+ IBMCloudServiceResourceController IBMCloudServiceName = "ResourceController" + // IBMCloudServiceResourceManager is the name for IBM Cloud Resource Manager. + IBMCloudServiceResourceManager IBMCloudServiceName = "ResourceManager" + // IBMCloudServiceVPC is the name for IBM Cloud VPC. + IBMCloudServiceVPC IBMCloudServiceName = "VPC" +) diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_apiserver.go new file mode 100644 index 0000000000..31d8881858 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -0,0 +1,251 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIServer holds configuration (like serving certificates, client CA and CORS domains) +// shared by all API servers in the system, among them especially kube-apiserver +// and openshift-apiserver. The canonical name of an instance is 'cluster'. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=apiservers,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type APIServer struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +required + Spec APIServerSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status APIServerStatus `json:"status"` +} + +type APIServerSpec struct { + // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates + // will be used for serving secure traffic. + // +optional + ServingCerts APIServerServingCerts `json:"servingCerts"` + // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for + // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. + // You usually only have to set this if you have your own PKI you wish to honor client certificates from. + // The ConfigMap must exist in the openshift-config namespace and contain the following required fields: + // - ConfigMap.Data["ca-bundle.crt"] - CA bundle. + // +optional + ClientCA ConfigMapNameReference `json:"clientCA"` + // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the + // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth + // server from JavaScript applications. + // The values are regular expressions that correspond to the Golang regular expression language. 
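+ // For example (illustrative pattern, not a recommendation), the entry
+ // "(?i)//my\.subdomain\.domain\.com(:|\z)" would allow CORS requests from
+ // my.subdomain.domain.com on any port, case-insensitively.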
+ // +optional
+ // +listType=atomic
+ AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
+ // encryption allows the configuration of encryption of resources at the datastore layer.
+ // +optional
+ Encryption APIServerEncryption `json:"encryption"`
+ // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.
+ //
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+ // The current default is the Intermediate profile.
+ // +optional
+ TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
+ // audit specifies the settings for audit configuration to be applied to all OpenShift-provided
+ // API servers in the cluster.
+ // +optional
+ // +kubebuilder:default={profile: Default}
+ Audit Audit `json:"audit"`
+}
+
+// AuditProfileType defines the audit policy profile type.
+// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies;None
+type AuditProfileType string
+
+const (
+ // "None" disables audit logs.
+ NoneAuditProfileType AuditProfileType = "None"
+
+ // "Default" is the existing default audit configuration policy.
+ DefaultAuditProfileType AuditProfileType = "Default"
+
+ // "WriteRequestBodies" is similar to Default but it logs request and response
+ // HTTP payloads for write requests (create, update, patch).
+ WriteRequestBodiesAuditProfileType AuditProfileType = "WriteRequestBodies"
+
+ // "AllRequestBodies" is similar to WriteRequestBodies, but also logs request
+ // and response HTTP payloads for read requests (get, list).
+ AllRequestBodiesAuditProfileType AuditProfileType = "AllRequestBodies"
+)
+
+type Audit struct {
+ // profile specifies the name of the desired top-level audit profile to be applied to all requests
+ // sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver,
+ // openshift-apiserver and oauth-apiserver), with the exception of those requests that match
+ // one or more of the customRules.
+ //
+ // The following profiles are provided:
+ // - Default: default policy which means MetaData level logging with the exception of events
+ // (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody
+ // level).
+ // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for
+ // write requests (create, update, patch).
+ // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response
+ // HTTP payloads for read requests (get, list).
+ // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.
+ //
+ // Warning: It is not recommended to disable audit logging by using the `None` profile unless you
+ // are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues.
+ // If you disable audit logging and a support situation arises, you might need to enable audit logging
+ // and reproduce the issue in order to troubleshoot properly.
+ //
+ // If unset, the 'Default' profile is used as the default.
+ //
+ // +kubebuilder:default=Default
+ Profile AuditProfileType `json:"profile,omitempty"`
+ // customRules specify profiles per group. These profiles take precedence over the
+ // top-level profile field if they apply. They are evaluated from top to bottom and
+ // the first one that matches applies.
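+ //
+ // A hedged sketch (the group name is illustrative):
+ //
+ //   audit := Audit{
+ //       Profile: DefaultAuditProfileType,
+ //       CustomRules: []AuditCustomRule{
+ //           {Group: "system:authenticated:oauth", Profile: WriteRequestBodiesAuditProfileType},
+ //       },
+ //   }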
+ // +listType=map
+ // +listMapKey=group
+ // +optional
+ CustomRules []AuditCustomRule `json:"customRules,omitempty"`
+}
+
+// AuditCustomRule describes a custom rule for an audit profile that takes precedence over
+// the top-level profile.
+type AuditCustomRule struct {
+ // group is the name of a group that a request user must be a member of in order for this profile to apply.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ Group string `json:"group"`
+ // profile specifies the name of the desired audit policy configuration to be deployed to
+ // all OpenShift-provided API servers in the cluster.
+ //
+ // The following profiles are provided:
+ // - Default: the existing default policy.
+ // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for
+ // write requests (create, update, patch).
+ // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response
+ // HTTP payloads for read requests (get, list).
+ // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.
+ //
+ // If unset, the 'Default' profile is used as the default.
+ //
+ // +required
+ Profile AuditProfileType `json:"profile"`
+}
+
+type APIServerServingCerts struct {
+ // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
+ // If no named certificates are provided, or no named certificates match the server name as understood by a client,
+ // the defaultServingCertificate will be used.
+ // +optional
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=32
+ NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
+}
+
+// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.
+type APIServerNamedServingCert struct {
+ // names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to
+ // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
+ // Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.
+ // +optional
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=64
+ Names []string `json:"names,omitempty"`
+ // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
+ // The secret must exist in the openshift-config namespace and contain the following required fields:
+ // - Secret.Data["tls.key"] - TLS private key.
+ // - Secret.Data["tls.crt"] - TLS certificate.
+ ServingCertificate SecretNameReference `json:"servingCertificate"`
+}
+
+// APIServerEncryption is used to encrypt sensitive resources on the cluster.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=KMSEncryptionProvider,rule="has(self.type) && self.type == 'KMS' ? has(self.kms) : !has(self.kms)",message="kms config is required when encryption type is KMS, and forbidden otherwise"
+// +union
+type APIServerEncryption struct {
+ // type defines what encryption type should be used to encrypt resources at the datastore layer.
+ // When this field is unset (i.e. when it is set to the empty string), identity is implied.
+ // The behavior of unset can and will change over time. Even if encryption is enabled by default,
+ // the meaning of unset may change to a different encryption type based on changes in best practices.
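+ //
+ // A minimal sketch selecting AES-GCM at the datastore layer:
+ //
+ //   enc := APIServerEncryption{Type: EncryptionTypeAESGCM}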
+ //
+ // When encryption is enabled, all sensitive resources shipped with the platform are encrypted.
+ // This list of sensitive resources can and will change over time. The current authoritative list is:
+ //
+ // 1. secrets
+ // 2. configmaps
+ // 3. routes.route.openshift.io
+ // 4. oauthaccesstokens.oauth.openshift.io
+ // 5. oauthauthorizetokens.oauth.openshift.io
+ //
+ // +unionDiscriminator
+ // +optional
+ Type EncryptionType `json:"type,omitempty"`
+
+ // kms defines the configuration for the external KMS instance that manages the encryption keys.
+ // When KMS encryption is enabled, sensitive resources will be encrypted using keys managed by an
+ // externally configured KMS instance.
+ //
+ // The Key Management Service (KMS) instance provides symmetric encryption and is responsible for
+ // managing the lifecycle of the encryption keys outside of the control plane.
+ // This allows integration with an external provider to manage the data encryption keys securely.
+ //
+ // +openshift:enable:FeatureGate=KMSEncryptionProvider
+ // +unionMember
+ // +optional
+ KMS *KMSConfig `json:"kms,omitempty"`
+}
+
+// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";identity;aescbc;aesgcm
+// +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryptionProvider,enum="";identity;aescbc;aesgcm;KMS
+// +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryption,enum="";identity;aescbc;aesgcm;KMS
+type EncryptionType string
+
+const (
+ // identity refers to a type where no encryption is performed at the datastore layer.
+ // Resources are written as-is without encryption.
+ EncryptionTypeIdentity EncryptionType = "identity"
+
+ // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key
+ // is used to perform encryption at the datastore layer.
+ EncryptionTypeAESCBC EncryptionType = "aescbc"
+
+ // aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key
+ // is used to perform encryption at the datastore layer.
+ EncryptionTypeAESGCM EncryptionType = "aesgcm"
+
+ // kms refers to a type of encryption where the encryption keys are managed
+ // outside the control plane in a Key Management Service instance;
+ // encryption is still performed at the datastore layer.
+ EncryptionTypeKMS EncryptionType = "KMS"
+)
+
+type APIServerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type APIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []APIServer `json:"items"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_authentication.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_authentication.go new file mode 100644 index 0000000000..e7433281f4 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -0,0 +1,787 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings;ExternalOIDCWithUpstreamParity,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" + +// Authentication specifies cluster-wide settings for authentication (like OAuth and +// webhook token authenticators). The canonical name of an instance is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=authentications,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Authentication struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec AuthenticationSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status AuthenticationStatus `json:"status"` +} + +type AuthenticationSpec struct { + // type identifies the cluster managed, user facing authentication mode in use. + // Specifically, it manages the component that responds to login attempts. + // The default is IntegratedOAuth. + // +optional + Type AuthenticationType `json:"type"` + + // oauthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for an external OAuth server. 
+ // This discovery document can be viewed from its served location:
+ // oc get --raw '/.well-known/oauth-authorization-server'
+ // For further details, see the IETF Draft:
+ // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // If oauthMetadata.name is non-empty, this value has precedence
+ // over any metadata reference stored in status.
+ // The key "oauthMetadata" is used to locate the data.
+ // If specified and the config map or expected key is not found, no metadata is served.
+ // If the specified metadata is not valid, no metadata is served.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"`
+
+ // webhookTokenAuthenticators is DEPRECATED; setting it has no effect.
+ // +listType=atomic
+ WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"`
+
+ // webhookTokenAuthenticator configures a remote token reviewer.
+ // These remote authentication webhooks can be used to verify bearer tokens
+ // via the tokenreviews.authentication.k8s.io REST API. This is required to
+ // honor bearer tokens that are provisioned by an external authentication service.
+ //
+ // Can only be set if "Type" is set to "None".
+ //
+ // +optional
+ WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"`
+
+ // serviceAccountIssuer is the identifier of the bound service account token
+ // issuer.
+ // The default is https://kubernetes.default.svc
+ // WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the
+ // previous issuer value. Instead, the tokens issued by the previous service account issuer will continue to
+ // be trusted for a time period chosen by the platform (currently set to 24h).
+ // This time period is subject to change over time.
+ // This allows internal components to transition to use the new service account issuer without service disruption.
+ // +optional
+ ServiceAccountIssuer string `json:"serviceAccountIssuer"`
+
+ // oidcProviders are OIDC identity providers that can issue tokens for this cluster.
+ // Can only be set if "Type" is set to "OIDC".
+ //
+ // At most one provider can be configured.
+ //
+ // +listType=map
+ // +listMapKey=name
+ // +kubebuilder:validation:MaxItems=1
+ // +openshift:enable:FeatureGate=ExternalOIDC
+ // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings
+ // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity
+ // +optional
+ OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"`
+}
+
+type AuthenticationStatus struct {
+ // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0
+ // Authorization Server Metadata for the in-cluster integrated OAuth server.
+ // This discovery document can be viewed from its served location:
+ // oc get --raw '/.well-known/oauth-authorization-server'
+ // For further details, see the IETF Draft:
+ // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // This contains the observed value based on cluster state.
+ // An explicitly set value in spec.oauthMetadata has precedence over this field.
+ // This field has no meaning if authentication spec.type is not set to IntegratedOAuth.
+ // The key "oauthMetadata" is used to locate the data.
+ // If the config map or expected key is not found, no metadata is served.
+ // If the specified metadata is not valid, no metadata is served.
+ // The namespace for this config map is openshift-config-managed. + // +optional + IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` + + // oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. + // + // +listType=map + // +listMapKey=componentNamespace + // +listMapKey=componentName + // +kubebuilder:validation:MaxItems=20 + // +openshift:enable:FeatureGate=ExternalOIDC + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +optional + OIDCClients []OIDCClientStatus `json:"oidcClients"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} + +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings,enum="";None;IntegratedOAuth;OIDC +type AuthenticationType string + +const ( + // None means that no cluster managed authentication system is in place. + // Note that user login will only work if a manually configured system is in place and referenced in authentication spec via oauthMetadata and + // webhookTokenAuthenticator/oidcProviders + AuthenticationTypeNone AuthenticationType = "None" + + // IntegratedOAuth refers to the cluster managed OAuth server. + // It is configured via the top level OAuth config. + AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" + + // AuthenticationTypeOIDC refers to a configuration with an external + // OIDC server configured directly with the kube-apiserver. + AuthenticationTypeOIDC AuthenticationType = "OIDC" +) + +// deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. +// It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. +type DeprecatedWebhookTokenAuthenticator struct { + // kubeConfig contains kube config file data which describes how to access the remote webhook service. + // For further details, see: + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. + // The namespace for this secret is determined by the point of use. + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +// webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator +type WebhookTokenAuthenticator struct { + // kubeConfig references a secret that contains kube config file data which + // describes how to access the remote webhook service. + // The namespace for the referenced secret is openshift-config. 
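+ //
+ // A hedged sketch (the secret name is illustrative; the referenced secret
+ // must live in openshift-config and carry the kubeconfig under the
+ // "kubeConfig" key):
+ //
+ //   wta := WebhookTokenAuthenticator{
+ //       KubeConfig: SecretNameReference{Name: "webhook-authenticator"},
+ //   }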
+ // + // For further details, see: + // + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. + // +required + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +const ( + // OAuthMetadataKey is the key for the oauth authorization server metadata + OAuthMetadataKey = "oauthMetadata" + + // KubeConfigKey is the key for the kube config file data in a secret + KubeConfigKey = "kubeConfig" +) + +type OIDCProvider struct { + // name is a required field that configures the unique human-readable identifier associated with the identity provider. + // It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. + // + // name must not be an empty string (""). + // + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server. + // + // +required + Issuer TokenIssuer `json:"issuer"` + + // oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. + // oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. + // + // +listType=map + // +listMapKey=componentNamespace + // +listMapKey=componentName + // +kubebuilder:validation:MaxItems=20 + // +optional + OIDCClients []OIDCClientConfig `json:"oidcClients"` + + // claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity. + // + // +required + ClaimMappings TokenClaimMappings `json:"claimMappings"` + + // claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. + // + // Validation rules are joined via an AND operation. + // + // +listType=atomic + // +optional + ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` + + // userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. + // Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. + // If any rule in the chain of rules evaluates to 'false', authentication will fail. + // When specified, at least one rule must be specified and no more than 64 rules may be specified. + // + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=expression + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + UserValidationRules []TokenUserValidationRule `json:"userValidationRules,omitempty"` +} + +// +kubebuilder:validation:MinLength=1 +type TokenAudience string + +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="self.?discoveryURL.orValue(\"\").size() > 0 ? 
(self.issuerURL.size() == 0 || self.discoveryURL.find('^.+[^/]') != self.issuerURL.find('^.+[^/]')) : true",message="discoveryURL must be different from issuerURL" +type TokenIssuer struct { + // issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + // The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + // + // Must be at least 1 character and must not exceed 512 characters in length. + // Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. + // + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'",message="must use the 'https' scheme" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getQuery() == {}",message="must not have a query" + // +kubebuilder:validation:XValidation:rule="self.find('#(.+)$') == ''",message="must not have a fragment" + // +kubebuilder:validation:XValidation:rule="self.find('@') == ''",message="must not have user info" + // +kubebuilder:validation:MaxLength=512 + // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"issuerURL"` + + // audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. + // At least one of the entries must match the 'aud' claim in the JWT token. + // + // audiences must contain at least one entry and must not exceed ten entries. + // + // +listType=set + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=10 + // +required + Audiences []TokenAudience `json:"audiences"` + + // issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. + // + // When not specified, the system trust is used. + // + // When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. + // + // +optional + CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` + // discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. + // By default, the discovery URL is derived from `issuerURL` as "{issuerURL}/.well-known/openid-configuration". + // + // The discoveryURL must be a valid absolute HTTPS URL. + // It must not contain query parameters, user information, or fragments. + // Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). + // The discoveryURL value must be at least 1 character long and no longer than 2048 characters. 
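+ //
+ // A hedged sketch (URLs are illustrative):
+ //
+ //   issuer := TokenIssuer{
+ //       URL:          "https://login.example.com",
+ //       Audiences:    []TokenAudience{"openshift"},
+ //       DiscoveryURL: "https://login.example.com/tenant1/.well-known/openid-configuration",
+ //   }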
+ // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="discoveryURL must be a valid URL" + // +kubebuilder:validation:XValidation:rule="url(self).getScheme() == 'https'",message="discoveryURL must be a valid https URL" + // +kubebuilder:validation:XValidation:rule="url(self).getQuery().size() == 0",message="discoveryURL must not contain query parameters" + // +kubebuilder:validation:XValidation:rule="self.matches('^[^#]*$')",message="discoveryURL must not contain fragments" + // +kubebuilder:validation:XValidation:rule="!self.matches('^https://.+:.+@.+/.*$')",message="discoveryURL must not contain user info" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + DiscoveryURL string `json:"discoveryURL,omitempty"` +} + +type TokenClaimMappings struct { + // username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + // + // +required + Username UsernameClaimMapping `json:"username"` + + // groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + // + // When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + // + // For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. + // + // +optional + Groups PrefixedClaimMapping `json:"groups,omitempty"` + + // uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. + // + // When using uid.claim to specify the claim it must be a single string value. + // When using uid.expression the expression must result in a single string value. + // + // When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + // + // The current default is to use the 'sub' claim. + // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + UID *TokenClaimOrExpressionMapping `json:"uid,omitempty"` + + // extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. + // When omitted, no extra attributes will be present on the cluster identity. + // + // key values for extra mappings must be unique. + // A maximum of 32 extra attribute mappings may be provided. + // + // +optional + // +kubebuilder:validation:MaxItems=32 + // +listType=map + // +listMapKey=key + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + Extra []ExtraMapping `json:"extra,omitempty"` +} + +// TokenClaimMapping allows specifying a JWT token claim to be used when mapping claims from an authentication token to cluster identities. +type TokenClaimMapping struct { + // claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. + // + // +required + Claim string `json:"claim"` +} + +// TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities. +// +kubebuilder:validation:XValidation:rule="has(self.claim) ? 
!has(self.expression) : has(self.expression)",message="precisely one of claim or expression must be set"
+type TokenClaimOrExpressionMapping struct {
+ // claim is an optional field for specifying the JWT token claim that is used in the mapping.
+ // The value of this claim will be assigned to the field in which this mapping is associated.
+ //
+ // Precisely one of claim or expression must be set.
+ // claim must not be specified when expression is set.
+ // When specified, claim must be at least 1 character in length and must not exceed 256 characters in length.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:MinLength=1
+ Claim string `json:"claim,omitempty"`
+
+ // expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.
+ //
+ // CEL expressions have access to the token claims through a CEL variable, 'claims'.
+ // 'claims' is a map of claim names to claim values.
+ // For example, the 'sub' claim value can be accessed as 'claims.sub'.
+ // Nested claims can be accessed using dot notation ('claims.foo.bar').
+ //
+ // Precisely one of claim or expression must be set.
+ // expression must not be specified when claim is set.
+ // When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxLength=1024
+ // +kubebuilder:validation:MinLength=1
+ Expression string `json:"expression,omitempty"`
+}
+
+// ExtraMapping allows specifying a key and CEL expression to evaluate the key's value.
+// It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token.
+type ExtraMapping struct {
+ // key is a required field that specifies the string to use as the extra attribute key.
+ //
+ // key must be a domain-prefix path (e.g. 'example.org/foo').
+ // key must not exceed 510 characters in length.
+ // key must contain the '/' character, separating the domain and path characters.
+ // key must not be empty.
+ //
+ // The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain.
+ // It must not exceed 253 characters in length.
+ // It must start and end with an alphanumeric character.
+ // It must only contain lower case alphanumeric characters and '-' or '.'.
+ // It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io".
+ //
+ // The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'.
+ // It must not exceed 256 characters in length.
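+ //
+ // A hedged sketch (key and expression are illustrative):
+ //
+ //   em := ExtraMapping{
+ //       Key:             "example.org/groups",
+ //       ValueExpression: "claims.groups",
+ //   }
+ //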
+ // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=510 + // +kubebuilder:validation:XValidation:rule="self.contains('/')",message="key must contain the '/' character" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="the domain of the key must consist of only lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].size() <= 253",message="the domain of the key must not exceed 253 characters in length" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'kubernetes.io'",message="the domain 'kubernetes.io' is reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.kubernetes.io')",message="the subdomains '*.kubernetes.io' are reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'k8s.io'",message="the domain 'k8s.io' is reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.k8s.io')",message="the subdomains '*.k8s.io' are reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'openshift.io'",message="the domain 'openshift.io' is reserved for OpenShift use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.openshift.io')",message="the subdomains '*.openshift.io' are reserved for OpenShift use" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].matches('[A-Za-z0-9/\\\\-._~%!$&\\'()*+;=:]+')",message="the path of the key must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, apostrophe, '-', '.', '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=', and ':'" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].size() <= 256",message="the path of the key must not exceed 256 characters in length" + Key string `json:"key"` + + // valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. + // valueExpression must produce a string or string array value. + // "", [], and null are treated as the extra mapping not being present. + // Empty string values within an array are filtered out. + // + // CEL expressions have access to the token claims through a CEL variable, 'claims'. + // 'claims' is a map of claim names to claim values. + // For example, the 'sub' claim value can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation ('claims.foo.bar'). + // + // valueExpression must not exceed 1024 characters in length. + // valueExpression must not be empty. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + ValueExpression string `json:"valueExpression"` +} + +// OIDCClientConfig configures how platform clients interact with identity providers as an authentication method. +type OIDCClientConfig struct { + // componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + // + // It is used in combination with componentNamespace as a unique identifier. 
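+ //
+ // A hedged sketch of an OIDCClientConfig entry (all names illustrative):
+ //
+ //   client := OIDCClientConfig{
+ //       ComponentName:      "console",
+ //       ComponentNamespace: "openshift-console",
+ //       ClientID:           "console-oidc-client",
+ //       ClientSecret:       SecretNameReference{Name: "console-oidc-secret"},
+ //   }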
+ // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + ComponentName string `json:"componentName"` + + // componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + // + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +required + ComponentNamespace string `json:"componentNamespace"` + + // clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + // The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. + // + // clientID must not be an empty string (""). + // + // +kubebuilder:validation:MinLength=1 + // +required + ClientID string `json:"clientID"` + + // clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. + // + // When not specified, no client secret will be used when making authentication requests to the identity provider. + // + // When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. + // + // The client secret will be used when making authentication requests to the identity provider. + // + // Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. + // + // +optional + ClientSecret SecretNameReference `json:"clientSecret"` + + // extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + // This is useful if you have configured claim mappings that requires specific scopes to be requested beyond the standard OIDC scopes. + // + // When omitted, no additional scopes are requested. + // + // +listType=set + // +optional + ExtraScopes []string `json:"extraScopes"` +} + +// OIDCClientStatus represents the current state +// of platform components and how they interact with +// the configured identity providers. +type OIDCClientStatus struct { + // componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. + // It is used in combination with componentNamespace as a unique identifier. + // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + ComponentName string `json:"componentName"` + + // componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + // + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +required + ComponentNamespace string `json:"componentNamespace"` + + // currentOIDCClients is an optional list of clients that the component is currently using. + // + // Entries must have unique issuerURL/clientID pairs. + // + // +listType=map + // +listMapKey=issuerURL + // +listMapKey=clientID + // +optional + CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` + + // consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. + // + // consumingUsers must not exceed 5 entries. + // + // +kubebuilder:validation:MaxItems=5 + // +listType=set + // +optional + ConsumingUsers []ConsumingUser `json:"consumingUsers"` + + // conditions are used to communicate the state of the `oidcClients` entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If Available is true, the component is successfully using the configured client. + // If Degraded is true, that means something has gone wrong trying to handle the client configuration. + // If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. + // + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// OIDCClientReference is a reference to a platform component +// client configuration. +type OIDCClientReference struct { + // oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with. + // + // oidcProviderName must not be an empty string (""). + // + // +kubebuilder:validation:MinLength=1 + // +required + OIDCProviderName string `json:"oidcProviderName"` + + // issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. + // + // issuerURL must use the 'https' scheme. + // + // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +required + IssuerURL string `json:"issuerURL"` + + // clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. + // + // clientID must not be empty. + // + // +kubebuilder:validation:MinLength=1 + // +required + ClientID string `json:"clientID"` +} + +// +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +// +union +type UsernameClaimMapping struct { + // claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. + // + // claim must not be an empty string ("") and must not exceed 256 characters. + // + // +required + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=256 + Claim string `json:"claim"` + + // prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. + // + // Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). + // + // When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. 
+	//
+	// The prefix field must be set when prefixPolicy is 'Prefix'.
+	//
+	// When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.
+	//
+	// When omitted, this means no opinion and the platform is left to choose any prefixes that are applied, which is subject to change over time.
+	// Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'.
+	//
+	// As an example, consider the following scenario:
+	//
+	// `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,
+	// the JWT claims include "username":"userA" and "email":"userA@myoidc.tld",
+	// and `claim` is set to:
+	// - "username": the mapped value will be "https://myoidc.tld#userA"
+	// - "email": the mapped value will be "userA@myoidc.tld"
+	//
+	// +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"}
+	// +optional
+	// +unionDiscriminator
+	PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"`
+
+	// prefix configures the prefix that should be prepended to the value of the JWT claim.
+	//
+	// prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.
+	//
+	// +optional
+	// +unionMember
+	Prefix *UsernamePrefix `json:"prefix"`
+}
+
+// UsernamePrefixPolicy configures how prefixes should be applied to values extracted from the JWT claims during the process of mapping JWT claims to cluster identity attributes.
+// +enum
+type UsernamePrefixPolicy string
+
+var (
+	// NoOpinion lets the cluster assign prefixes. If the username claim is email, there is no prefix.
+	// If the username claim is anything else, it is prefixed by the issuerURL.
+	NoOpinion UsernamePrefixPolicy = ""
+
+	// NoPrefix means the username claim value will not have any prefix.
+	NoPrefix UsernamePrefixPolicy = "NoPrefix"
+
+	// Prefix means the prefix value must be specified. It cannot be empty.
+	Prefix UsernamePrefixPolicy = "Prefix"
+)
+
+// UsernamePrefix configures the string that should
+// be used as a prefix for username claim mappings.
+type UsernamePrefix struct {
+	// prefixString is a required field that configures the prefix that will be applied to the cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes.
+	//
+	// prefixString must not be an empty string ("").
+	//
+	// +kubebuilder:validation:MinLength=1
+	// +required
+	PrefixString string `json:"prefixString"`
+}
+
+// PrefixedClaimMapping configures a claim mapping
+// that allows for an optional prefix.
+type PrefixedClaimMapping struct {
+	TokenClaimMapping `json:",inline"`
+
+	// prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.
+	//
+	// When omitted (""), no prefix is applied to the cluster identity attribute.
+	//
+	// Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of strings "myoidc:a", "myoidc:b" and "myoidc:c".
+	//
+	// +optional
+	Prefix string `json:"prefix"`
+}
+
+// TokenValidationRuleType defines the type of token validation rule.
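The prefix-policy rules documented above are easiest to see executed. A minimal sketch using trimmed stand-in types (not the vendored API structs) and the document's own myoidc.tld example; the default branch encodes the currently documented `{issuerURL}#` behavior, which the comments say is subject to change:

```go
package main

import "fmt"

// Trimmed, hypothetical stand-in mirroring UsernameClaimMapping.
type usernameMapping struct {
	Claim        string
	PrefixPolicy string // "", "NoPrefix", or "Prefix"
	Prefix       string // used only when PrefixPolicy == "Prefix"
}

// mapUsername sketches the documented rules: 'Prefix' prepends the configured
// prefixString, 'NoPrefix' uses the raw claim value, and the empty (no opinion)
// policy currently prepends "{issuerURL}#" unless the claim is "email".
func mapUsername(m usernameMapping, issuerURL string, claims map[string]string) string {
	v := claims[m.Claim]
	switch m.PrefixPolicy {
	case "Prefix":
		return m.Prefix + v
	case "NoPrefix":
		return v
	default: // no opinion
		if m.Claim == "email" {
			return v
		}
		return issuerURL + "#" + v
	}
}

func main() {
	claims := map[string]string{"username": "userA", "email": "userA@myoidc.tld"}
	fmt.Println(mapUsername(usernameMapping{Claim: "username"}, "https://myoidc.tld", claims))
	// https://myoidc.tld#userA
	fmt.Println(mapUsername(usernameMapping{Claim: "email"}, "https://myoidc.tld", claims))
	// userA@myoidc.tld
}
```

The two printed values match the worked example in the prefixPolicy doc comment above.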
+// +enum
+// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="RequiredClaim";
+// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="RequiredClaim";
+// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDCWithUIDAndExtraClaimMappings,enum="RequiredClaim";
+// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDCWithUpstreamParity,enum="RequiredClaim";"CEL"
+type TokenValidationRuleType string
+
+const (
+	// TokenValidationRuleTypeRequiredClaim indicates that the token must contain a specific claim.
+	// Used as a value for TokenValidationRuleType.
+	TokenValidationRuleTypeRequiredClaim = "RequiredClaim"
+	// TokenValidationRuleTypeCEL indicates that the token validation is defined via a CEL expression.
+	// Used as a value for TokenValidationRuleType.
+	TokenValidationRuleTypeCEL = "CEL"
+)
+
+// TokenClaimValidationRule represents a validation rule based on token claims.
+// If type is RequiredClaim, requiredClaim must be set.
+// If type is CEL, cel must be set and requiredClaim must be omitted.
+//
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'RequiredClaim' ? has(self.requiredClaim) : !has(self.requiredClaim)",message="requiredClaim must be set when type is 'RequiredClaim', and forbidden otherwise"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.type) && self.type == 'CEL' ? has(self.cel) : !has(self.cel)",message="cel must be set when type is 'CEL', and forbidden otherwise"
+type TokenClaimValidationRule struct {
+	// type is a required field that configures the type of the validation rule.
+	//
+	// Allowed values are "RequiredClaim" and "CEL".
+	//
+	// When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value.
+	//
+	// When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression.
+	// +required
+	Type TokenValidationRuleType `json:"type"`
+
+	// requiredClaim allows configuring a required claim name and its expected value.
+	// This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value.
+	// The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider.
+	//
+	// +optional
+	RequiredClaim *TokenRequiredClaim `json:"requiredClaim,omitempty"`
+
+	// cel holds the CEL expression and message for validation.
+	// Must be set when type is 'CEL', and forbidden otherwise.
+	// +optional
+	// +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity
+	CEL TokenClaimValidationCELRule `json:"cel,omitempty,omitzero"`
+}
+
+type TokenRequiredClaim struct {
+	// claim is a required field that configures the name of the required claim.
+	// When taken from the JWT claims, claim must be a string value.
+	//
+	// claim must not be an empty string ("").
+	//
+	// +kubebuilder:validation:MinLength=1
+	// +required
+	Claim string `json:"claim"`
+
+	// requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims.
+	// If the value in the JWT claims does not match, the token will be rejected for authentication.
+	//
+	// requiredValue must not be an empty string ("").
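A sketch of how a consumer might dispatch on the discriminated union above: exactly the member matching type may be set, and a RequiredClaim rule passes only when the claim is present as a string and equals the required value. The types are trimmed stand-ins and the error messages are illustrative, not the API server's actual wiring:

```go
package main

import "fmt"

// Trimmed stand-ins for the union above; values are hypothetical.
type requiredClaim struct{ Claim, RequiredValue string }

type validationRule struct {
	Type          string // "RequiredClaim" or "CEL"
	RequiredClaim *requiredClaim
}

// checkRule enforces the union contract and the RequiredClaim semantics
// documented above.
func checkRule(r validationRule, claims map[string]any) error {
	switch r.Type {
	case "RequiredClaim":
		if r.RequiredClaim == nil {
			return fmt.Errorf("requiredClaim must be set when type is 'RequiredClaim'")
		}
		v, ok := claims[r.RequiredClaim.Claim].(string)
		if !ok || v != r.RequiredClaim.RequiredValue {
			return fmt.Errorf("token rejected: claim %q does not equal %q",
				r.RequiredClaim.Claim, r.RequiredClaim.RequiredValue)
		}
		return nil
	default:
		return fmt.Errorf("unhandled rule type %q", r.Type)
	}
}

func main() {
	rule := validationRule{
		Type:          "RequiredClaim",
		RequiredClaim: &requiredClaim{Claim: "aud", RequiredValue: "openshift"},
	}
	fmt.Println(checkRule(rule, map[string]any{"aud": "openshift"})) // <nil>
	fmt.Println(checkRule(rule, map[string]any{"aud": "other"}))     // token rejected: ...
}
```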
+ // + // +kubebuilder:validation:MinLength=1 + // +required + RequiredValue string `json:"requiredValue"` +} + +type TokenClaimValidationCELRule struct { + // expression is a CEL expression evaluated against token claims. + // expression is required, must be at least 1 character in length and must not exceed 1024 characters. + // The expression must return a boolean value where 'true' signals a valid token and 'false' an invalid one. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + // +required + Expression string `json:"expression,omitempty"` + + // message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. + // message must be at least 1 character in length and must not exceed 256 characters. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Message string `json:"message,omitempty"` +} + +// TokenUserValidationRule provides a CEL-based rule used to validate a token subject. +// Each rule contains a CEL expression that is evaluated against the token’s claims. +type TokenUserValidationRule struct { + // expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. + // + // The expression must evaluate to a boolean value. + // When the expression evaluates to 'true', the cluster user identity is considered valid. + // When the expression evaluates to 'false', the cluster user identity is not considered valid. + // expression must be at least 1 character in length and must not exceed 1024 characters. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + Expression string `json:"expression,omitempty"` + // message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. + // message must be at least 1 character in length and must not exceed 256 characters. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Message string `json:"message,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_build.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_build.go new file mode 100644 index 0000000000..dcde1fc5b8 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_build.go @@ -0,0 +1,132 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build configures the behavior of OpenShift builds for the entire cluster. +// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. +// +// The canonical name is "cluster" +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
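Before moving on to the Build types, a note on the CEL-based rules just defined: both TokenClaimValidationCELRule and TokenUserValidationRule require a boolean-valued expression. A library such as github.com/google/cel-go can evaluate such an expression over a claims map. A minimal sketch under that assumption; the expression, the variable declaration, and the claims are hypothetical, and the API server's real evaluation environment differs:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare a `claims` variable so expressions like the ones configured in
	// TokenClaimValidationCELRule can reference token claims.
	env, err := cel.NewEnv(cel.Variable("claims", cel.MapType(cel.StringType, cel.DynType)))
	if err != nil {
		panic(err)
	}
	// A boolean expression: 'true' signals a valid token, 'false' an invalid one.
	ast, iss := env.Compile(`claims.aud == "openshift" && has(claims.email)`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"claims": map[string]any{"aud": "openshift", "email": "userA@myoidc.tld"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token valid:", out.Value()) // token valid: true
}
```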
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=openshift-controller-manager,operatorOrdering=01
+// +openshift:capability=Build
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=builds,scope=Cluster
+// +kubebuilder:subresource:status
+type Build struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user-settable values for the build controller configuration
+	// +required
+	Spec BuildSpec `json:"spec"`
+}
+
+type BuildSpec struct {
+	// additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+	// should be trusted for image pushes and pulls during builds.
+	// The namespace for this config map is openshift-config.
+	//
+	// DEPRECATED: Additional CAs for image pull and push should be set on
+	// image.config.openshift.io/cluster instead.
+	//
+	// +optional
+	AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+	// buildDefaults controls the default information for Builds
+	// +optional
+	BuildDefaults BuildDefaults `json:"buildDefaults"`
+	// buildOverrides controls override settings for builds
+	// +optional
+	BuildOverrides BuildOverrides `json:"buildOverrides"`
+}
+
+type BuildDefaults struct {
+	// defaultProxy contains the default proxy settings for all build operations, including image pull/push
+	// and source download.
+	//
+	// Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
+	// in the build config's strategy.
+	// +optional
+	DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
+
+	// gitProxy contains the proxy settings for git operations only. If set, this will override
+	// any Proxy settings for all git commands, such as git clone.
+	//
+	// Values that are not set here will be inherited from DefaultProxy.
+	// +optional
+	GitProxy *ProxySpec `json:"gitProxy,omitempty"`
+
+	// env is a set of default environment variables that will be applied to the
+	// build if the specified variables do not exist on the build
+	// +optional
+	Env []corev1.EnvVar `json:"env,omitempty"`
+
+	// imageLabels is a list of docker labels that are applied to the resulting image.
+	// User can override a default label by providing a label with the same name in their
+	// Build/BuildConfig.
+	// +optional
+	ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+	// resources defines resource requirements to execute the build.
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources"`
+}
+
+type ImageLabel struct {
+	// name defines the name of the label. It must have non-zero length.
+	Name string `json:"name"`
+
+	// value defines the literal value of the label.
+	// +optional
+	Value string `json:"value,omitempty"`
+}
+
+type BuildOverrides struct {
+	// imageLabels is a list of docker labels that are applied to the resulting image.
+	// If the user provided a label in their Build/BuildConfig with the same name as one in this
+	// list, the user's label will be overwritten.
+	// +optional
+	ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+	// nodeSelector is a selector which must be true for the build pod to fit on a node
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// tolerations is a list of Tolerations that will override any existing
+	// tolerations set on a build pod.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// forcePull overrides, if set, the equivalent value in the builds,
+	// i.e. false disables force pull for all builds,
+	// true enables force pull for all builds,
+	// independently of what each build specifies itself
+	// +optional
+	ForcePull *bool `json:"forcePull,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Build `json:"items"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go
new file mode 100644
index 0000000000..491390098c
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go
@@ -0,0 +1,87 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterImagePolicy holds cluster-wide configuration for image signature verification
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=SigstoreImageVerification
+// +openshift:compatibility-gen:level=1
+type ClusterImagePolicy struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec contains the configuration for the cluster image policy.
+	// +required
+	Spec ClusterImagePolicySpec `json:"spec"`
+	// status contains the observed state of the resource.
+	// +optional
+	Status ClusterImagePolicyStatus `json:"status"`
+}
+
+// ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.
+type ClusterImagePolicySpec struct {
+	// scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2".
+	// Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest).
+	// More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository
+	// namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number).
+	// Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not.
+	// This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored.
+	// In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories
+	// quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation.
+	// If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
+	// For additional details about the format, please refer to the document explaining the docker transport field,
+	// which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
+	// +required
+	// +kubebuilder:validation:MaxItems=256
+	// +listType=set
+	Scopes []ImageScope `json:"scopes"`
+	// policy is a required field that contains configuration to allow scopes to be verified, and defines how
+	// images not matching the verification policy will be treated.
+	// +required
+	Policy ImageSigstoreVerificationPolicy `json:"policy"`
+}
+
+// +k8s:deepcopy-gen=true
+type ClusterImagePolicyStatus struct {
+	// conditions provide details on the status of this API Resource.
+	// +kubebuilder:validation:MaxItems=8
+	// +kubebuilder:validation:MinItems=1
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterImagePolicyList is a list of ClusterImagePolicy resources
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterImagePolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
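An editorial aside on the "most specific scope wins" rule documented above. A deliberately simplified sketch: a scope matches when it equals the image reference or is a prefix of it at a path, tag, or digest boundary, and specificity is approximated by scope length. The real matcher also understands `*.` subdomain wildcards, and the scope strings here are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// mostSpecificScope returns the longest scope that matches the image
// reference, or "" when no scope matches.
func mostSpecificScope(image string, scopes []string) string {
	best := ""
	for _, s := range scopes {
		matches := image == s ||
			strings.HasPrefix(image, s+"/") || // repository namespace or host
			strings.HasPrefix(image, s+":") || // tag boundary
			strings.HasPrefix(image, s+"@") // digest boundary
		if matches && len(s) > len(best) {
			best = s
		}
	}
	return best
}

func main() {
	scopes := []string{
		"quay.io",                                   // registry host
		"quay.io/openshift-release-dev",             // repository namespace
		"quay.io/openshift-release-dev/ocp-release", // repository
	}
	img := "quay.io/openshift-release-dev/ocp-release:4.22.0-x86_64"
	fmt.Println(mostSpecificScope(img, scopes))
	// quay.io/openshift-release-dev/ocp-release
}
```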
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +required
+	metav1.ListMeta `json:"metadata"`
+
+	// items is a list of ClusterImagePolicies
+	// +kubebuilder:validation:MaxItems=1000
+	// +required
+	Items []ClusterImagePolicy `json:"items"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
new file mode 100644
index 0000000000..8323040389
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -0,0 +1,225 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterOperator holds the status of a core or optional OpenShift component
+// managed by the Cluster Version Operator (CVO). This object is used by
+// operators to convey their state to the rest of the cluster.
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497
+// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusteroperators,scope=Cluster,shortName=co
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name=Version,JSONPath=.status.versions[?(@.name=="operator")].version,type=string,description=The version the operator is at.
+// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string,description=Whether the operator is running and stable.
+// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string,description=Whether the operator is processing changes.
+// +kubebuilder:printcolumn:name=Degraded,JSONPath=.status.conditions[?(@.type=="Degraded")].status,type=string,description=Whether the operator is degraded.
+// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Available")].lastTransitionTime,type=date,description=The time the operator's Available status last changed.
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+type ClusterOperator struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec holds configuration that could apply to any operator.
+	// +required
+	Spec ClusterOperatorSpec `json:"spec"`
+
+	// status holds the information about the state of an operator. It is consistent with status information across
+	// the Kubernetes ecosystem.
+	// +optional
+	Status ClusterOperatorStatus `json:"status"`
+}
+
+// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause".
+type ClusterOperatorSpec struct {
+}
+
+// ClusterOperatorStatus provides information about the status of the operator.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatus struct {
+	// conditions describes the state of the operator's managed and monitored components.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+	// versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple
+	// operand entries in the array. Available operators must report the version of the operator itself with the name "operator".
+	// An operator reports a new "operator" version when it has rolled out the new version to all of its operands.
+	// +optional
+	Versions []OperandVersion `json:"versions,omitempty"`
+
+	// relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are:
+	// 1. the detailed resource driving the operator
+	// 2. operator namespaces
+	// 3. operand namespaces
+	// +optional
+	RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"`
+
+	// extension contains any additional status information specific to the
+	// operator which owns this status object.
+	// +nullable
+	// +optional
+	// +kubebuilder:pruning:PreserveUnknownFields
+	Extension runtime.RawExtension `json:"extension"`
+}
+
+type OperandVersion struct {
+	// name is the name of the particular operand this version is for. It usually matches container images, not operators.
+	// +required
+	Name string `json:"name"`
+
+	// version indicates which version of a particular operand is currently being managed. It must always match the Available
+	// operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out
+	// 1.1.0.
+	// +required
+	Version string `json:"version"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+	// group of the referent.
+	// +required
+	Group string `json:"group"`
+	// resource of the referent.
+	// +required
+	Resource string `json:"resource"`
+	// namespace of the referent.
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// name of the referent.
+	// +required
+	Name string `json:"name"`
+}
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ClusterOperatorStatusCondition represents the state of the operator's
+// managed and monitored components.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatusCondition struct {
+	// type specifies the aspect reported by this condition.
+	// +required
+	Type ClusterStatusConditionType `json:"type"`
+
+	// status of the condition, one of True, False, Unknown.
+	// +required
+	Status ConditionStatus `json:"status"`
+
+	// lastTransitionTime is the time of the last update to the current status property.
+	// +required
+	LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+
+	// reason is the CamelCase reason for the condition's current status.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+
+	// message provides additional information about the current condition.
+	// This is only to be consumed by humans.
It may contain Line Feed
+	// characters (U+000A), which should be rendered as new lines.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// ClusterStatusConditionType is an aspect of operator state.
+type ClusterStatusConditionType string
+
+const (
+	// Available indicates that the component (operator and all configured operands)
+	// is functional and available in the cluster. Available=False means at least
+	// part of the component is non-functional, and that the condition requires
+	// immediate administrator intervention.
+	// A component must not report Available=False during the course of a normal upgrade.
+	OperatorAvailable ClusterStatusConditionType = "Available"
+
+	// Progressing indicates that the component (operator and all configured operands)
+	// is actively rolling out new code, propagating config changes (e.g., a version change), or otherwise
+	// moving from one steady state to another. Operators should not report
+	// Progressing when they are reconciling (without action) a previously known
+	// state. Operators should not report Progressing only because DaemonSets owned by them
+	// are adjusting to a new node from cluster scaleup or a node rebooting from cluster upgrade.
+	// If the observed cluster state has changed and the component is
+	// reacting to it (updated proxy configuration for instance), Progressing should become true
+	// since it is moving from one steady state to another.
+	// A component in a cluster with fewer than 250 nodes must complete a version
+	// change within a limited period of time: 90 minutes for Machine Config Operator and 20 minutes for others.
+	// Machine Config Operator is given more time as it needs to restart control plane nodes.
+	OperatorProgressing ClusterStatusConditionType = "Progressing"
+
+	// Degraded indicates that the component (operator and all configured operands)
+	// does not match its desired state over a period of time resulting in a lower
+	// quality of service. The period of time may vary by component, but a Degraded
+	// state represents persistent observation of a condition. As a result, a
+	// component should not oscillate in and out of Degraded state. A component may
+	// be Available even if it is degraded. For example, a component may desire 3
+	// running pods, but 1 pod is crash-looping. The component is Available but
+	// Degraded because it may have a lower quality of service. A component may be
+	// Progressing but not Degraded because the transition from one state to
+	// another does not persist over a long enough period to report Degraded. A
+	// component must not report Degraded during the course of a normal upgrade.
+	// A component may report Degraded in response to a persistent infrastructure
+	// failure that requires eventual administrator intervention. For example, if
+	// a control plane host is unhealthy and must be replaced. A component should
+	// report Degraded if unexpected errors occur over a period, but the
+	// expectation is that all unexpected errors are handled as operators mature.
+	OperatorDegraded ClusterStatusConditionType = "Degraded"
+
+	// Upgradeable indicates whether the component (operator and all configured
+	// operands) is safe to upgrade based on the current cluster state. When
+	// Upgradeable is False, the cluster-version operator will prevent the
+	// cluster from performing impacted updates unless forced. When set on
+	// ClusterVersion, the message will explain which updates (minor or patch)
+	// are impacted.
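A sketch of the condition bookkeeping these types imply: conditions are keyed by type (listMapKey=type above), and lastTransitionTime is documented as the time of the last update to the current status, so it should move only when status actually changes. Trimmed stand-in types rather than the vendored API structs:

```go
package main

import (
	"fmt"
	"time"
)

// Trimmed stand-ins mirroring ClusterOperatorStatusCondition.
type condition struct {
	Type               string
	Status             string // "True", "False", "Unknown"
	LastTransitionTime time.Time
	Reason, Message    string
}

// setCondition upserts a condition by Type; an unchanged Status keeps its
// existing lastTransitionTime, a changed Status gets a fresh one.
func setCondition(conds []condition, c condition) []condition {
	c.LastTransitionTime = time.Now()
	for i := range conds {
		if conds[i].Type != c.Type {
			continue
		}
		if conds[i].Status == c.Status {
			c.LastTransitionTime = conds[i].LastTransitionTime
		}
		conds[i] = c
		return conds
	}
	return append(conds, c)
}

func main() {
	var conds []condition
	conds = setCondition(conds, condition{Type: "Available", Status: "True", Reason: "AsExpected"})
	// Re-reporting the same status does not bump the transition time.
	conds = setCondition(conds, condition{Type: "Available", Status: "True", Reason: "AsExpected"})
	fmt.Printf("%+v\n", conds)
}
```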
When set on ClusterOperator, False will block minor
+	// OpenShift updates. The message field should contain a human-readable
+	// description of what the administrator should do to allow the cluster or
+	// component to successfully update. The cluster-version operator will
+	// allow updates when this condition is not False, including when it is
+	// missing, True, or Unknown.
+	OperatorUpgradeable ClusterStatusConditionType = "Upgradeable"
+
+	// EvaluationConditionsDetected is used to indicate the result of the detection
+	// logic that was added to a component to evaluate the introduction of an
+	// invasive change that could potentially result in highly visible alerts,
+	// breakages or upgrade failures. You can concatenate multiple Reasons using
+	// the "::" delimiter if you need to evaluate the introduction of multiple changes.
+	EvaluationConditionsDetected ClusterStatusConditionType = "EvaluationConditionsDetected"
+)
+
+// ClusterOperatorList is a list of OperatorStatus resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type ClusterOperatorList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterOperator `json:"items"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
new file mode 100644
index 0000000000..5f36f693de
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -0,0 +1,981 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterVersion is the configuration for the ClusterVersionOperator. This is where
+// parameters related to automatic updates can be set.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/495
+// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=clusterversions,scope=Cluster
+// +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'marketplace' in self.spec.capabilities.additionalEnabledCapabilities ?
'OperatorLifecycleManager' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'OperatorLifecycleManager' in self.status.capabilities.enabledCapabilities) : true",message="the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly nor implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability"
+// +kubebuilder:printcolumn:name=Version,JSONPath=.status.history[?(@.state=="Completed")].version,type=string
+// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string
+// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string
+// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Progressing")].lastTransitionTime,type=date
+// +kubebuilder:printcolumn:name=Status,JSONPath=.status.conditions[?(@.type=="Progressing")].message,type=string
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+type ClusterVersion struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec is the desired state of the cluster version - the operator will work
+	// to ensure that the desired version is applied to the cluster.
+	// +required
+	Spec ClusterVersionSpec `json:"spec"`
+	// status contains information about the available updates and any in-progress
+	// updates.
+	// +optional
+	Status ClusterVersionStatus `json:"status"`
+}
+
+// ClusterVersionSpec is the desired version state of the cluster. It includes
+// the version the cluster should be at, how the cluster is identified, and
+// where the cluster should look for version updates.
+// +k8s:deepcopy-gen=true
+type ClusterVersionSpec struct {
+	// clusterID uniquely identifies this cluster. This is expected to be
+	// an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
+	// hexadecimal values). This is a required field.
+	// +required
+	ClusterID ClusterID `json:"clusterID"`
+
+	// desiredUpdate is an optional field that indicates the desired value of
+	// the cluster version. Setting this value will trigger an upgrade (if
+	// the current version does not match the desired version). The set of
+	// recommended update values is listed as part of available updates in
+	// status, and setting values outside that range may cause the upgrade
+	// to fail.
+	//
+	// Some of the fields are inter-related with restrictions and meanings described here.
+	// 1. image is specified, version is specified, architecture is specified. API validation error.
+	// 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version.
+	// 3. image is specified, version is not specified, architecture is specified. API validation error.
+	// 4. image is specified, version is not specified, architecture is not specified. image is used.
+	// 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image.
+	// 6. image is not specified, version is specified, architecture is not specified.
version and current architecture are used to select an image.
+	// 7. image is not specified, version is not specified, architecture is specified. API validation error.
+	// 8. image is not specified, version is not specified, architecture is not specified. API validation error.
+	//
+	// If an upgrade fails, the operator will halt and report status
+	// about the failing component. Setting the desired update value back to
+	// the previous version will cause a rollback to be attempted if the
+	// previous version is within the current minor version. Not all
+	// rollbacks will succeed, and some may unrecoverably break the
+	// cluster.
+	//
+	// +optional
+	DesiredUpdate *Update `json:"desiredUpdate,omitempty"`
+
+	// upstream may be used to specify the preferred update server. By default
+	// it will use the appropriate update server for the cluster and region.
+	//
+	// +optional
+	Upstream URL `json:"upstream,omitempty"`
+	// channel is an identifier for explicitly requesting a non-default set
+	// of updates to be applied to this cluster. The default channel will
+	// contain stable updates that are appropriate for production clusters.
+	//
+	// +optional
+	Channel string `json:"channel,omitempty"`
+
+	// capabilities configures the installation of optional, core
+	// cluster components. A null value here is identical to an
+	// empty object; see the child properties for default semantics.
+	// +optional
+	Capabilities *ClusterVersionCapabilitiesSpec `json:"capabilities,omitempty"`
+
+	// signatureStores contains the upstream URIs to verify release signatures and optional
+	// reference to a config map by name containing the PEM-encoded CA bundle.
+	//
+	// By default, CVO will use existing signature stores if this property is empty.
+	// The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature
+	// in these stores in parallel only when local ConfigMaps did not include a valid signature.
+	// Validation will fail if none of the signature stores reply with a valid signature before timeout.
+	// Setting signatureStores will replace the default signature stores with custom signature stores.
+	// Default stores can be used with custom signature stores by adding them manually.
+	//
+	// A maximum of 32 signature stores may be configured.
+	// +kubebuilder:validation:MaxItems=32
+	// +openshift:enable:FeatureGate=SignatureStores
+	// +listType=map
+	// +listMapKey=url
+	// +optional
+	SignatureStores []SignatureStore `json:"signatureStores"`
+
+	// overrides is a list of overrides for components that are managed by
+	// the cluster version operator. Marking a component unmanaged will prevent
+	// the operator from creating or updating the object.
+	// +listType=map
+	// +listMapKey=kind
+	// +listMapKey=group
+	// +listMapKey=namespace
+	// +listMapKey=name
+	// +optional
+	Overrides []ComponentOverride `json:"overrides,omitempty"`
+}
+
+// ClusterVersionStatus reports the status of the cluster versioning,
+// including any upgrades that are in progress. The current field will
+// be set to whichever version the cluster is reconciling to, and the
+// conditions array will report whether the update succeeded, is in
+// progress, or is failing.
+// +k8s:deepcopy-gen=true
+type ClusterVersionStatus struct {
+	// desired is the version that the cluster is reconciling towards.
+	// If the cluster is not yet fully initialized, desired will be set
+	// with the information available, which may be an image or a tag.
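The eight image/version/architecture cases enumerated for desiredUpdate above collapse into two validation errors plus four accepted shapes. A minimal sketch with a trimmed, hypothetical stand-in for Update (the error strings are illustrative, not the API server's messages):

```go
package main

import (
	"errors"
	"fmt"
)

// Trimmed stand-in for Update; only the three inter-related fields.
type update struct {
	Architecture, Version, Image string
}

// validateDesiredUpdate sketches the documented cases: image together with
// architecture is rejected (cases 1 and 3), and at least one of version or
// image must be present (cases 7 and 8); the remaining shapes (cases 2, 4,
// 5 and 6) describe which inputs drive image selection.
func validateDesiredUpdate(u update) error {
	hasArch, hasVer, hasImg := u.Architecture != "", u.Version != "", u.Image != ""
	switch {
	case hasImg && hasArch:
		return errors.New("cannot set both architecture and image")
	case !hasImg && !hasVer:
		return errors.New("one of version or image must be set")
	}
	return nil
}

func main() {
	// Case 6: version only; the current architecture selects the image.
	fmt.Println(validateDesiredUpdate(update{Version: "4.22.1"})) // <nil>
	// Case 3: image plus architecture is an API validation error.
	fmt.Println(validateDesiredUpdate(update{Architecture: "Multi", Image: "quay.io/x"}))
}
```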
+ // +required + Desired Release `json:"desired"` + + // history contains a list of the most recent versions applied to the cluster. + // This value may be empty during cluster startup, and then will be updated + // when a new update is being applied. The newest update is first in the + // list and it is ordered by recency. Updates in the history have state + // Completed if the rollout completed - if an update was failing or halfway + // applied the state will be Partial. Only a limited amount of update history + // is preserved. + // +listType=atomic + // +optional + History []UpdateHistory `json:"history,omitempty"` + + // observedGeneration reports which version of the spec is being synced. + // If this value is not equal to metadata.generation, then the desired + // and conditions fields may represent a previous version. + // +required + ObservedGeneration int64 `json:"observedGeneration"` + + // versionHash is a fingerprint of the content that the cluster will be + // updated with. It is used by the operator to avoid unnecessary work + // and is for internal use only. + // +required + VersionHash string `json:"versionHash"` + + // capabilities describes the state of optional, core cluster components. + // +optional + Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"` + + // conditions provides information about the cluster version. The condition + // "Available" is set to true if the desiredUpdate has been reached. The + // condition "Progressing" is set to true if an update is being applied. + // The condition "Degraded" is set to true if an update is currently blocked + // by a temporary or permanent error. Conditions are only valid for the + // current desiredUpdate when metadata.generation is equal to + // status.generation. + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // availableUpdates contains updates recommended for this + // cluster. Updates which appear in conditionalUpdates but not in + // availableUpdates may expose this cluster to known issues. This list + // may be empty if no updates are recommended, if the update service + // is unavailable, or if an invalid channel has been specified. + // +nullable + // +listType=atomic + // +required + AvailableUpdates []Release `json:"availableUpdates"` + + // conditionalUpdates contains the list of updates that may be + // recommended for this cluster if it meets specific required + // conditions. Consumers interested in the set of updates that are + // actually recommended for this cluster should use + // availableUpdates. This list may be empty if no updates are + // recommended, if the update service is unavailable, or if an empty + // or invalid channel has been specified. + // +kubebuilder:validation:MaxItems=500 + // +listType=atomic + // +optional + ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"` + + // conditionalUpdateRisks contains the list of risks associated with conditionalUpdates. + // When performing a conditional update, all its associated risks will be compared with the set of accepted risks in the spec.desiredUpdate.acceptRisks field. + // If all risks for a conditional update are included in the spec.desiredUpdate.acceptRisks set, the conditional update can proceed, otherwise it is blocked. + // The risk names in the list must be unique. 
+	//
+	// conditionalUpdateRisks must not contain more than 500 entries.
+	// +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks
+	// +kubebuilder:validation:MaxItems=500
+	// +kubebuilder:validation:MinItems=1
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	ConditionalUpdateRisks []ConditionalUpdateRisk `json:"conditionalUpdateRisks,omitempty"`
+}
+
+// UpdateState is a constant representing whether an update was successfully
+// applied to the cluster or not.
+type UpdateState string
+
+const (
+	// CompletedUpdate indicates an update was successfully applied
+	// to the cluster (all resource updates were successful).
+	CompletedUpdate UpdateState = "Completed"
+	// PartialUpdate indicates an update was never completely applied
+	// or is currently being applied.
+	PartialUpdate UpdateState = "Partial"
+)
+
+// UpdateHistory is a single attempted update to the cluster.
+type UpdateHistory struct {
+	// state reflects whether the update was fully applied. The Partial state
+	// indicates the update is not fully applied, while the Completed state
+	// indicates the update was successfully rolled out at least once (all
+	// parts of the update successfully applied).
+	// +required
+	State UpdateState `json:"state"`
+
+	// startedTime is the time at which the update was started.
+	// +required
+	StartedTime metav1.Time `json:"startedTime"`
+
+	// completionTime, if set, is when the update was fully applied. The update
+	// that is currently being applied will have a null completion time.
+	// Completion time will always be set for entries that are not the current
+	// update (usually to the started time of the next update).
+	// +required
+	// +nullable
+	CompletionTime *metav1.Time `json:"completionTime"`
+
+	// version is a semantic version identifying the update version. If the
+	// requested image does not define a version, or if a failure occurs
+	// retrieving the image, this value may be empty.
+	//
+	// +optional
+	Version string `json:"version"`
+
+	// image is a container image location that contains the update. This value
+	// is always populated.
+	// +required
+	Image string `json:"image"`
+
+	// verified indicates whether the provided update was properly verified
+	// before it was installed. If this is false, the cluster may not be trusted.
+	// Verified does not cover upgradeable checks that depend on the cluster
+	// state at the time when the update target was accepted.
+	// +required
+	Verified bool `json:"verified"`
+
+	// acceptedRisks records risks which were accepted to initiate the update.
+	// For example, it may mention an Upgradeable=False or missing signature
+	// that was overridden via desiredUpdate.force, or an update that was
+	// initiated despite not being in the availableUpdates set of recommended
+	// update targets.
+	// +optional
+	AcceptedRisks string `json:"acceptedRisks,omitempty"`
+}
+
+// ClusterID is an RFC4122 UUID string.
+type ClusterID string
+
+// ClusterVersionArchitecture enumerates valid cluster architectures.
+// +kubebuilder:validation:Enum="Multi";""
+type ClusterVersionArchitecture string
+
+const (
+	// ClusterVersionArchitectureMulti identifies a multi architecture. A multi
+	// architecture cluster is capable of running nodes with multiple architectures.
+	ClusterVersionArchitectureMulti ClusterVersionArchitecture = "Multi"
+)
+
+// ClusterVersionCapability enumerates optional, core cluster components.
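The acceptRisks gating described for conditionalUpdateRisks above is a subset check: a conditional update proceeds only when every one of its risk names appears in the spec.desiredUpdate.acceptRisks set. A small sketch with hypothetical risk names:

```go
package main

import "fmt"

// updateAllowed returns true only when every risk name for a conditional
// update is present in the accepted set, mirroring the documented rule.
func updateAllowed(riskNames, acceptRisks []string) bool {
	accepted := make(map[string]bool, len(acceptRisks))
	for _, r := range acceptRisks {
		accepted[r] = true
	}
	for _, r := range riskNames {
		if !accepted[r] {
			return false
		}
	}
	return true
}

func main() {
	risks := []string{"SomeRegressionRisk", "DriverIncompatibility"} // hypothetical names
	fmt.Println(updateAllowed(risks, []string{"SomeRegressionRisk"}))
	// false: one risk is not accepted, so the update is blocked
	fmt.Println(updateAllowed(risks, []string{"SomeRegressionRisk", "DriverIncompatibility"}))
	// true: all risks accepted, the update can proceed
}
```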
+// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager;OperatorLifecycleManagerV1
+type ClusterVersionCapability string
+
+const (
+	// ClusterVersionCapabilityOpenShiftSamples manages the sample
+	// image streams and templates stored in the openshift
+	// namespace, and any registry credentials, stored as a secret,
+	// needed for the image streams to import the images they
+	// reference.
+	ClusterVersionCapabilityOpenShiftSamples ClusterVersionCapability = "openshift-samples"
+
+	// ClusterVersionCapabilityBaremetal manages the cluster
+	// baremetal operator which is responsible for running the metal3
+	// deployment.
+	ClusterVersionCapabilityBaremetal ClusterVersionCapability = "baremetal"
+
+	// ClusterVersionCapabilityMarketplace manages the Marketplace operator which
+	// supplies Operator Lifecycle Manager (OLM) users with default catalogs of
+	// "optional" operators.
+	//
+	// Note that Marketplace has a hard requirement on OLM. OLM can not be disabled
+	// while Marketplace is enabled.
+	ClusterVersionCapabilityMarketplace ClusterVersionCapability = "marketplace"
+
+	// ClusterVersionCapabilityConsole manages the Console operator which
+	// installs and maintains the web console.
+	ClusterVersionCapabilityConsole ClusterVersionCapability = "Console"
+
+	// ClusterVersionCapabilityInsights manages the Insights operator which
+	// collects anonymized information about the cluster to generate
+	// recommendations for possible cluster issues.
+	ClusterVersionCapabilityInsights ClusterVersionCapability = "Insights"
+
+	// ClusterVersionCapabilityStorage manages the storage operator which
+	// is responsible for providing cluster-wide storage defaults.
+	// WARNING: Do not disable this capability when deployed to
+	// RHEV and OpenStack without reading the docs.
+	// These clusters heavily rely on that capability, and disabling it may
+	// cause damage to the cluster.
+	ClusterVersionCapabilityStorage ClusterVersionCapability = "Storage"
+
+	// ClusterVersionCapabilityCSISnapshot manages the csi snapshot
+	// controller operator which is responsible for watching the
+	// VolumeSnapshot CRD objects and managing the creation and deletion
+	// lifecycle of volume snapshots
+	ClusterVersionCapabilityCSISnapshot ClusterVersionCapability = "CSISnapshot"
+
+	// ClusterVersionCapabilityNodeTuning manages the Node Tuning Operator
+	// which is responsible for watching the Tuned and Profile CRD
+	// objects and managing the containerized TuneD daemon which controls
+	// system level tuning of Nodes
+	ClusterVersionCapabilityNodeTuning ClusterVersionCapability = "NodeTuning"
+
+	// ClusterVersionCapabilityMachineAPI manages
+	// machine-api-operator
+	// cluster-autoscaler-operator
+	// cluster-control-plane-machine-set-operator
+	// which is responsible for machines configuration and heavily
+	// targeted for SNO clusters.
+	//
+	// The following CRDs are disabled as well
+	// machines
+	// machineset
+	// controlplanemachineset
+	//
+	// WARNING: Do not disable that capability without reading
+	// documentation. This is an important part of the OpenShift system
+	// and disabling it may cause cluster damage.
+	ClusterVersionCapabilityMachineAPI ClusterVersionCapability = "MachineAPI"
+
+	// ClusterVersionCapabilityBuild manages the Build API which is responsible
+	// for watching the Build API objects and managing their lifecycle.
+	// The functionality is located under openshift-apiserver and openshift-controller-manager.
+	//
+	// The following resources are taken into account:
+	// - builds
+	// - buildconfigs
+	ClusterVersionCapabilityBuild ClusterVersionCapability = "Build"
+
+	// ClusterVersionCapabilityDeploymentConfig manages the DeploymentConfig API
+	// which is responsible for watching the DeploymentConfig API objects and managing their lifecycle.
+	// The functionality is located under openshift-apiserver and openshift-controller-manager.
+	//
+	// The following resources are taken into account:
+	// - deploymentconfigs
+	ClusterVersionCapabilityDeploymentConfig ClusterVersionCapability = "DeploymentConfig"
+
+	// ClusterVersionCapabilityImageRegistry manages the image registry which
+	// allows distributing Docker images
+	ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry"
+
+	// ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager (legacy)
+	// which itself manages the lifecycle of operators
+	ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager"
+
+	// ClusterVersionCapabilityOperatorLifecycleManagerV1 manages the Operator Lifecycle Manager (v1)
+	// which itself manages the lifecycle of operators
+	ClusterVersionCapabilityOperatorLifecycleManagerV1 ClusterVersionCapability = "OperatorLifecycleManagerV1"
+
+	// ClusterVersionCapabilityCloudCredential manages credentials for cloud providers
+	// in an OpenShift cluster
+	ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential"
+
+	// ClusterVersionCapabilityIngress manages the cluster ingress operator
+	// which is responsible for running the ingress controllers (including OpenShift router).
+	//
+	// The following CRDs are part of the capability as well:
+	// IngressController
+	// DNSRecord
+	// GatewayClass
+	// Gateway
+	// HTTPRoute
+	// ReferenceGrant
+	//
+	// WARNING: This capability cannot be disabled on standalone OpenShift.
+	ClusterVersionCapabilityIngress ClusterVersionCapability = "Ingress"
+
+	// ClusterVersionCapabilityCloudControllerManager manages various Cloud Controller
+	// Managers deployed on top of OpenShift. They help you work with the cloud
+	// provider API and embed cloud-specific control logic.
+	ClusterVersionCapabilityCloudControllerManager ClusterVersionCapability = "CloudControllerManager"
+)
+
+// KnownClusterVersionCapabilities includes all known optional, core cluster components.
+var KnownClusterVersionCapabilities = []ClusterVersionCapability{
+	ClusterVersionCapabilityBaremetal,
+	ClusterVersionCapabilityConsole,
+	ClusterVersionCapabilityInsights,
+	ClusterVersionCapabilityMarketplace,
+	ClusterVersionCapabilityStorage,
+	ClusterVersionCapabilityOpenShiftSamples,
+	ClusterVersionCapabilityCSISnapshot,
+	ClusterVersionCapabilityNodeTuning,
+	ClusterVersionCapabilityMachineAPI,
+	ClusterVersionCapabilityBuild,
+	ClusterVersionCapabilityDeploymentConfig,
+	ClusterVersionCapabilityImageRegistry,
+	ClusterVersionCapabilityOperatorLifecycleManager,
+	ClusterVersionCapabilityOperatorLifecycleManagerV1,
+	ClusterVersionCapabilityCloudCredential,
+	ClusterVersionCapabilityIngress,
+	ClusterVersionCapabilityCloudControllerManager,
+}
+
+// ClusterVersionCapabilitySet defines sets of cluster version capabilities.
+// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;v4.16;v4.17;v4.18;vCurrent +type ClusterVersionCapabilitySet string + +const ( + // ClusterVersionCapabilitySetNone is an empty set enabling + // no optional capabilities. + ClusterVersionCapabilitySetNone ClusterVersionCapabilitySet = "None" + + // ClusterVersionCapabilitySet4_11 is the recommended set of + // optional capabilities to enable for the 4.11 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_11 ClusterVersionCapabilitySet = "v4.11" + + // ClusterVersionCapabilitySet4_12 is the recommended set of + // optional capabilities to enable for the 4.12 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_12 ClusterVersionCapabilitySet = "v4.12" + + // ClusterVersionCapabilitySet4_13 is the recommended set of + // optional capabilities to enable for the 4.13 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_13 ClusterVersionCapabilitySet = "v4.13" + + // ClusterVersionCapabilitySet4_14 is the recommended set of + // optional capabilities to enable for the 4.14 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_14 ClusterVersionCapabilitySet = "v4.14" + + // ClusterVersionCapabilitySet4_15 is the recommended set of + // optional capabilities to enable for the 4.15 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_15 ClusterVersionCapabilitySet = "v4.15" + + // ClusterVersionCapabilitySet4_16 is the recommended set of + // optional capabilities to enable for the 4.16 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_16 ClusterVersionCapabilitySet = "v4.16" + + // ClusterVersionCapabilitySet4_17 is the recommended set of + // optional capabilities to enable for the 4.17 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_17 ClusterVersionCapabilitySet = "v4.17" + + // ClusterVersionCapabilitySet4_18 is the recommended set of + // optional capabilities to enable for the 4.18 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_18 ClusterVersionCapabilitySet = "v4.18" + + // ClusterVersionCapabilitySetCurrent is the recommended set + // of optional capabilities to enable for the cluster's + // current version of OpenShift. + ClusterVersionCapabilitySetCurrent ClusterVersionCapabilitySet = "vCurrent" +) + +// ClusterVersionCapabilitySets defines sets of cluster version capabilities. 
+var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVersionCapability{ + ClusterVersionCapabilitySetNone: {}, + ClusterVersionCapabilitySet4_11: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityMachineAPI, + }, + ClusterVersionCapabilitySet4_12: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityMachineAPI, + }, + ClusterVersionCapabilitySet4_13: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + }, + ClusterVersionCapabilitySet4_14: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + }, + ClusterVersionCapabilitySet4_15: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, + }, + ClusterVersionCapabilitySet4_16: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, + ClusterVersionCapabilityIngress, + ClusterVersionCapabilityCloudControllerManager, + }, + ClusterVersionCapabilitySet4_17: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, + ClusterVersionCapabilityIngress, + ClusterVersionCapabilityCloudControllerManager, + }, + 
ClusterVersionCapabilitySet4_18: {
+		ClusterVersionCapabilityBaremetal,
+		ClusterVersionCapabilityConsole,
+		ClusterVersionCapabilityInsights,
+		ClusterVersionCapabilityMarketplace,
+		ClusterVersionCapabilityStorage,
+		ClusterVersionCapabilityOpenShiftSamples,
+		ClusterVersionCapabilityCSISnapshot,
+		ClusterVersionCapabilityNodeTuning,
+		ClusterVersionCapabilityMachineAPI,
+		ClusterVersionCapabilityBuild,
+		ClusterVersionCapabilityDeploymentConfig,
+		ClusterVersionCapabilityImageRegistry,
+		ClusterVersionCapabilityOperatorLifecycleManager,
+		ClusterVersionCapabilityOperatorLifecycleManagerV1,
+		ClusterVersionCapabilityCloudCredential,
+		ClusterVersionCapabilityIngress,
+		ClusterVersionCapabilityCloudControllerManager,
+	},
+	ClusterVersionCapabilitySetCurrent: {
+		ClusterVersionCapabilityBaremetal,
+		ClusterVersionCapabilityConsole,
+		ClusterVersionCapabilityInsights,
+		ClusterVersionCapabilityMarketplace,
+		ClusterVersionCapabilityStorage,
+		ClusterVersionCapabilityOpenShiftSamples,
+		ClusterVersionCapabilityCSISnapshot,
+		ClusterVersionCapabilityNodeTuning,
+		ClusterVersionCapabilityMachineAPI,
+		ClusterVersionCapabilityBuild,
+		ClusterVersionCapabilityDeploymentConfig,
+		ClusterVersionCapabilityImageRegistry,
+		ClusterVersionCapabilityOperatorLifecycleManager,
+		ClusterVersionCapabilityOperatorLifecycleManagerV1,
+		ClusterVersionCapabilityCloudCredential,
+		ClusterVersionCapabilityIngress,
+		ClusterVersionCapabilityCloudControllerManager,
+	},
+}
+
+// ClusterVersionCapabilitiesSpec selects the managed set of
+// optional, core cluster components.
+// +k8s:deepcopy-gen=true
+type ClusterVersionCapabilitiesSpec struct {
+	// baselineCapabilitySet selects an initial set of
+	// optional capabilities to enable, which can be extended via
+	// additionalEnabledCapabilities. If unset, the cluster will
+	// choose a default, and the default may change over time.
+	// The current default is vCurrent.
+	// +optional
+	BaselineCapabilitySet ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"`
+
+	// additionalEnabledCapabilities extends the set of managed
+	// capabilities beyond the baseline defined in
+	// baselineCapabilitySet. The default is an empty set.
+	// +listType=atomic
+	// +optional
+	AdditionalEnabledCapabilities []ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"`
+}
+
+// ClusterVersionCapabilitiesStatus describes the state of optional,
+// core cluster components.
+// +k8s:deepcopy-gen=true
+type ClusterVersionCapabilitiesStatus struct {
+	// enabledCapabilities lists all the capabilities that are currently managed.
+	// +listType=atomic
+	// +optional
+	EnabledCapabilities []ClusterVersionCapability `json:"enabledCapabilities,omitempty"`
+
+	// knownCapabilities lists all the capabilities known to the current cluster.
+	// +listType=atomic
+	// +optional
+	KnownCapabilities []ClusterVersionCapability `json:"knownCapabilities,omitempty"`
+}
+
+// ComponentOverride allows overriding the cluster version operator's behavior
+// for a component.
+// +k8s:deepcopy-gen=true
+type ComponentOverride struct {
+	// kind identifies which object to override.
+	// +required
+	Kind string `json:"kind"`
+	// group identifies the API group that the kind is in.
+	// +required
+	Group string `json:"group"`
+
+	// namespace is the component's namespace. If the resource is cluster
+	// scoped, the namespace should be empty.
+	// +required
+	Namespace string `json:"namespace"`
+	// name is the component's name.
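+	//
+	// Taken together, an overrides entry in a ClusterVersion spec might look
+	// like the following sketch (the target object is only an assumed example):
+	//
+	//	overrides:
+	//	- kind: Deployment
+	//	  group: apps
+	//	  namespace: openshift-monitoring
+	//	  name: cluster-monitoring-operator
+	//	  unmanaged: true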
+ // +required + Name string `json:"name"` + + // unmanaged controls if cluster version operator should stop managing the + // resources in this cluster. + // Default: false + // +required + Unmanaged bool `json:"unmanaged"` +} + +// URL is a thin wrapper around string that ensures the string is a valid URL. +type URL string + +// Update represents an administrator update request. +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == \"\" || self.image == \"\") : true",message="cannot set both Architecture and Image" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != \"\" ? self.version != \"\" : true",message="Version must be set if Architecture is set" +// +k8s:deepcopy-gen=true +type Update struct { + // architecture is an optional field that indicates the desired + // value of the cluster architecture. In this context cluster + // architecture means either a single architecture or a multi + // architecture. architecture can only be set to Multi thereby + // only allowing updates from single to multi architecture. If + // architecture is set, image cannot be set and version must be + // set. + // Valid values are 'Multi' and empty. + // + // +optional + Architecture ClusterVersionArchitecture `json:"architecture"` + + // version is a semantic version identifying the update version. + // version is required if architecture is specified. + // If both version and image are set, the version extracted from the referenced image must match the specified version. + // + // +optional + Version string `json:"version"` + + // image is a container image location that contains the update. + // image should be used when the desired version does not exist in availableUpdates or history. + // When image is set, architecture cannot be specified. + // If both version and image are set, the version extracted from the referenced image must match the specified version. + // + // +optional + Image string `json:"image"` + + // force allows an administrator to update to an image that has failed + // verification or upgradeable checks that are designed to keep your + // cluster safe. Only use this if: + // * you are testing unsigned release images in short-lived test clusters or + // * you are working around a known bug in the cluster-version + // operator and you have verified the authenticity of the provided + // image yourself. + // The provided image will run with full administrative access + // to the cluster. Do not use this flag with images that come from unknown + // or potentially malicious sources. + // + // +optional + Force bool `json:"force"` + + // acceptRisks is an optional set of names of conditional update risks that are considered acceptable. + // A conditional update is performed only if all of its risks are acceptable. + // This list may contain entries that apply to current, previous or future updates. + // The entries therefore may not map directly to a risk in .status.conditionalUpdateRisks. + // acceptRisks must not contain more than 1000 entries. + // Entries in this list must be unique. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MaxItems=1000 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + // +optional + AcceptRisks []AcceptRisk `json:"acceptRisks,omitempty"` +} + +// AcceptRisk represents a risk that is considered acceptable. +type AcceptRisk struct { + // name is the name of the acceptable risk. 
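+	// As a sketch (the risk name is a placeholder, not a real advertised risk),
+	// an update request accepting a named risk might look like:
+	//
+	//	desiredUpdate:
+	//	  version: 4.18.5
+	//	  acceptRisks:
+	//	  - name: ExampleRiskName
+	//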
+ // It must be a non-empty string and must not exceed 256 characters. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + Name string `json:"name,omitempty"` +} + +// Release represents an OpenShift release image and associated metadata. +// +k8s:deepcopy-gen=true +type Release struct { + // architecture is an optional field that indicates the + // value of the cluster architecture. In this context cluster + // architecture means either a single architecture or a multi + // architecture. + // Valid values are 'Multi' and empty. + // + // +openshift:enable:FeatureGate=ImageStreamImportMode + // +optional + Architecture ClusterVersionArchitecture `json:"architecture,omitempty"` + + // version is a semantic version identifying the update version. When this + // field is part of spec, version is optional if image is specified. + // +required + Version string `json:"version"` + + // image is a container image location that contains the update. When this + // field is part of spec, image is optional if version is specified and the + // availableUpdates field contains a matching version. + // +required + Image string `json:"image"` + + // url contains information about this release. This URL is set by + // the 'url' metadata property on a release or the metadata returned by + // the update API and should be displayed as a link in user + // interfaces. The URL field may not be set for test or nightly + // releases. + // +optional + URL URL `json:"url,omitempty"` + + // channels is the set of Cincinnati channels to which the release + // currently belongs. + // +listType=set + // +optional + Channels []string `json:"channels,omitempty"` +} + +// RetrievedUpdates reports whether available updates have been retrieved from +// the upstream update server. The condition is Unknown before retrieval, False +// if the updates could not be retrieved or recently failed, or True if the +// availableUpdates field is accurate and recent. +const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" + +// ConditionalUpdate represents an update which is recommended to some +// clusters on the version the current cluster is reconciling, but which +// may not be recommended for the current cluster. +type ConditionalUpdate struct { + // release is the target of the update. + // +required + Release Release `json:"release"` + + // riskNames represents the set of the names of conditionalUpdateRisks that are relevant to this update for some clusters. + // The Applies condition of each conditionalUpdateRisks entry declares if that risk applies to this cluster. + // A conditional update is accepted only if each of its risks either does not apply to the cluster or is considered acceptable by the cluster administrator. + // The latter means that the risk names are included in value of the spec.desiredUpdate.acceptRisks field. + // Entries must be unique and must not exceed 256 characters. + // riskNames must not contain more than 500 entries. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MaxLength=256 + // +kubebuilder:validation:MaxItems=500 + // +listType=set + // +optional + RiskNames []string `json:"riskNames,omitempty"` + + // risks represents the range of issues associated with + // updating to the target release. 
The cluster-version + // operator will evaluate all entries, and only recommend the + // update if there is at least one entry and all entries + // recommend the update. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=200 + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +required + Risks []ConditionalUpdateRisk `json:"risks" patchStrategy:"merge" patchMergeKey:"name"` + + // conditions represents the observations of the conditional update's + // current status. Known types are: + // * Recommended, for whether the update is recommended for the current cluster. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// ConditionalUpdateRisk represents a reason and cluster-state +// for not recommending a conditional update. +// +k8s:deepcopy-gen=true +type ConditionalUpdateRisk struct { + // conditions represents the observations of the conditional update + // risk's current status. Known types are: + // * Applies, for whether the risk applies to the current cluster. + // The condition's types in the list must be unique. + // conditions must not contain more than one entry. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:XValidation:rule="self.exists_one(x, x.type == 'Applies')",message="must contain a condition of type 'Applies'" + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // url contains information about this risk. + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"url"` + + // name is the CamelCase reason for not recommending a + // conditional update, in the event that matchingRules match the + // cluster state. + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // message provides additional information about the risk of + // updating, in the event that matchingRules match the cluster + // state. This is only to be consumed by humans. It may + // contain Line Feed characters (U+000A), which should be + // rendered as new lines. + // +kubebuilder:validation:MinLength=1 + // +required + Message string `json:"message"` + + // matchingRules is a slice of conditions for deciding which + // clusters match the risk and which do not. The slice is + // ordered by decreasing precedence. The cluster-version + // operator will walk the slice in order, and stop after the + // first it can successfully evaluate. If no condition can be + // successfully evaluated, the update will not be recommended. + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +required + MatchingRules []ClusterCondition `json:"matchingRules"` +} + +// ClusterCondition is a union of typed cluster conditions. The 'type' +// property determines which of the type-specific properties are relevant. +// When evaluated on a cluster, the condition may match, not match, or +// fail to evaluate. +// +k8s:deepcopy-gen=true +type ClusterCondition struct { + // type represents the cluster-condition type. This defines + // the members and semantics of any additional properties. + // +kubebuilder:validation:Enum={"Always","PromQL"} + // +required + Type string `json:"type"` + + // promql represents a cluster condition based on PromQL. 
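+	//
+	// An illustrative matching rule (the metric queried here is an assumption
+	// about what a risk author might use, not part of this API):
+	//
+	//	matchingRules:
+	//	- type: PromQL
+	//	  promql:
+	//	    promql: |
+	//	      group(cluster_infrastructure_provider{type="IBMCloud"})
+	//	      or 0 * group(cluster_infrastructure_provider)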
+	// +optional
+	PromQL *PromQLClusterCondition `json:"promql,omitempty"`
+}
+
+// PromQLClusterCondition represents a cluster condition based on PromQL.
+type PromQLClusterCondition struct {
+	// promql is a PromQL query classifying clusters. This query
+	// should return a 1 in the match case and a 0 in the
+	// does-not-match case. Queries which return no time
+	// series, or which return values besides 0 or 1, are
+	// evaluation failures.
+	// +required
+	PromQL string `json:"promql"`
+}
+
+// ClusterVersionList is a list of ClusterVersion resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type ClusterVersionList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterVersion `json:"items"`
+}
+
+// SignatureStore represents the URL of a custom signature store.
+type SignatureStore struct {
+
+	// url contains the upstream custom signature store URL.
+	// url should be a valid absolute http/https URI of an upstream signature store as per rfc1738.
+	// This must be provided and cannot be empty.
+	//
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL"
+	// +required
+	URL string `json:"url"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the signature store is not honored.
+	// If the specified ca data is not valid, the signature store is not honored.
+	// If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots.
+	// The namespace for this config map is openshift-config.
+	// +optional
+	CA ConfigMapNameReference `json:"ca"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_console.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_console.go
new file mode 100644
index 0000000000..dc6967bf15
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -0,0 +1,81 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Console holds cluster-wide configuration for the web console, including the
+// logout URL, and reports the public URL of the console. The canonical name is
+// `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
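+//
+// An illustrative configuration (the redirect URL is a placeholder; see
+// ConsoleAuthentication below):
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: Console
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  authentication:
+//	    logoutRedirect: https://idp.example.com/logout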
+// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=consoles,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Console struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ConsoleSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ConsoleStatus `json:"status"` +} + +// ConsoleSpec is the specification of the desired behavior of the Console. +type ConsoleSpec struct { + // +optional + Authentication ConsoleAuthentication `json:"authentication"` +} + +// ConsoleStatus defines the observed status of the Console. +type ConsoleStatus struct { + // The URL for the console. This will be derived from the host for the route that + // is created for the console. + // +optional + ConsoleURL string `json:"consoleURL"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} + +// ConsoleAuthentication defines a list of optional configuration for console authentication. +type ConsoleAuthentication struct { + // An optional, absolute URL to redirect web browsers to after logging out of + // the console. If not specified, it will redirect to the default login page. + // This is required when using an identity provider that supports single + // sign-on (SSO) such as: + // - OpenID (Keycloak, Azure) + // - RequestHeader (GSSAPI, SSPI, SAML) + // - OAuth (GitHub, GitLab, Google) + // Logging out of the console will destroy the user's token. The logoutRedirect + // provides the user the option to perform single logout (SLO) through the identity + // provider to destroy their single sign-on session. + // +optional + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$` + LogoutRedirect string `json:"logoutRedirect,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_dns.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_dns.go new file mode 100644 index 0000000000..06eb75ccf7 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -0,0 +1,140 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNS holds cluster-wide information about DNS. 
The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=dnses,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type DNS struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec DNSSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status DNSStatus `json:"status"` +} + +type DNSSpec struct { + // baseDomain is the base domain of the cluster. All managed DNS records will + // be sub-domains of this base. + // + // For example, given the base domain `openshift.example.com`, an API server + // DNS record may be created for `cluster-api.openshift.example.com`. + // + // Once set, this field cannot be changed. + BaseDomain string `json:"baseDomain"` + // publicZone is the location where all the DNS records that are publicly accessible to + // the internet exist. + // + // If this field is nil, no public records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PublicZone *DNSZone `json:"publicZone,omitempty"` + // privateZone is the location where all the DNS records that are only available internally + // to the cluster exist. + // + // If this field is nil, no private records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PrivateZone *DNSZone `json:"privateZone,omitempty"` + // platform holds configuration specific to the underlying + // infrastructure provider for DNS. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // +optional + Platform DNSPlatformSpec `json:"platform,omitempty"` +} + +// DNSZone is used to define a DNS hosted zone. +// A zone can be identified by an ID or tags. +type DNSZone struct { + // id is the identifier that can be used to find the DNS hosted zone. + // + // on AWS zone can be fetched using `ID` as id in [1] + // on Azure zone can be fetched using `ID` as a pre-determined name in [2], + // on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + // + // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + // +optional + ID string `json:"id,omitempty"` + + // tags can be used to query the DNS hosted zone. 
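+	// For example (the tag keys and values are placeholders):
+	//
+	//	privateZone:
+	//	  tags:
+	//	    Name: example-cluster-int
+	//	    kubernetes.io/cluster/example-cluster: owned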
+ // + // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + // + // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + // +optional + Tags map[string]string `json:"tags,omitempty"` +} + +type DNSStatus struct { + // dnsSuffix (service-ca amongst others) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DNSList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []DNS `json:"items"` +} + +// DNSPlatformSpec holds cloud-provider-specific configuration +// for DNS administration. +// +union +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws configuration is required when platform is AWS, and forbidden otherwise" +type DNSPlatformSpec struct { + // type is the underlying infrastructure provider for the cluster. + // Allowed values: "", "AWS". + // + // Individual components may not support all platforms, + // and must handle unrecognized platforms with best-effort defaults. + // + // +unionDiscriminator + // +required + // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'" + Type PlatformType `json:"type"` + + // aws contains DNS configuration specific to the Amazon Web Services cloud provider. + // +optional + AWS *AWSDNSSpec `json:"aws"` +} + +// AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider. +type AWSDNSSpec struct { + // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing + // operations on the cluster's private hosted zone specified in the cluster DNS config. + // When left empty, no role should be assumed. + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +optional + PrivateZoneIAMRole string `json:"privateZoneIAMRole"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_feature.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_feature.go new file mode 100644 index 0000000000..e111d518ab --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -0,0 +1,158 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Feature holds cluster-wide information about feature gates. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
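+//
+// Two illustrative selections, as sketches rather than recommendations: a
+// cluster opting into tech preview features, and an unsupported
+// CustomNoUpgrade cluster forcing an individual gate (the gate name is a
+// placeholder; see FeatureGateSelection below):
+//
+//	spec:
+//	  featureSet: TechPreviewNoUpgrade
+//
+//	spec:
+//	  featureSet: CustomNoUpgrade
+//	  customNoUpgrade:
+//	    enabled:
+//	    - ExampleFeatureGateName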
+// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=featuregates,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type FeatureGate struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + // +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? has(self.featureSet) : true",message=".spec.featureSet cannot be removed" + Spec FeatureGateSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status FeatureGateStatus `json:"status"` +} + +type FeatureSet string + +var ( + // Default feature set that allows upgrades. + Default FeatureSet = "" + + // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning + // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. + TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade" + + // DevPreviewNoUpgrade turns on dev preview features that are not part of the normal supported platform. Turning + // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. + DevPreviewNoUpgrade FeatureSet = "DevPreviewNoUpgrade" + + // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. + // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations + // your cluster may fail in an unrecoverable way. + CustomNoUpgrade FeatureSet = "CustomNoUpgrade" + + // OKD turns on features for OKD. Turning this feature set ON is supported for OKD clusters, but NOT for OpenShift clusters. + // Once enabled, this feature set cannot be changed back to Default, but can be changed to other feature sets and it allows upgrades. + OKD FeatureSet = "OKD" + + // AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't for instance. LatencySensitive is dead + AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade, OKD} +) + +type FeatureGateSpec struct { + FeatureGateSelection `json:",inline"` +} + +// +union +type FeatureGateSelection struct { + // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. + // Turning on or off features may cause irreversible changes in your cluster which cannot be undone. + // +unionDiscriminator + // +optional + // +kubebuilder:validation:Enum=CustomNoUpgrade;DevPreviewNoUpgrade;TechPreviewNoUpgrade;OKD;"" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? 
self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == 'OKD' ? self != '' : true",message="OKD cannot transition to Default"
+	FeatureSet FeatureSet `json:"featureSet,omitempty"`
+
+	// customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+	// Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
+	// your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field.
+	// +optional
+	// +nullable
+	CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"`
+}
+
+type CustomFeatureGates struct {
+	// enabled is a list of all feature gates that you want to force on
+	// +optional
+	Enabled []FeatureGateName `json:"enabled,omitempty"`
+	// disabled is a list of all feature gates that you want to force off
+	// +optional
+	Disabled []FeatureGateName `json:"disabled,omitempty"`
+}
+
+// FeatureGateName is a string to enforce patterns on the name of a FeatureGate
+// +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$`
+type FeatureGateName string
+
+type FeatureGateStatus struct {
+	// conditions represent the observations of the current state.
+	// Known .status.conditions.type are: "DeterminationDegraded"
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion.
+	// Operators other than the CVO and cluster-config-operator must read the .status.featureGates, locate
+	// the version they are managing, find the enabled/disabled featuregates and make the operand and operator match.
+	// The enabled/disabled values for a particular version may change during the life of the cluster as various
+	// .spec.featureSet values are selected.
+	// Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable
+	// lists is beyond the scope of this API and is the responsibility of individual operators.
+	// Only featureGates with .version in the ClusterVersion.status will be present in this list.
+	// +listType=map
+	// +listMapKey=version
+	// +optional
+	FeatureGates []FeatureGateDetails `json:"featureGates"`
+}
+
+type FeatureGateDetails struct {
+	// version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.
+	// +required
+	Version string `json:"version"`
+	// enabled is a list of all feature gates that are enabled in the cluster for the named version.
+	// +optional
+	Enabled []FeatureGateAttributes `json:"enabled"`
+	// disabled is a list of all feature gates that are disabled in the cluster for the named version.
+	// +optional
+	Disabled []FeatureGateAttributes `json:"disabled"`
+}
+
+type FeatureGateAttributes struct {
+	// name is the name of the FeatureGate.
+	// +required
+	Name FeatureGateName `json:"name"`
+
+	// possible (probable?) future additions include
+	// 1. support level (Stable, ServiceDeliveryOnly, TechPreview, DevPreview)
+	// 2. description
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +type FeatureGateList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []FeatureGate `json:"items"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image.go new file mode 100644 index 0000000000..82f46c8b6c --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image.go @@ -0,0 +1,191 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image governs policies related to imagestream imports and runtime configuration +// for external registries. It allows cluster admins to configure which registries +// OpenShift is allowed to import images from, extra CA trust bundles for external +// registries, and policies to block or allow registry hostnames. +// When exposing OpenShift's image registry to the public, this also lets cluster +// admins specify the external hostname. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=images,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Image struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ImageSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ImageStatus `json:"status"` +} + +// ImportModeType describes how to import an image manifest. +// +enum +// +kubebuilder:validation:Enum:="";Legacy;PreserveOriginal +type ImportModeType string + +const ( + // ImportModeLegacy indicates that the legacy behaviour should be used. + // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // This mode is the default. + ImportModeLegacy ImportModeType = "Legacy" + // ImportModePreserveOriginal indicates that the original manifest will be preserved. + // For manifest lists, the manifest list and all its sub-manifests will be imported. + ImportModePreserveOriginal ImportModeType = "PreserveOriginal" +) + +type ImageSpec struct { + // allowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. 
Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + // +optional + // +listType=atomic + AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + // +optional + // +listType=atomic + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + + // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted during imagestream import, pod image pull, build image pull, and + // imageregistry pullthrough. + // The namespace for this config map is openshift-config. + // +optional + AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` + + // registrySources contains configuration that determines how the container runtime + // should treat individual registries when accessing images for builds+pods. (e.g. + // whether or not to allow insecure access). It does not contain configuration for the + // internal cluster registry. + // +optional + RegistrySources RegistrySources `json:"registrySources"` + + // imageStreamImportMode controls the import mode behaviour of imagestreams. + // It can be set to `Legacy` or `PreserveOriginal` or the empty string. If this value + // is specified, this setting is applied to all newly created imagestreams which do not have the + // value set. `Legacy` indicates that the legacy behaviour should be used. + // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, + // the manifest list and all its sub-manifests will be imported. When empty, the behaviour will be + // decided based on the payload type advertised by the ClusterVersion status, i.e single arch payload + // implies the import mode is Legacy and multi payload implies PreserveOriginal. + // +openshift:enable:FeatureGate=ImageStreamImportMode + // +optional + ImageStreamImportMode ImportModeType `json:"imageStreamImportMode"` +} + +type ImageStatus struct { + // internalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // This value is set by the image registry operator which controls the internal registry + // hostname. + // +optional + InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. 
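+	// e.g. (the hostname is a placeholder):
+	//
+	//	externalRegistryHostnames:
+	//	- registry.apps.example.com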
+	// +optional
+	// +listType=atomic
+	ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+
+	// imageStreamImportMode controls the import mode behaviour of imagestreams. It can be
+	// `Legacy` or `PreserveOriginal`. `Legacy` indicates that the legacy behaviour should be used.
+	// For manifest lists, the legacy behaviour will discard the manifest list and import a single
+	// sub-manifest. In this case, the platform is chosen in the following order of priority:
+	// 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
+	// `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists,
+	// the manifest list and all its sub-manifests will be imported. This value will be reconciled based
+	// on the spec value or, if no spec value is specified, the image registry operator will look
+	// at the ClusterVersion status to determine the payload type and set the import mode accordingly,
+	// i.e. a single arch payload implies the import mode is Legacy and a multi payload implies PreserveOriginal.
+	// +openshift:enable:FeatureGate=ImageStreamImportMode
+	// +optional
+	ImageStreamImportMode ImportModeType `json:"imageStreamImportMode,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Image `json:"items"`
+}
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+	// domainName specifies a domain name for the registry.
+	// If the registry uses a port other than the standard 80 or 443, the port should be included
+	// in the domain name as well.
+	DomainName string `json:"domainName"`
+	// insecure indicates whether the registry is secure (https) or insecure (http).
+	// By default (if not specified) the registry is assumed to be secure.
+	// +optional
+	Insecure bool `json:"insecure,omitempty"`
+}
+
+// RegistrySources holds cluster-wide information about how to handle the registries config.
+//
+// +kubebuilder:validation:XValidation:rule="has(self.blockedRegistries) ? !has(self.allowedRegistries) : true",message="Only one of blockedRegistries or allowedRegistries may be set"
+type RegistrySources struct {
+	// insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections.
+	// +optional
+	// +listType=atomic
+	InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+	// blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.
+	//
+	// Only one of BlockedRegistries or AllowedRegistries may be set.
+	// +optional
+	// +listType=atomic
+	BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+	// allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.
+	//
+	// Only one of BlockedRegistries or AllowedRegistries may be set.
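+	//
+	// An illustrative registrySources stanza in the Image config spec (the
+	// registry names are placeholders):
+	//
+	//	registrySources:
+	//	  insecureRegistries:
+	//	  - insecure.example.com:5000
+	//	  allowedRegistries:
+	//	  - quay.io
+	//	  - registry.redhat.io
+	//	  - insecure.example.com:5000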
+	// +optional
+	// +listType=atomic
+	AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+	// containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified
+	// domains in their pull specs. Registries will be searched in the order provided in the list.
+	// Note: this search list only works with the container runtime, i.e. CRI-O. It will NOT work with builds or imagestream imports.
+	// +optional
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:Format=hostname
+	// +listType=set
+	ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
new file mode 100644
index 0000000000..0bd0d77705
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
@@ -0,0 +1,99 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules.
+// When multiple policies are defined, the resulting behavior is defined on each field.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/874
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagecontentpolicies,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type ImageContentPolicy struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +required
+	Spec ImageContentPolicySpec `json:"spec"`
+}
+
+// ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.
+type ImageContentPolicySpec struct {
+	// repositoryDigestMirrors allows images referenced by image digests in pods to be
+	// pulled from alternative mirrored repository locations. The image pull specification
+	// provided to the pod will be compared to the source locations described in RepositoryDigestMirrors
+	// and the image may be pulled down from any of the mirrors in the list instead of the
+	// specified repository, allowing administrators to choose a potentially faster mirror.
+	// To pull images from mirrors by tag, set "allowMirrorByTags" to true.
+	//
+	// Each “source” repository is treated independently; configurations for different “source”
+	// repositories don’t interact.
+	//
+	// If the "mirrors" is not specified, the image will continue to be pulled from the specified
+	// repository in the pull spec.
+	//
+	// When multiple policies are defined for the same “source” repository, the sets of defined
+	// mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+	// For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+	// mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+	// (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+	// +optional
+	// +listType=map
+	// +listMapKey=source
+	RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentPolicyList lists the items in the ImageContentPolicy CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageContentPolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ImageContentPolicy `json:"items"`
+}
+
+// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type RepositoryDigestMirrors struct {
+	// source is the repository that users refer to, e.g. in image pull specifications.
+	// +required
+	// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$`
+	Source string `json:"source"`
+	// allowMirrorByTags, if true, allows the mirrors to be used to pull images that are referenced by their tags. Default is false: the mirrors only work when pulling images that are referenced by their digests.
+	// Pulling images by tag can potentially yield different images, depending on which endpoint
+	// we pull from. Forcing digest-pulls for mirrors avoids that issue.
+	// +optional
+	AllowMirrorByTags bool `json:"allowMirrorByTags,omitempty"`
+	// mirrors is zero or more repositories that may also contain the same images.
+	// If the "mirrors" is not specified, the image will continue to be pulled from the specified
+	// repository in the pull spec. No mirror will be configured.
+	// The order of mirrors in this list is treated as the user's desired priority, while source
+	// is by default considered lower priority than all mirrors. Other cluster configuration,
+	// including (but not limited to) other repositoryDigestMirrors objects,
+	// may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+	// in parallel, so this should be considered a preference rather than a guarantee of ordering.
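+	//
+	// An illustrative entry (the source and mirror locations are placeholders):
+	//
+	//	repositoryDigestMirrors:
+	//	- source: registry.example.com/product
+	//	  allowMirrorByTags: false
+	//	  mirrors:
+	//	  - mirror.example.net/product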
+ // +optional + // +listType=set + Mirrors []Mirror `json:"mirrors,omitempty"` +} + +// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` +type Mirror string diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go new file mode 100644 index 0000000000..df2258d12f --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -0,0 +1,141 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. +// When multiple policies are defined, the outcome of the behavior is defined on each field. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagedigestmirrorsets,scope=Cluster,shortName=idms +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type ImageDigestMirrorSet struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ImageDigestMirrorSetSpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ImageDigestMirrorSetStatus `json:"status,omitempty"` +} + +// ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD. +type ImageDigestMirrorSetSpec struct { + // imageDigestMirrors allows images referenced by image digests in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in imageDigestMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // To use mirrors to pull images using tag specification, users should configure + // a list of mirrors using "ImageTagMirrorSet" CRD. + // + // If the image pull specification matches the repository of "source" in multiple imagedigestmirrorset objects, + // only the objects which define the most specific namespace match will be used. + // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as + // the "source", only the objects using quay.io/libpod/busybox are going to apply + // for pull specification quay.io/libpod/busybox. + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact. 
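+	//
+	// A minimal ImageDigestMirrorSet, as a sketch (the names and locations are
+	// placeholders):
+	//
+	//	apiVersion: config.openshift.io/v1
+	//	kind: ImageDigestMirrorSet
+	//	metadata:
+	//	  name: example
+	//	spec:
+	//	  imageDigestMirrors:
+	//	  - source: registry.example.com/product
+	//	    mirrors:
+	//	    - mirror.example.net/product
+	//	    mirrorSourcePolicy: AllowContactingSource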
+	//
+	// If the "mirrors" is not specified, the image will continue to be pulled from the specified
+	// repository in the pull spec.
+	//
+	// When multiple policies are defined for the same “source” repository, the sets of defined
+	// mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+	// For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+	// mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+	// (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+	// Users who want to use a specific order of mirrors should configure them into one list of mirrors using the expected order.
+	// +optional
+	// +listType=atomic
+	ImageDigestMirrors []ImageDigestMirrors `json:"imageDigestMirrors"`
+}
+
+type ImageDigestMirrorSetStatus struct{}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageDigestMirrorSetList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ImageDigestMirrorSet `json:"items"`
+}
+
+// +kubebuilder:validation:Pattern=`^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+type ImageMirror string
+
+// MirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails.
+// +kubebuilder:validation:Enum=NeverContactSource;AllowContactingSource
+type MirrorSourcePolicy string
+
+const (
+	// NeverContactSource prevents image pull from the specified repository in the pull spec if the image pull from the mirror list fails.
+	NeverContactSource MirrorSourcePolicy = "NeverContactSource"
+
+	// AllowContactingSource allows falling back to the specified repository in the pull spec if the image pull from the mirror list fails.
+	AllowContactingSource MirrorSourcePolicy = "AllowContactingSource"
+)
+
+// ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type ImageDigestMirrors struct {
+	// source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname,
+	// e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
+ // "source" uses one of the following formats: + // host[:port] + // host[:port]/namespace[/namespace…] + // host[:port]/namespace[/namespace…]/repo + // [*.]host + // for more information about the format, see the document about the location field: + // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + // +required + // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` + Source string `json:"source"` + // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. + // Images can be pulled from these mirrors only if they are referenced by their digests. + // The mirrored location is obtained by replacing the part of the input reference that + // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, + // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo + // repository to be used. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. + // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be + // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy" + // Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // "mirrors" uses one of the following formats: + // host[:port] + // host[:port]/namespace[/namespace…] + // host[:port]/namespace[/namespace…]/repo + // for more information about the format, see the document about the location field: + // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + // +optional + // +listType=set + Mirrors []ImageMirror `json:"mirrors,omitempty"` + // mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. + // If unset, the image will continue to be pulled from the the repository in the pull spec. + // sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. + // +optional + MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_policy.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_policy.go new file mode 100644 index 0000000000..3cc46141c9 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_policy.go @@ -0,0 +1,322 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicy holds namespace-wide configuration for image signature verification +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_policy.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_policy.go
new file mode 100644
index 0000000000..3cc46141c9
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_policy.go
@@ -0,0 +1,322 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePolicy holds namespace-wide configuration for image signature verification.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagepolicies,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=SigstoreImageVerification
+// +openshift:compatibility-gen:level=1
+type ImagePolicy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec holds user settable values for configuration
+ // +required
+ Spec ImagePolicySpec `json:"spec"`
+ // status contains the observed state of the resource.
+ // +optional
+ Status ImagePolicyStatus `json:"status"`
+}
+
+// ImagePolicySpec is the specification of the ImagePolicy CRD.
+type ImagePolicySpec struct {
+ // scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2".
+ // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest).
+ // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository
+ // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number).
+ // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not.
+ // This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored.
+ // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories
+ // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation.
+ // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
+ // For additional details about the format, please refer to the document explaining the docker transport field,
+ // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
+ // +required
+ // +kubebuilder:validation:MaxItems=256
+ // +listType=set
+ Scopes []ImageScope `json:"scopes"`
+ // policy is a required field that contains configuration to allow scopes to be verified, and defines how
+ // images not matching the verification policy will be treated.
+ // +required
+ Policy ImageSigstoreVerificationPolicy `json:"policy"`
+}
+
+// +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'"
+// +kubebuilder:validation:XValidation:rule=`self.contains('*') ? self.matches('^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$') : true`,message="invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching"
+// +kubebuilder:validation:XValidation:rule=`!self.contains('*') ? self.matches('^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$') : true`,message="invalid repository namespace or image specification in the image scope"
+// +kubebuilder:validation:MaxLength=512
+type ImageScope string
+
+// ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list.
+type ImageSigstoreVerificationPolicy struct {
+ // rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval.
+ // This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.
+ // +required
+ RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"`
+ // signedIdentity is an optional field that specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec; for example, when a mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope.
+ // The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity; the default matchPolicy is "MatchRepoDigestOrExact".
+ // +optional
+ SignedIdentity *PolicyIdentity `json:"signedIdentity,omitempty"`
+}
+
+// PolicyRootOfTrust defines the root of trust based on the selected policyType.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=SigstoreImageVerificationPKI,rule="has(self.policyType) && self.policyType == 'PKI' ? has(self.pki) : !has(self.pki)",message="pki is required when policyType is PKI, and forbidden otherwise"
+type PolicyRootOfTrust struct {
+ // policyType is a required field that specifies the type of the policy for verification. This field must correspond to how the policy was generated.
+ // Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI".
+ // When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification.
+ // When set to "FulcioCAWithRekor", the policy is based on the Fulcio certificate and incorporates a Rekor verification.
+ // When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). + // +unionDiscriminator + // +required + PolicyType PolicyType `json:"policyType"` + // publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. + // publicKey is required when policyType is PublicKey, and forbidden otherwise. + // +optional + PublicKey *ImagePolicyPublicKeyRootOfTrust `json:"publicKey,omitempty"` + // fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. + // fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise + // For more information about Fulcio and Rekor, please refer to the document at: + // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor + // +optional + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrust `json:"fulcioCAWithRekor,omitempty"` + // pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. + // pki is required when policyType is PKI, and forbidden otherwise. + // +optional + // +openshift:enable:FeatureGate=SigstoreImageVerificationPKI + PKI *ImagePolicyPKIRootOfTrust `json:"pki,omitempty"` +} + +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor +// +openshift:validation:FeatureGateAwareEnum:featureGate=SigstoreImageVerificationPKI,enum=PublicKey;FulcioCAWithRekor;PKI +type PolicyType string + +const ( + PublicKeyRootOfTrust PolicyType = "PublicKey" + FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor" + PKIRootOfTrust PolicyType = "PKI" +) + +// ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key. +type ImagePolicyPublicKeyRootOfTrust struct { + // keyData is a required field contains inline base64-encoded data for the PEM format public key. + // keyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=68 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the keyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the keyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + KeyData []byte `json:"keyData"` + // rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +optional + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + RekorKeyData []byte `json:"rekorKeyData,omitempty"` +} + +// ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key. 
+// ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key.
+type ImagePolicyFulcioCAWithRekorRootOfTrust struct {
+ // fulcioCAData is a required field that contains inline base64-encoded data for the PEM format Fulcio CA.
+ // fulcioCAData must be at most 8192 characters.
+ // +required
+ // +kubebuilder:validation:MaxLength=8192
+ // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the fulcioCAData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'."
+ // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the fulcioCAData must end with base64 encoding of '-----END CERTIFICATE-----'."
+ FulcioCAData []byte `json:"fulcioCAData"`
+ // rekorKeyData is a required field that contains inline base64-encoded data for the PEM format Rekor public key.
+ // rekorKeyData must be at most 8192 characters.
+ // +required
+ // +kubebuilder:validation:MaxLength=8192
+ // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'."
+ // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'."
+ RekorKeyData []byte `json:"rekorKeyData"`
+ // fulcioSubject is a required field that specifies the OIDC issuer and email of the Fulcio authentication configuration.
+ // +required
+ FulcioSubject PolicyFulcioSubject `json:"fulcioSubject"`
+}
+
+// PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.
+type PolicyFulcioSubject struct {
+ // oidcIssuer is a required field that contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length.
+ // It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL.
+ // When Fulcio issues certificates, it includes a value based on a URL inside the client-provided ID token.
+ // Example: "https://expected.OIDC.issuer/"
+ // +required
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL"
+ OIDCIssuer string `json:"oidcIssuer"`
+ // signedEmail is a required field that holds the email address that the Fulcio certificate is issued for.
+ // The signedEmail must be a valid email address and at most 320 characters in length.
+ // Example: "expected-signing-user@example.com"
+ // +required
+ // +kubebuilder:validation:MaxLength=320
+ // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address"
+ SignedEmail string `json:"signedEmail"`
+}
+
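And the FulcioCAWithRekor variant, again a sketch with the earlier imports; the PEM inputs are placeholders, and the issuer and email reuse the examples from the field docs:

// fulcioRootOfTrust builds a keyless (Fulcio + Rekor) root of trust.
func fulcioRootOfTrust(fulcioCA, rekorKey []byte) configv1.PolicyRootOfTrust {
	return configv1.PolicyRootOfTrust{
		PolicyType: configv1.FulcioCAWithRekorRootOfTrust,
		FulcioCAWithRekor: &configv1.ImagePolicyFulcioCAWithRekorRootOfTrust{
			FulcioCAData: fulcioCA, // PEM certificate bytes
			RekorKeyData: rekorKey, // PEM public key bytes
			FulcioSubject: configv1.PolicyFulcioSubject{
				OIDCIssuer:  "https://expected.OIDC.issuer/",
				SignedEmail: "expected-signing-user@example.com",
			},
		},
	}
}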
+// ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates.
+type ImagePolicyPKIRootOfTrust struct {
+ // caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters.
+ // +required
+ // +kubebuilder:validation:MaxLength=8192
+ // +kubebuilder:validation:MinLength=72
+ // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caRootsData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'."
+ // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caRootsData must end with base64 encoding of '-----END CERTIFICATE-----'."
+ // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caRootsData must be base64 encoding of valid PEM format data containing the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers."
+ CertificateAuthorityRootsData []byte `json:"caRootsData"`
+ // caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters.
+ // caIntermediatesData requires caRootsData to be set.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caIntermediatesData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'."
+ // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caIntermediatesData must end with base64 encoding of '-----END CERTIFICATE-----'."
+ // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caIntermediatesData must be base64 encoding of valid PEM format data containing the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers."
+ // +kubebuilder:validation:MaxLength=8192
+ // +kubebuilder:validation:MinLength=72
+ CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"`
+
+ // pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.
+ // +required
+ PKICertificateSubject PKICertificateSubject `json:"pkiCertificateSubject"`
+}
+
+// PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.
+// +kubebuilder:validation:XValidation:rule="has(self.email) || has(self.hostname)", message="at least one of email or hostname must be set in pkiCertificateSubject"
+// +openshift:enable:FeatureGate=SigstoreImageVerificationPKI
+type PKICertificateSubject struct {
+ // email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate.
+ // The email must be a valid email address and at most 320 characters in length.
+ // +optional
+ // +kubebuilder:validation:MaxLength:=320
+ // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address"
+ Email string `json:"email,omitempty"`
+ // hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate.
+ // The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length.
+ // It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.
+ // +optional
+ // +kubebuilder:validation:MaxLength:=253
+ // +kubebuilder:validation:XValidation:rule="self.startsWith('*.') ? !format.dns1123Subdomain().validate(self.replace('*.', '', 1)).hasValue() : !format.dns1123Subdomain().validate(self).hasValue()",message="hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.'. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk."
+ Hostname string `json:"hostname,omitempty"`
+}
+
+// PolicyIdentity defines the image identity that the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact".
+// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise"
+// +union
+type PolicyIdentity struct {
+ // matchPolicy is a required field that specifies the matching strategy used to verify the image identity in the signature against the image scope.
+ // Allowed values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact".
+ // When set to "MatchRepoDigestOrExact", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity.
+ // When set to "MatchRepository", the identity in the signature must be in the same repository as the image identity.
+ // When set to "ExactRepository", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by "repository".
+ // When set to "RemapIdentity", the remapIdentity must be specified. The identity in the signature must be the same as the remapped image identity. The remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the image identity matches the specified prefix.
+ // +unionDiscriminator
+ // +required
+ MatchPolicy IdentityMatchPolicy `json:"matchPolicy"`
+ // exactRepository specifies the repository that must be exactly matched by the identity in the signature.
+ // exactRepository is required if matchPolicy is set to "ExactRepository". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.
+ // +optional
+ PolicyMatchExactRepository *PolicyMatchExactRepository `json:"exactRepository,omitempty"`
+ // remapIdentity specifies the prefix remapping rule for verifying image identity.
+ // remapIdentity is required if matchPolicy is set to "RemapIdentity". It is used to verify that the signature claims a different registry/repository prefix than the original image.
+ // +optional
+ PolicyMatchRemapIdentity *PolicyMatchRemapIdentity `json:"remapIdentity,omitempty"`
+}
+
+// +kubebuilder:validation:MaxLength=512
+// +kubebuilder:validation:XValidation:rule=`self.matches('.*:([\\w][\\w.-]{0,127})$')? self.matches('^(localhost:[0-9]+)$'): true`,message="invalid repository or prefix in the signedIdentity, should not include the tag or digest"
+// +kubebuilder:validation:XValidation:rule=`self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')`,message="invalid repository or prefix in the signedIdentity. The repository or prefix must start with 'localhost' or a valid '.' separated domain. If it contains registry paths, the path component names must start with at least one letter or number, with following parts able to be separated by one period, one or two underscores and multiple dashes."
+type IdentityRepositoryPrefix string
+
+type PolicyMatchExactRepository struct {
+ // repository is the reference of the image identity to be matched.
+ // repository is required if matchPolicy is set to "ExactRepository".
+ // The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox
+ // +required
+ Repository IdentityRepositoryPrefix `json:"repository"`
+}
+
+type PolicyMatchRemapIdentity struct {
+ // prefix is required if matchPolicy is set to "RemapIdentity".
+ // prefix is the prefix of the image identity to be matched.
+ // If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used unchanged and no remapping takes place).
+ // This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure.
+ // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port] string), repository namespaces,
+ // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
+ // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
+ // +required
+ Prefix IdentityRepositoryPrefix `json:"prefix"`
+ // signedPrefix is required if matchPolicy is set to "RemapIdentity".
+ // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port] string), repository namespaces,
+ // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
+ // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
+ // +required
+ SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"`
+}
+
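A short sketch of a remapped signed identity, with the same import assumptions as the earlier sketches; it accepts signatures that name registry.redhat.io even though the pull spec points at a hypothetical mirror.local/redhat mirror (the IdentityMatchPolicy* constants are defined just below):

remapped := &configv1.PolicyIdentity{
	MatchPolicy: configv1.IdentityMatchPolicyRemapIdentity,
	PolicyMatchRemapIdentity: &configv1.PolicyMatchRemapIdentity{
		// The prefix of the pulled image identity...
		Prefix: configv1.IdentityRepositoryPrefix("mirror.local/redhat"),
		// ...is rewritten to this prefix before comparing with the signature.
		SignedPrefix: configv1.IdentityRepositoryPrefix("registry.redhat.io"),
	},
}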
+// IdentityMatchPolicy defines the type of matching for "matchPolicy".
+// +kubebuilder:validation:Enum=MatchRepoDigestOrExact;MatchRepository;ExactRepository;RemapIdentity
+type IdentityMatchPolicy string
+
+const (
+ IdentityMatchPolicyMatchRepoDigestOrExact IdentityMatchPolicy = "MatchRepoDigestOrExact"
+ IdentityMatchPolicyMatchRepository IdentityMatchPolicy = "MatchRepository"
+ IdentityMatchPolicyExactRepository IdentityMatchPolicy = "ExactRepository"
+ IdentityMatchPolicyRemapIdentity IdentityMatchPolicy = "RemapIdentity"
+)
+
+// +k8s:deepcopy-gen=true
+type ImagePolicyStatus struct {
+ // conditions provide details on the status of this API Resource.
+ // condition type 'Pending' indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.
+ // +kubebuilder:validation:MaxItems=8
+ // +kubebuilder:validation:MinItems=1
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePolicyList is a list of ImagePolicy resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImagePolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +required
+ metav1.ListMeta `json:"metadata"`
+
+ // items is a list of ImagePolicies
+ // +kubebuilder:validation:MaxItems=1000
+ // +required
+ Items []ImagePolicy `json:"items"`
+}
+
+const (
+ // ImagePolicyPending indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.
+ ImagePolicyPending = "Pending"
+ // ImagePolicyApplied indicates that the policy has been applied.
+ ImagePolicyApplied = "Applied"
+)
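To close the loop on the status side, a consumer might check the documented Pending condition with the standard apimachinery condition helper; this sketch assumes an extra import of k8s.io/apimachinery/pkg/api/meta alongside the earlier ones:

// policyPending reports whether this ImagePolicy cannot take effect, i.e. it
// is overwritten by a global policy or one of its scopes is invalid.
func policyPending(pol *configv1.ImagePolicy) bool {
	return meta.IsStatusConditionTrue(pol.Status.Conditions, configv1.ImagePolicyPending)
}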
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
new file mode 100644
index 0000000000..b7e1a6a873
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
@@ -0,0 +1,128 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules when using tag pull specifications.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagetagmirrorsets,scope=Cluster,shortName=itms
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type ImageTagMirrorSet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +required
+ Spec ImageTagMirrorSetSpec `json:"spec"`
+ // status contains the observed state of the resource.
+ // +optional
+ Status ImageTagMirrorSetStatus `json:"status,omitempty"`
+}
+
+// ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.
+type ImageTagMirrorSetSpec struct {
+ // imageTagMirrors allows images referenced by image tags in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in imageTagMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // To use mirrors to pull images using digest specification only, users should configure
+ // a list of mirrors using "ImageDigestMirrorSet" CRD.
+ //
+ // If the image pull specification matches the repository of "source" in multiple imagetagmirrorset objects,
+ // only the objects which define the most specific namespace match will be used.
+ // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as
+ // the "source", only the objects using quay.io/libpod/busybox are going to apply
+ // for pull specification quay.io/libpod/busybox.
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // If "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // Users who want a deterministic order of mirrors should configure them into one list of mirrors using the expected order.
+ // +optional
+ // +listType=atomic
+ ImageTagMirrors []ImageTagMirrors `json:"imageTagMirrors"`
+}
+
+type ImageTagMirrorSetStatus struct{}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagMirrorSetList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageTagMirrorSet `json:"items"`
+}
+
+// ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type ImageTagMirrors struct {
+ // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname,
+ // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
+ // "source" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // [*.]host
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +required
+ // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+ Source string `json:"source"`
+ // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified.
+ // Images can be pulled from these mirrors only if they are referenced by their tags.
+ // The mirrored location is obtained by replacing the part of the input reference that
+ // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference,
+ // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo
+ // repository to be used.
+ // Pulling images by tag can potentially yield different images, depending on which endpoint we pull from.
+ // Configuring a list of mirrors using "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue.
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors.
+ // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be
+ // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy".
+ // Other cluster configuration, including (but not limited to) other imageTagMirrors objects,
+ // may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+ // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // "mirrors" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +optional
+ // +listType=set
+ Mirrors []ImageMirror `json:"mirrors,omitempty"`
+ // mirrorSourcePolicy defines the fallback policy when pulling the image from the mirrors fails.
+ // If unset, the image will continue to be pulled from the repository in the pull spec.
+ // mirrorSourcePolicy is valid only when one or more mirrors are in the mirror list.
+ // +optional
+ MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
new file mode 100644
index 0000000000..369ba1e7a0
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -0,0 +1,2251 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+
+// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=infrastructures,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type Infrastructure struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +required
+ Spec InfrastructureSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status InfrastructureStatus `json:"status"`
+}
+
+// InfrastructureSpec contains settings that apply to the cluster infrastructure.
+type InfrastructureSpec struct {
+ // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file.
+ // This configuration file is used to configure the Kubernetes cloud provider integration
+ // when using the built-in cloud provider integration or the external cloud controller manager.
+ // The namespace for this config map is openshift-config.
+ //
+ // cloudConfig should only be consumed by the kube_cloud_config controller.
+ // The controller is responsible for using the user configuration in the spec
+ // for various platforms and combining that with the user provided ConfigMap in this field
+ // to create a stitched kube cloud config.
+ // The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace
+ // with the kube cloud config stored in the `cloud.conf` key.
+ // All the clients are expected to use the generated ConfigMap only.
+ //
+ // +optional
+ CloudConfig ConfigMapFileReference `json:"cloudConfig"`
+
+ // platformSpec holds desired information specific to the underlying
+ // infrastructure provider.
+ PlatformSpec PlatformSpec `json:"platformSpec,omitempty"`
+}
+
+// InfrastructureStatus describes the infrastructure the cluster is leveraging.
+type InfrastructureStatus struct {
+ // infrastructureName uniquely identifies a cluster with a human-friendly name.
+ // Once set it should not be changed. Must be of max length 27 and must have only
+ // alphanumeric or hyphen characters.
+ // +optional + InfrastructureName string `json:"infrastructureName"` + + // platform is the underlying infrastructure provider for the cluster. + // + // Deprecated: Use platformStatus.type instead. + // +optional + Platform PlatformType `json:"platform,omitempty"` + + // platformStatus holds status information specific to the underlying + // infrastructure provider. + // +optional + PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"` + + // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering + // etcd servers and clients. + // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + // +optional + EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` + + // apiServerURL is a valid URI with scheme 'https', address and + // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console + // to tell users where to find the Kubernetes API. + // +optional + APIServerURL string `json:"apiServerURL"` + + // apiServerInternalURL is a valid URI with scheme 'https', + // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components + // like kubelets, to contact the Kubernetes API server using the + // infrastructure provider rather than Kubernetes networking. + // +optional + APIServerInternalURL string `json:"apiServerInternalURI"` + + // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation + // The 'External' mode indicates that the control plane is hosted externally to the cluster and that + // its components are not visible within the cluster. + // +kubebuilder:default=HighlyAvailable + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=DualReplica,enum=HighlyAvailable;SingleReplica;DualReplica;External + // +openshift:validation:FeatureGateAwareEnum:requiredFeatureGate=HighlyAvailableArbiter;DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External + // +optional + ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` + + // infrastructureTopology expresses the expectations for infrastructure services that do not run on control + // plane nodes, usually indicated by a node selector for a `role` value + // other than `master`. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation + // NOTE: External topology mode is not applicable for this field. 
+ // +kubebuilder:default=HighlyAvailable
+ // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica
+ // +optional
+ InfrastructureTopology TopologyMode `json:"infrastructureTopology,omitempty"`
+
+ // cpuPartitioning expresses whether CPU partitioning is a currently enabled feature in the cluster.
+ // CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets.
+ // Valid values are "None" and "AllNodes". When omitted, the default value is "None".
+ // The default value of "None" indicates that no nodes will be setup with CPU partitioning.
+ // The "AllNodes" value indicates that all nodes have been setup with CPU partitioning,
+ // and can then be further configured via the PerformanceProfile API.
+ // +kubebuilder:default=None
+ // +default="None"
+ // +kubebuilder:validation:Enum=None;AllNodes
+ // +optional
+ CPUPartitioning CPUPartitioningMode `json:"cpuPartitioning,omitempty"`
+}
+
+// TopologyMode defines the topology mode of the control/infra nodes.
+// NOTE: Enum validation is specified in each field that uses this type,
+// given that External value is not applicable to the InfrastructureTopology
+// field.
+type TopologyMode string
+
+const (
+ // "HighlyAvailable" is for operators to configure high-availability as much as possible.
+ HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable"
+
+ // "HighlyAvailableArbiter" is for operators to configure for an arbiter HA deployment.
+ HighlyAvailableArbiterMode TopologyMode = "HighlyAvailableArbiter"
+
+ // "SingleReplica" is for operators to avoid spending resources for high-availability purpose.
+ SingleReplicaTopologyMode TopologyMode = "SingleReplica"
+
+ // "DualReplica" is for operators to configure for a two-node topology.
+ DualReplicaTopologyMode TopologyMode = "DualReplica"
+
+ // "External" indicates that the component is running externally to the cluster. When specified
+ // as the control plane topology, operators should avoid scheduling workloads to masters and should
+ // not assume that any of the control plane components, such as the Kubernetes API server or etcd,
+ // are visible within the cluster.
+ ExternalTopologyMode TopologyMode = "External"
+)
+
+// CPUPartitioningMode defines the mode for CPU partitioning
+type CPUPartitioningMode string
+
+const (
+ // CPUPartitioningNone means that no CPU partitioning is enabled in this cluster infrastructure
+ CPUPartitioningNone CPUPartitioningMode = "None"
+
+ // CPUPartitioningAllNodes means that all nodes are configured with CPU Partitioning in this cluster
+ CPUPartitioningAllNodes CPUPartitioningMode = "AllNodes"
+)
+
+// PlatformLoadBalancerType defines the type of load balancer used by the cluster.
+type PlatformLoadBalancerType string
+
+const (
+ // LoadBalancerTypeUserManaged is a load balancer with control-plane VIPs managed outside of the cluster by the customer.
+ LoadBalancerTypeUserManaged PlatformLoadBalancerType = "UserManaged"
+
+ // LoadBalancerTypeOpenShiftManagedDefault is the default load balancer with control-plane VIPs managed by the OpenShift cluster.
+ LoadBalancerTypeOpenShiftManagedDefault PlatformLoadBalancerType = "OpenShiftManagedDefault"
+)
+
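A sketch of how an operator might consume these topology fields, reusing the earlier configv1 import; the replica counts are illustrative, not prescriptive:

// operandReplicas picks a replica count from the documented control-plane
// topology semantics: a single replica for SingleReplica, otherwise an
// HA-appropriate count.
func operandReplicas(infra *configv1.Infrastructure) int32 {
	if infra.Status.ControlPlaneTopology == configv1.SingleReplicaTopologyMode {
		return 1
	}
	return 2
}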
+// DNSRecordsType defines whether api, api-int, and ingress records are provided by
+// the internal DNS infrastructure or must be configured external to the cluster.
+// +kubebuilder:validation:Enum=Internal;External
+// +enum
+type DNSRecordsType string
+
+const (
+ DNSRecordsTypeExternal DNSRecordsType = "External"
+ DNSRecordsTypeInternal DNSRecordsType = "Internal"
+)
+
+// PlatformType is a specific supported infrastructure provider.
+// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix;External
+type PlatformType string
+
+const (
+ // AWSPlatformType represents Amazon Web Services infrastructure.
+ AWSPlatformType PlatformType = "AWS"
+
+ // AzurePlatformType represents Microsoft Azure infrastructure.
+ AzurePlatformType PlatformType = "Azure"
+
+ // BareMetalPlatformType represents managed bare metal infrastructure.
+ BareMetalPlatformType PlatformType = "BareMetal"
+
+ // GCPPlatformType represents Google Cloud Platform infrastructure.
+ GCPPlatformType PlatformType = "GCP"
+
+ // LibvirtPlatformType represents libvirt infrastructure.
+ LibvirtPlatformType PlatformType = "Libvirt"
+
+ // OpenStackPlatformType represents OpenStack infrastructure.
+ OpenStackPlatformType PlatformType = "OpenStack"
+
+ // NonePlatformType means there is no infrastructure provider.
+ NonePlatformType PlatformType = "None"
+
+ // VSpherePlatformType represents VMware vSphere infrastructure.
+ VSpherePlatformType PlatformType = "VSphere"
+
+ // OvirtPlatformType represents oVirt/RHV infrastructure.
+ OvirtPlatformType PlatformType = "oVirt"
+
+ // IBMCloudPlatformType represents IBM Cloud infrastructure.
+ IBMCloudPlatformType PlatformType = "IBMCloud"
+
+ // KubevirtPlatformType represents KubeVirt/OpenShift Virtualization infrastructure.
+ KubevirtPlatformType PlatformType = "KubeVirt"
+
+ // EquinixMetalPlatformType represents Equinix Metal infrastructure.
+ EquinixMetalPlatformType PlatformType = "EquinixMetal"
+
+ // PowerVSPlatformType represents IBM Power Systems Virtual Servers infrastructure.
+ PowerVSPlatformType PlatformType = "PowerVS"
+
+ // AlibabaCloudPlatformType represents Alibaba Cloud infrastructure.
+ AlibabaCloudPlatformType PlatformType = "AlibabaCloud"
+
+ // NutanixPlatformType represents Nutanix infrastructure.
+ NutanixPlatformType PlatformType = "Nutanix"
+
+ // ExternalPlatformType represents a generic infrastructure provider. Platform-specific components should be supplemented separately.
+ ExternalPlatformType PlatformType = "External"
+)
+
+// IBMCloudProviderType is a specific supported IBM Cloud provider cluster type
+type IBMCloudProviderType string
+
+const (
+ // Classic means that the IBM Cloud cluster is using classic infrastructure
+ IBMCloudProviderTypeClassic IBMCloudProviderType = "Classic"
+
+ // VPC means that the IBM Cloud cluster is using VPC infrastructure
+ IBMCloudProviderTypeVPC IBMCloudProviderType = "VPC"
+
+ // IBMCloudProviderTypeUPI means that the IBM Cloud cluster is using user provided infrastructure.
+ // This is utilized in IBM Cloud Satellite environments.
+ IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI"
+)
+
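The PlatformType contract above says components must handle unrecognized platforms as None; a minimal sketch of that normalization for a component that only understands AWS and PowerVS (same configv1 import as before):

// effectivePlatform treats any platform this component does not support as
// None, per the PlatformType documentation.
func effectivePlatform(t configv1.PlatformType) configv1.PlatformType {
	switch t {
	case configv1.AWSPlatformType, configv1.PowerVSPlatformType:
		return t
	default:
		return configv1.NonePlatformType
	}
}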
+// DNSType indicates whether the cluster DNS is hosted by the cluster or Core DNS.
+type DNSType string
+
+const (
+ // ClusterHosted indicates that a DNS solution other than the default provided by the
+ // cloud platform is in use. In this mode, the cluster hosts a DNS solution during installation and the
+ // user is expected to provide their own DNS solution post-install.
+ // When the DNS solution is `ClusterHosted`, the cluster will continue to use the
+ // default Load Balancers provided by the cloud platform.
+ ClusterHostedDNSType DNSType = "ClusterHosted"
+
+ // PlatformDefault indicates that the cluster is using the default DNS solution for the
+ // cloud platform. OpenShift is responsible for all the LB and DNS configuration needed for the
+ // cluster to be functional with no intervention from the user. To accomplish this, OpenShift
+ // configures the default LB and DNS solutions provided by the underlying cloud.
+ PlatformDefaultDNSType DNSType = "PlatformDefault"
+)
+
+// ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.
+type ExternalPlatformSpec struct {
+ // platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at installation time.
+ // This field is solely for informational and reporting purposes and is not expected to be used for decision-making.
+ // +kubebuilder:default:="Unknown"
+ // +default="Unknown"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == 'Unknown' || self == oldSelf",message="platform name cannot be changed once set"
+ // +optional
+ PlatformName string `json:"platformName,omitempty"`
+}
+
+// PlatformSpec holds the desired state specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at spec-level for the underlying cluster, it
+// is assumed that only one of the spec structs is set.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.vsphere) && has(self.vsphere) ? size(self.vsphere.vcenters) < 2 : true",message="vcenters can have at most 1 item when configured post-install"
+type PlatformSpec struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal",
+ // "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual
+ // components may not support all platforms, and must handle unrecognized
+ // platforms as None if they do not support that platform.
+ //
+ // +unionDiscriminator
+ Type PlatformType `json:"type"`
+
+ // aws contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformSpec `json:"aws,omitempty"`
+
+ // azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformSpec `json:"azure,omitempty"`
+
+ // gcp contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformSpec `json:"gcp,omitempty"`
+
+ // baremetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"`
+
+ // openstack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"`
+
+ // ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"`
+
+ // vsphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"`
+
+ // ibmcloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+
+ // kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"`
+
+ // equinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"`
+
+ // powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.
+ // +optional
+ PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"`
+
+ // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // +optional
+ AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"`
+
+ // nutanix contains settings specific to the Nutanix infrastructure provider.
+ // +optional
+ Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"`
+
+ // external contains settings specific to the generic External infrastructure provider.
+ // Platform-specific components should be supplemented separately.
+ // +optional
+ External *ExternalPlatformSpec `json:"external,omitempty"`
+}
+
+// CloudControllerManagerState defines whether Cloud Controller Manager presence is expected or not
+type CloudControllerManagerState string
+
+const (
+ // Cloud Controller Manager is enabled and expected to be installed.
+ // This value indicates that new nodes should be tainted as uninitialized when created,
+ // preventing them from running workloads until they are initialized by the cloud controller manager.
+ CloudControllerManagerExternal CloudControllerManagerState = "External"
+
+ // Cloud Controller Manager is disabled and not expected to be installed.
+ // This value indicates that new nodes should not be tainted
+ // and no extra node initialization is expected from the cloud controller manager.
+ CloudControllerManagerNone CloudControllerManagerState = "None"
+)
+
+// CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings
+// +kubebuilder:validation:XValidation:rule="(has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != \"External\")",message="state may not be added or removed once set"
+type CloudControllerManagerStatus struct {
+ // state determines whether or not an external Cloud Controller Manager is expected to
+ // be installed within the cluster.
+ // https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
+ //
+ // Valid values are "External", "None" and omitted.
+ // When set to "External", new nodes will be tainted as uninitialized when created,
+ // preventing them from running workloads until they are initialized by the cloud controller manager.
+ // When omitted or set to "None", new nodes will not be tainted
+ // and no extra initialization from the cloud controller manager is expected.
+ // +kubebuilder:validation:Enum="";External;None
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="state is immutable once set"
+ // +optional
+ State CloudControllerManagerState `json:"state"`
+}
+
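For the External platform, a sketch of the status shape an installer might report, using the ExternalPlatformStatus type defined just below and the same imports as the earlier sketches:

// externalStatus declares that an external CCM is expected, so new nodes stay
// tainted as uninitialized until that CCM initializes them.
var externalStatus = configv1.ExternalPlatformStatus{
	CloudControllerManager: configv1.CloudControllerManagerStatus{
		State: configv1.CloudControllerManagerExternal,
	},
}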
+// +kubebuilder:validation:XValidation:rule="has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager)",message="cloudControllerManager may not be added or removed once set" +type ExternalPlatformStatus struct { + // cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). + // When omitted, new nodes will be not tainted + // and no extra initialization from the cloud controller manager is expected. + // +optional + CloudControllerManager CloudControllerManagerStatus `json:"cloudControllerManager"` +} + +// PlatformStatus holds the current status specific to the underlying infrastructure provider +// of the current cluster. Since these are used at status-level for the underlying cluster, it +// is supposed that only one of the status structs is set. +type PlatformStatus struct { + // type is the underlying infrastructure provider for the cluster. This + // value controls whether infrastructure automation such as service load + // balancers, dynamic volume provisioning, machine creation and deletion, and + // other integrations are enabled. If None, no infrastructure automation is + // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", + // "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". + // Individual components may not support all platforms, and must handle + // unrecognized platforms as None if they do not support that platform. + // + // This value will be synced with to the `status.platform` and `status.platformStatus.type`. + // Currently this value cannot be changed once set. + Type PlatformType `json:"type"` + + // aws contains settings specific to the Amazon Web Services infrastructure provider. + // +optional + AWS *AWSPlatformStatus `json:"aws,omitempty"` + + // azure contains settings specific to the Azure infrastructure provider. + // +optional + Azure *AzurePlatformStatus `json:"azure,omitempty"` + + // gcp contains settings specific to the Google Cloud Platform infrastructure provider. + // +optional + GCP *GCPPlatformStatus `json:"gcp,omitempty"` + + // baremetal contains settings specific to the BareMetal platform. + // +optional + BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"` + + // openstack contains settings specific to the OpenStack infrastructure provider. + // +optional + OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"` + + // ovirt contains settings specific to the oVirt infrastructure provider. + // +optional + Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"` + + // vsphere contains settings specific to the VSphere infrastructure provider. + // +optional + VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"` + + // ibmcloud contains settings specific to the IBMCloud infrastructure provider. + // +optional + IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` + + // kubevirt contains settings specific to the kubevirt infrastructure provider. + // +optional + Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"` + + // equinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // +optional + EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"` + + // powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. + // +optional + PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"` + + // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
+	// +optional
+	AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"`
+
+	// nutanix contains settings specific to the Nutanix infrastructure provider.
+	// +optional
+	Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"`
+
+	// external contains settings specific to the generic External infrastructure provider.
+	// +optional
+	External *ExternalPlatformStatus `json:"external,omitempty"`
+}
+
+// AWSServiceEndpoint stores the configuration of a custom URL to
+// override existing defaults of AWS Services.
+type AWSServiceEndpoint struct {
+	// name is the name of the AWS service.
+	// The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html
+	// This must be provided and cannot be empty.
+	//
+	// +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
+	Name string `json:"name"`
+
+	// url is a fully qualified URI with scheme https that overrides the default generated
+	// endpoint for a client.
+	// This must be provided and cannot be empty.
+	//
+	// +kubebuilder:validation:Pattern=`^https://`
+	URL string `json:"url"`
+}
+
+// IPFamilyType represents the IP protocol family that cloud platform resources should use.
+// +kubebuilder:validation:Enum=IPv4;DualStackIPv6Primary;DualStackIPv4Primary
+type IPFamilyType string
+
+const (
+	// IPv4 indicates that cloud platform resources should use IPv4 addressing only.
+	IPv4 IPFamilyType = "IPv4"
+
+	// DualStackIPv6Primary indicates that cloud platform resources should use dual-stack networking with IPv6 as primary.
+	DualStackIPv6Primary IPFamilyType = "DualStackIPv6Primary"
+
+	// DualStackIPv4Primary indicates that cloud platform resources should use dual-stack networking with IPv4 as primary.
+	DualStackIPv4Primary IPFamilyType = "DualStackIPv4Primary"
+)
+
+// AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AWSPlatformSpec struct {
+	// serviceEndpoints list contains custom endpoints which will override default
+	// service endpoint of AWS Services.
+	// There must be only one ServiceEndpoint for a service.
+	// +listType=atomic
+	// +optional
+	ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
+type AWSPlatformStatus struct {
+	// region holds the default AWS region for new AWS resources created by the cluster.
+	Region string `json:"region"`
+
+	// serviceEndpoints list contains custom endpoints which will override default
+	// service endpoint of AWS Services.
+	// There must be only one ServiceEndpoint for a service.
+	// +listType=atomic
+	// +optional
+	ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+	// resourceTags is a list of additional tags to apply to AWS resources created for the cluster.
+	// See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources.
+	// AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags
+	// available for the user.
+	// +kubebuilder:validation:MaxItems=25
+	// +listType=atomic
+	// +optional
+	ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+
+	// cloudLoadBalancerConfig holds configuration related to DNS and cloud
+	// load balancers.
It allows configuration of in-cluster DNS as an alternative
+	// to the platform default DNS implementation.
+	// When using the ClusterHosted DNS type, Load Balancer IP addresses
+	// must be provided for the API and internal API load balancers as well as the
+	// ingress load balancer.
+	//
+	// +default={"dnsType": "PlatformDefault"}
+	// +kubebuilder:default={"dnsType": "PlatformDefault"}
+	// +openshift:enable:FeatureGate=AWSClusterHostedDNSInstall
+	// +optional
+	// +nullable
+	CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
+
+	// ipFamily specifies the IP protocol family that should be used for AWS
+	// network resources. This controls whether AWS resources are created with
+	// IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary
+	// protocol family.
+	//
+	// +default="IPv4"
+	// +kubebuilder:default="IPv4"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set"
+	// +openshift:enable:FeatureGate=AWSDualStackInstall
+	// +optional
+	IPFamily IPFamilyType `json:"ipFamily,omitempty"`
+}
+
+// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
+type AWSResourceTag struct {
+	// key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag.
+	// Key should consist of between 1 and 128 characters, and may
+	// contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=128
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag key. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'"
+	// +required
+	Key string `json:"key"`
+	// value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag.
+	// Value should consist of between 1 and 256 characters, and may
+	// contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.
+	// Some AWS services do not support empty values. Since tags are added to resources in many services, the
+	// length of the tag value must meet the requirements of all services.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=256
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag value. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'"
+	// +required
+	Value string `json:"value"`
+}
+
+// AzurePlatformSpec holds the desired state of the Azure infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AzurePlatformSpec struct{}
+
+// AzurePlatformStatus holds the current status of the Azure infrastructure provider.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
+type AzurePlatformStatus struct {
+	// resourceGroupName is the Resource Group for new Azure resources created for the cluster.
+	ResourceGroupName string `json:"resourceGroupName"`
+
+	// networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
+	// If empty, the value is the same as ResourceGroupName.
+	// +optional
+	NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
+
+	// cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
+	// with the appropriate Azure API endpoints.
+	// If empty, the value is equal to `AzurePublicCloud`.
+	// +optional
+	CloudName AzureCloudEnvironment `json:"cloudName,omitempty"`
+
+	// armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.
+	// +optional
+	ARMEndpoint string `json:"armEndpoint,omitempty"`
+
+	// resourceTags is a list of additional tags to apply to Azure resources created for the cluster.
+	// See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources.
+	// Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags
+	// may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation"
+	// +listType=atomic
+	// +optional
+	ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"`
+
+	// cloudLoadBalancerConfig holds configuration related to DNS and cloud
+	// load balancers. It allows configuration of in-cluster DNS as an alternative
+	// to the platform default DNS implementation.
+	// When using the ClusterHosted DNS type, Load Balancer IP addresses
+	// must be provided for the API and internal API load balancers as well as the
+	// ingress load balancer.
+	//
+	// +default={"dnsType": "PlatformDefault"}
+	// +kubebuilder:default={"dnsType": "PlatformDefault"}
+	// +openshift:enable:FeatureGate=AzureClusterHostedDNSInstall
+	// +optional
+	CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
+
+	// ipFamily specifies the IP protocol family that should be used for Azure
+	// network resources. This controls whether Azure resources are created with
+	// IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary
+	// protocol family.
+	//
+	// +default="IPv4"
+	// +kubebuilder:default="IPv4"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set"
+	// +openshift:enable:FeatureGate=AzureDualStackInstall
+	// +optional
+	IPFamily IPFamilyType `json:"ipFamily,omitempty"`
+}
+
+// AzureResourceTag is a tag to apply to Azure resources created for the cluster.
+type AzureResourceTag struct {
+	// key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key
+	// must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric
+	// characters and the following special characters `_ . -`.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=128
+	// +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$`
+	Key string `json:"key"`
+	// value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty.
Value + // must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$` + Value string `json:"value"` +} + +// AzureCloudEnvironment is the name of the Azure cloud environment +// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud +type AzureCloudEnvironment string + +const ( + // AzurePublicCloud is the general-purpose, public Azure cloud environment. + AzurePublicCloud AzureCloudEnvironment = "AzurePublicCloud" + + // AzureUSGovernmentCloud is the Azure cloud environment for the US government. + AzureUSGovernmentCloud AzureCloudEnvironment = "AzureUSGovernmentCloud" + + // AzureChinaCloud is the Azure cloud environment used in China. + AzureChinaCloud AzureCloudEnvironment = "AzureChinaCloud" + + // AzureGermanCloud is the Azure cloud environment used in Germany. + AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud" + + // AzureStackCloud is the Azure cloud environment used at the edge and on premises. + AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" +) + +// Start: TOMBSTONE + +// GCPServiceEndpointName is the name of the GCP Service Endpoint. +// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;IAMCredentials;OAuth;ServiceUsage;Storage;STS +//type GCPServiceEndpointName string + +// GCPServiceEndpoint store the configuration of a custom url to +// override existing defaults of GCP Services. +// type GCPServiceEndpoint struct { +// name is the name of the GCP service whose endpoint is being overridden. +// This must be provided and cannot be empty. +// +// Allowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, +// Storage, and TagManager. +// +// As an example, when setting the name to Compute all requests made by the caller to the GCP Compute +// Service will be directed to the endpoint specified in the url field. +// +// +required +// Name GCPServiceEndpointName `json:"name"` + +// url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified +// in the name field. +// url is required, must use the scheme https, must not be more than 253 characters in length, +// and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL) +// +// An example of a valid endpoint that overrides the Compute Service: "https://compute-myendpoint1.p.googleapis.com" +// +// +required +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" +// +kubebuilder:validation:XValidation:rule="isURL(self) ? (url(self).getScheme() == \"https\") : true",message="scheme must be https" +// +kubebuilder:validation:XValidation:rule="url(self).getEscapedPath() == \"\" || url(self).getEscapedPath() == \"/\"",message="url must consist only of a scheme and domain. The url path must be empty." +// URL string `json:"url"` +//} + +// End: TOMBSTONE + +// GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. +// This only includes fields that can be modified in the cluster. +type GCPPlatformSpec struct{} + +// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. 
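+// For illustration only (editor's note, not upstream doc text): a GCP platform status
+// fragment carrying install-time labels might look like the sketch below; "my-project",
+// "us-central1" and the label pair are placeholder values.
+//
+//	gcp:
+//	  projectID: my-project
+//	  region: us-central1
+//	  resourceLabels:
+//	  - key: team
+//	    value: ocp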
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
+type GCPPlatformStatus struct {
+	// projectID is the Project ID for new GCP resources created for the cluster.
+	ProjectID string `json:"projectID"`
+
+	// region holds the region for new GCP resources created for the cluster.
+	Region string `json:"region"`
+
+	// resourceLabels is a list of additional labels to apply to GCP resources created for the cluster.
+	// See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources.
+	// GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use,
+	// allowing 32 labels for user configuration.
+	// +kubebuilder:validation:MaxItems=32
+	// +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceLabels are immutable and may only be configured during installation"
+	// +listType=map
+	// +listMapKey=key
+	// +optional
+	ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"`
+
+	// resourceTags is a list of additional tags to apply to GCP resources created for the cluster.
+	// See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on
+	// tagging GCP resources. GCP supports a maximum of 50 tags per resource.
+	// +kubebuilder:validation:MaxItems=50
+	// +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation"
+	// +listType=map
+	// +listMapKey=key
+	// +optional
+	ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"`
+
+	// This field was introduced and removed under tech preview.
+	// To avoid conflicts with serialisation, this field name may never be used again.
+	// Tombstone the field as a reminder.
+	// ClusterHostedDNS ClusterHostedDNS `json:"clusterHostedDNS,omitempty"`
+
+	// cloudLoadBalancerConfig holds configuration related to DNS and cloud
+	// load balancers. It allows configuration of in-cluster DNS as an alternative
+	// to the platform default DNS implementation.
+	// When using the ClusterHosted DNS type, Load Balancer IP addresses
+	// must be provided for the API and internal API load balancers as well as the
+	// ingress load balancer.
+	//
+	// +default={"dnsType": "PlatformDefault"}
+	// +kubebuilder:default={"dnsType": "PlatformDefault"}
+	// +openshift:enable:FeatureGate=GCPClusterHostedDNSInstall
+	// +optional
+	// +nullable
+	CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
+
+	// This field was introduced and removed under tech preview.
+	// serviceEndpoints specifies endpoints that override the default endpoints
+	// used when creating clients to interact with GCP services.
+	// When not specified, the default endpoint for the GCP region will be used.
+	// Only 1 endpoint override is permitted for each GCP service.
+	// The maximum number of endpoint overrides allowed is 11.
+	// To avoid conflicts with serialisation, this field name may never be used again.
+	// Tombstone the field as a reminder.
+ // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=11 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" + // +optional + // +openshift:enable:FeatureGate=GCPCustomAPIEndpointsInstall + // ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` +} + +// GCPResourceLabel is a label to apply to GCP resources created for the cluster. +type GCPResourceLabel struct { + // key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. + // Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, + // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` + // and `openshift-io`. + // +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`" + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$` + Key string `json:"key"` + + // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. + // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$` + Value string `json:"value"` +} + +// GCPResourceTag is a tag to apply to GCP resources created for the cluster. +type GCPResourceTag struct { + // parentID is the ID of the hierarchical resource where the tags are defined, + // e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: + // https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, + // https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. + // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. + // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, + // and hyphens, and must start with a letter, and cannot end with a hyphen. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=32 + // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` + ParentID string `json:"parentID"` + + // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. + // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase + // alphanumeric characters, and the following special characters `._-`. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` + Key string `json:"key"` + + // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. + // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase + // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. 
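+	// For illustration only (editor's note): a complete resourceTags entry satisfying the
+	// constraints above might look like the sketch below; the parentID, key and value are
+	// placeholder values.
+	//
+	//	resourceTags:
+	//	- parentID: "1234567890"
+	//	  key: environment
+	//	  value: production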
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=63
+	// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$`
+	Value string `json:"value"`
+}
+
+// CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS
+// solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's
+// Load Balancer configuration needs to be provided so that the DNS solution hosted
+// within the cluster can be configured with those values.
+// +kubebuilder:validation:XValidation:rule="has(self.dnsType) && self.dnsType != 'ClusterHosted' ? !has(self.clusterHosted) : true",message="clusterHosted is permitted only when dnsType is ClusterHosted"
+// +union
+type CloudLoadBalancerConfig struct {
+	// dnsType indicates the type of DNS solution in use within the cluster. Its default value of
+	// `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform.
+	// It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode,
+	// the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed.
+	// The cluster's use of the cloud's Load Balancers is unaffected by this setting.
+	// The value is immutable after it has been set at install time.
+	// Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS.
+	// Enabling this functionality allows the user to start their own DNS solution outside the cluster after
+	// installation is complete. The customer would be responsible for configuring this custom DNS solution,
+	// and it can be run in addition to the in-cluster DNS solution.
+	// +default="PlatformDefault"
+	// +kubebuilder:default:="PlatformDefault"
+	// +kubebuilder:validation:Enum="ClusterHosted";"PlatformDefault"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="dnsType is immutable"
+	// +optional
+	// +unionDiscriminator
+	DNSType DNSType `json:"dnsType,omitempty"`
+
+	// clusterHosted holds the IP addresses of API, API-Int and Ingress Load
+	// Balancers on Cloud Platforms. The DNS solution hosted within the cluster
+	// uses these IP addresses to provide resolution for API, API-Int and Ingress
+	// services.
+	// +optional
+	// +unionMember,optional
+	ClusterHosted *CloudLoadBalancerIPs `json:"clusterHosted,omitempty"`
+}
+
+// CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API,
+// API-Int and Ingress Load balancers. They will be populated as soon as the
+// respective Load Balancers have been configured. These values are utilized
+// to configure the DNS solution hosted within the cluster.
+type CloudLoadBalancerIPs struct {
+	// apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service.
+	// These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses.
+	// Entries in the apiIntLoadBalancerIPs must be unique.
+	// A maximum of 16 IP addresses are permitted.
+	// +kubebuilder:validation:Format=ip
+	// +listType=set
+	// +kubebuilder:validation:MaxItems=16
+	// +optional
+	APIIntLoadBalancerIPs []IP `json:"apiIntLoadBalancerIPs,omitempty"`
+
+	// apiLoadBalancerIPs holds Load Balancer IPs for the API service.
+	// These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses.
+	// Could be empty for private clusters.
+	// Entries in the apiLoadBalancerIPs must be unique.
+	// A maximum of 16 IP addresses are permitted.
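+	// For illustration only (editor's note): a ClusterHosted configuration populating
+	// these lists might look like the sketch below; the addresses are placeholders from
+	// documentation and private ranges.
+	//
+	//	cloudLoadBalancerConfig:
+	//	  dnsType: ClusterHosted
+	//	  clusterHosted:
+	//	    apiLoadBalancerIPs:
+	//	    - 192.0.2.10
+	//	    apiIntLoadBalancerIPs:
+	//	    - 10.0.0.5
+	//	    ingressLoadBalancerIPs:
+	//	    - 192.0.2.20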
+ // +kubebuilder:validation:Format=ip + // +listType=set + // +kubebuilder:validation:MaxItems=16 + // +optional + APILoadBalancerIPs []IP `json:"apiLoadBalancerIPs,omitempty"` + + // ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + // Entries in the ingressLoadBalancerIPs must be unique. + // A maximum of 16 IP addresses are permitted. + // +kubebuilder:validation:Format=ip + // +listType=set + // +kubebuilder:validation:MaxItems=16 + // +optional + IngressLoadBalancerIPs []IP `json:"ingressLoadBalancerIPs,omitempty"` +} + +// BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform. +// +union +type BareMetalPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on BareMetal platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. + // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. +// This only includes fields that can be modified in the cluster. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" +type BareMetalPlatformSpec struct { + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.apiServerInternalIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + APIServerInternalIPs []IP `json:"apiServerInternalIPs"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. 
The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.ingressIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + IngressIPs []IP `json:"ingressIPs"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster + // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, + // for example "10.0.0.0/8" or "fd00::/8". + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. +// For more information about the network architecture used with the BareMetal platform type, see: +// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" +type BareMetalPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. 
The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + IngressIPs []string `json:"ingressIPs"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // BareMetal deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. + // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +optional + LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform. +// +union +type OpenStackPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on OpenStack platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. 
+ // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. +// This only includes fields that can be modified in the cluster. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" +type OpenStackPlatformSpec struct { + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.apiServerInternalIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + APIServerInternalIPs []IP `json:"apiServerInternalIPs"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.ingressIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + IngressIPs []IP `json:"ingressIPs"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster + // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, + // for example "10.0.0.0/8" or "fd00::/8". + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider. 
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" +type OpenStackPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // cloudName is the name of the desired OpenStack cloud in the + // client configuration file (`clouds.yaml`). + CloudName string `json:"cloudName,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + IngressIPs []string `json:"ingressIPs"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // OpenStack deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. 
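+	// For illustration only (editor's note): handing API and Ingress load balancing to an
+	// out-of-band balancer might look like the sketch below.
+	//
+	//	loadBalancer:
+	//	  type: UserManaged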
+ // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +optional + LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform. +// +union +type OvirtPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on Ovirt platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. + // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type OvirtPlatformSpec struct{} + +// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. 
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" +type OvirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=set + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=set + IngressIPs []string `json:"ingressIPs"` + + // deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. + // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +optional + LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. 
+	// When set to `External`, records are not provided by the internal infrastructure
+	// and must be configured by the user on a DNS server outside the cluster.
+	// Cluster nodes must use this external server for their upstream DNS requests.
+	// This value may only be set when loadBalancer.type is set to UserManaged.
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `Internal`.
+	// +openshift:enable:FeatureGate=OnPremDNSRecords
+	// +optional
+	DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"`
+}
+
+// VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.
+// +union
+type VSpherePlatformLoadBalancer struct {
+	// type defines the type of load balancer used by the cluster on VSphere platform
+	// which can be a user-managed or openshift-managed load balancer
+	// that is to be used for the OpenShift API and Ingress endpoints.
+	// When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+	// defined in the machine config operator will be deployed.
+	// When set to UserManaged these static pods will not be deployed and it is expected that
+	// the load balancer is configured out of band by the deployer.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default.
+	// The default value is OpenShiftManagedDefault.
+	// +default="OpenShiftManagedDefault"
+	// +kubebuilder:default:="OpenShiftManagedDefault"
+	// +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+	// +optional
+	// +unionDiscriminator
+	Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// The VSphereFailureDomainZoneType is a string representation of a failure domain
+// zone type. There are two supported types: HostGroup and ComputeCluster.
+// +enum
+type VSphereFailureDomainZoneType string
+
+// The VSphereFailureDomainRegionType is a string representation of a failure domain
+// region type. There are two supported types: ComputeCluster and Datacenter.
+// +enum
+type VSphereFailureDomainRegionType string
+
+const (
+	// HostGroupFailureDomainZone is a failure domain zone for a vCenter vm-host group.
+	HostGroupFailureDomainZone VSphereFailureDomainZoneType = "HostGroup"
+	// ComputeClusterFailureDomainZone is a failure domain zone for a vCenter compute cluster.
+	ComputeClusterFailureDomainZone VSphereFailureDomainZoneType = "ComputeCluster"
+	// DatacenterFailureDomainRegion is a failure domain region for a vCenter datacenter.
+	DatacenterFailureDomainRegion VSphereFailureDomainRegionType = "Datacenter"
+	// ComputeClusterFailureDomainRegion is a failure domain region for a vCenter compute cluster.
+	ComputeClusterFailureDomainRegion VSphereFailureDomainRegionType = "ComputeCluster"
+)
+
+// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'HostGroup' ?
has(self.regionAffinity) && self.regionAffinity.type == 'ComputeCluster' : true",message="when zoneAffinity type is HostGroup, regionAffinity type must be ComputeCluster"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'ComputeCluster' ? has(self.regionAffinity) && self.regionAffinity.type == 'Datacenter' : true",message="when zoneAffinity type is ComputeCluster, regionAffinity type must be Datacenter"
+type VSpherePlatformFailureDomainSpec struct {
+	// name defines the arbitrary but unique name
+	// of a failure domain.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=256
+	Name string `json:"name"`
+
+	// region defines the name of a region tag that will
+	// be attached to a vCenter datacenter. The tag
+	// category in vCenter must be named openshift-region.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=80
+	// +required
+	Region string `json:"region"`
+
+	// zone defines the name of a zone tag that will
+	// be attached to a vCenter cluster. The tag
+	// category in vCenter must be named openshift-zone.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=80
+	// +required
+	Zone string `json:"zone"`
+
+	// regionAffinity holds the type of region, Datacenter or ComputeCluster.
+	// When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology.
+	// When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.
+	// +openshift:validation:featureGate=VSphereHostVMGroupZonal
+	// +optional
+	RegionAffinity *VSphereFailureDomainRegionAffinity `json:"regionAffinity,omitempty"`
+
+	// zoneAffinity holds the type of the zone and the hostGroup. The vmGroup and
+	// hostGroup names in vCenter correspond to a vm-host group of type Virtual
+	// Machine and Host respectively. It also contains the vmHostRule, which is an
+	// affinity vm-host rule in vCenter.
+	// +openshift:validation:featureGate=VSphereHostVMGroupZonal
+	// +optional
+	ZoneAffinity *VSphereFailureDomainZoneAffinity `json:"zoneAffinity,omitempty"`
+
+	// server is the fully-qualified domain name or the IP address of the vCenter server.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=255
+	// ---
+	// + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
+	Server string `json:"server"`
+
+	// topology describes a given failure domain using vSphere constructs
+	// +required
+	Topology VSpherePlatformTopology `json:"topology"`
+}
+
+// VSpherePlatformTopology holds the required and optional vCenter objects - datacenter,
+// computeCluster, networks, datastore and resourcePool - to provision virtual machines.
+type VSpherePlatformTopology struct {
+	// datacenter is the name of vCenter datacenter in which virtual machines will be located.
+	// The maximum length of the datacenter name is 80 characters.
+	// +required
+	// +kubebuilder:validation:MaxLength=80
+	Datacenter string `json:"datacenter"`
+
+	// computeCluster is the absolute path of the vCenter cluster
+	// in which the virtual machine will be located.
+	// The absolute path is of the form /<datacenter>/host/<cluster>.
+	// The maximum length of the path is 2048 characters.
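+	// For illustration only (editor's note): assuming a datacenter named "dc1" and a
+	// compute cluster named "cluster1", the absolute path would be /dc1/host/cluster1.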
+	// +required
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:Pattern=`^/.*?/host/.*?`
+	ComputeCluster string `json:"computeCluster"`
+
+	// networks is the list of port group network names within this failure domain.
+	// If feature gate VSphereMultiNetworks is enabled, up to 10 network adapters may be defined.
+	// 10 is the maximum number of virtual network devices which may be attached to a VM as defined by:
+	// https://configmax.esp.vmware.com/guest?vmwareproduct=vSphere&release=vSphere%208.0&categories=1-0
+	// The available networks (port groups) can be listed using
+	// `govc ls 'network/*'`
+	// Networks should be in the form of an absolute path:
+	// /<datacenter>/network/<portgroup>.
+	// +required
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiNetworks,maxItems=10
+	// +kubebuilder:validation:MinItems=1
+	// +listType=atomic
+	Networks []string `json:"networks"`
+
+	// datastore is the absolute path of the datastore in which the
+	// virtual machine is located.
+	// The absolute path is of the form /<datacenter>/datastore/<datastore>.
+	// The maximum length of the path is 2048 characters.
+	// +required
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?`
+	Datastore string `json:"datastore"`
+
+	// resourcePool is the absolute path of the resource pool where virtual machines will be
+	// created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>.
+	// The maximum length of the path is 2048 characters.
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:Pattern=`^/.*?/host/.*?/Resources.*`
+	// +optional
+	ResourcePool string `json:"resourcePool,omitempty"`
+
+	// folder is the absolute path of the folder where
+	// virtual machines are located. The absolute path
+	// is of the form /<datacenter>/vm/<folder>.
+	// The maximum length of the path is 2048 characters.
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
+	// +optional
+	Folder string `json:"folder,omitempty"`
+
+	// template is the full inventory path of the virtual machine or template
+	// that will be cloned when creating new machines in this failure domain.
+	// The maximum length of the path is 2048 characters.
+	//
+	// When omitted, the template will be calculated by the control plane
+	// machineset operator based on the region and zone defined in
+	// VSpherePlatformFailureDomainSpec.
+	// For example, for zone=zonea, region=region1, and infrastructure name=test,
+	// the template path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
+	// +optional
+	Template string `json:"template,omitempty"`
+}
+
+// VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types)
+// and the vm-host affinity rule that together create an affinity configuration for vm-host based zonal.
+// This configuration within vCenter creates the required association between a failure domain, virtual machines
+// and ESXi hosts to create a vm-host based zone.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'HostGroup' ?
has(self.hostGroup) : !has(self.hostGroup)",message="hostGroup is required when type is HostGroup, and forbidden otherwise"
+// +union
+type VSphereFailureDomainZoneAffinity struct {
+	// type determines the vSphere object type for a zone within this failure domain.
+	// Available types are ComputeCluster and HostGroup.
+	// When set to ComputeCluster, this means the vCenter cluster defined is the zone.
+	// When set to HostGroup, hostGroup must be configured with the hostGroup, vmGroup and vmHostRule fields, and
+	// this means the zone is defined by the grouping of those fields.
+	// +kubebuilder:validation:Enum:=HostGroup;ComputeCluster
+	// +required
+	// +unionDiscriminator
+	Type VSphereFailureDomainZoneType `json:"type"`
+
+	// hostGroup holds the vmGroup and the hostGroup names in vCenter, which
+	// correspond to a vm-host group of type Virtual Machine and Host respectively. It also
+	// contains the vmHostRule, which is an affinity vm-host rule in vCenter.
+	// +unionMember
+	// +optional
+	HostGroup *VSphereFailureDomainHostGroup `json:"hostGroup,omitempty"`
+}
+
+// VSphereFailureDomainRegionAffinity contains the region type, which is the string representation of the
+// VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.
+// +union
+type VSphereFailureDomainRegionAffinity struct {
+	// type determines the vSphere object type for a region within this failure domain.
+	// Available types are Datacenter and ComputeCluster.
+	// When set to Datacenter, this means the vCenter Datacenter defined is the region.
+	// When set to ComputeCluster, this means the vCenter cluster defined is the region.
+	// +kubebuilder:validation:Enum:=ComputeCluster;Datacenter
+	// +required
+	// +unionDiscriminator
+	Type VSphereFailureDomainRegionType `json:"type"`
+}
+
+// VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter,
+// which correspond to a vm-host group of type Virtual Machine and Host respectively. It also
+// contains the vmHostRule, which is an affinity vm-host rule in vCenter.
+type VSphereFailureDomainHostGroup struct {
+	// vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain.
+	// vmGroup is limited to 80 characters.
+	// This field is required when the VSphereFailureDomain ZoneType is HostGroup.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=80
+	// +required
+	VMGroup string `json:"vmGroup"`
+
+	// hostGroup is the name of the vm-host group of type host within vCenter for this failure domain.
+	// hostGroup is limited to 80 characters.
+	// This field is required when the VSphereFailureDomain ZoneType is HostGroup.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=80
+	// +required
+	HostGroup string `json:"hostGroup"`
+
+	// vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain.
+	// vmHostRule is limited to 80 characters.
+	// This field is required when the VSphereFailureDomain ZoneType is HostGroup.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=80
+	// +required
+	VMHostRule string `json:"vmHostRule"`
+}
+
+// VSpherePlatformVCenterSpec stores the vCenter connection fields.
+// This is used by the vSphere CCM.
+type VSpherePlatformVCenterSpec struct {
+
+	// server is the fully-qualified domain name or the IP address of the vCenter server.
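+	// For example (both values are illustrative, not defaults): vcenter.example.com or 192.0.2.10.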
+	// +required
+	// +kubebuilder:validation:MaxLength=255
+	// ---
+	// + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
+	Server string `json:"server"`
+
+	// port is the TCP port that will be used to communicate to
+	// the vCenter endpoint.
+	// When omitted, this means the user has no opinion and
+	// it is up to the platform to choose a sensible default,
+	// which is subject to change over time.
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=32767
+	// +optional
+	Port int32 `json:"port,omitempty"`
+
+	// datacenters are the vCenter Datacenters in which the RHCOS
+	// vm guests are located. This field will
+	// be used by the Cloud Controller Manager.
+	// Each datacenter listed here should be used within
+	// a topology.
+	// +required
+	// +kubebuilder:validation:MinItems=1
+	// +listType=set
+	Datacenters []string `json:"datacenters"`
+}
+
+// VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for
+// including and excluding IP ranges in the cloud provider.
+// This would be used for example when multiple network adapters are attached to
+// a guest to help determine which IP address the cloud config manager should use
+// for the external and internal node networking.
+type VSpherePlatformNodeNetworkingSpec struct {
+	// networkSubnetCidr is a list of CIDRs; an IP address on the VirtualMachine's network
+	// interfaces that is included in these CIDRs will be used in the respective
+	// status.addresses fields.
+	// ---
+	// + Validation is applied via a patch, we validate the format as cidr
+	// +listType=set
+	// +optional
+	NetworkSubnetCIDR []string `json:"networkSubnetCidr,omitempty"`
+
+	// network is the VirtualMachine's VM Network name that will be used when searching
+	// for status.addresses fields. Note that if internal.networkSubnetCIDR and
+	// external.networkSubnetCIDR are not set, then the vNIC associated to this network must
+	// only have a single IP address assigned to it.
+	// The available networks (port groups) can be listed using
+	// `govc ls 'network/*'`
+	// +optional
+	Network string `json:"network,omitempty"`
+
+	// excludeNetworkSubnetCidr is a list of subnet ranges; IP addresses within these ranges
+	// will be excluded when selecting the IP address from the VirtualMachine's VM for use in
+	// the status.addresses fields.
+	// ---
+	// + Validation is applied via a patch, we validate the format as cidr
+	// +listType=atomic
+	// +optional
+	ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"`
+}
+
+// VSpherePlatformNodeNetworking holds the external and internal node networking spec.
+type VSpherePlatformNodeNetworking struct {
+	// external represents the network configuration of the node that is externally routable.
+	// +optional
+	External VSpherePlatformNodeNetworkingSpec `json:"external"`
+	// internal represents the network configuration of the node that is routable only within the cluster.
+	// +optional
+	Internal VSpherePlatformNodeNetworkingSpec `json:"internal"`
+}
+
+// VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider.
+// In the future the cloud provider operator, storage operator and machine operator will
+// use these fields for configuration.
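+// A minimal illustrative example of this spec (every name and address below is
+// hypothetical, chosen only to show the shape of the fields):
+//
+//	vsphere:
+//	  vcenters:
+//	  - server: vcenter.example.com
+//	    port: 443
+//	    datacenters:
+//	    - DC0
+//	  failureDomains:
+//	  - name: us-east-1
+//	    region: us-east
+//	    zone: us-east-1a
+//	    server: vcenter.example.com
+//	    topology:
+//	      datacenter: DC0
+//	      computeCluster: /DC0/host/cluster-1
+//	      networks:
+//	      - /DC0/network/VM Network
+//	      datastore: /DC0/datastore/datastore-1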
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.vcenters) && has(self.vcenters) ? size(self.vcenters) < 2 : true",message="vcenters can have at most 1 item when configured post-install"
+type VSpherePlatformSpec struct {
+	// vcenters holds the connection details for services to communicate with vCenter.
+	// Currently, only a single vCenter is supported; in tech preview, up to 3 vCenters are supported.
+	// Once the cluster has been installed, you are unable to change the current number of defined
+	// vCenters except in the case where the cluster has been upgraded from a version of OpenShift
+	// where the vsphere platform spec was not present. You may make modifications to the existing
+	// vCenters that are defined in the vcenters list in order to match with any added or modified
+	// failure domains.
+	// ---
+	// + If VCenters is not defined use the existing cloud-config configmap defined
+	// + in openshift-config.
+	// +kubebuilder:validation:MinItems=0
+	// +kubebuilder:validation:MaxItems=3
+	// +kubebuilder:validation:XValidation:rule="size(self) != size(oldSelf) ? size(oldSelf) == 0 && size(self) < 2 : true",message="vcenters cannot be added or removed once set"
+	// +listType=atomic
+	// +optional
+	VCenters []VSpherePlatformVCenterSpec `json:"vcenters,omitempty"`
+
+	// failureDomains contains the definition of region, zone and the vCenter topology.
+	// If this is omitted, failure domains (regions and zones) will not be used.
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	FailureDomains []VSpherePlatformFailureDomainSpec `json:"failureDomains,omitempty"`
+
+	// nodeNetworking contains the definition of internal and external network constraints for
+	// assigning the node's networking.
+	// If this field is omitted, networking defaults to the legacy
+	// address selection behavior which is to only support a single address and
+	// return the first one found.
+	// +optional
+	NodeNetworking VSpherePlatformNodeNetworking `json:"nodeNetworking,omitempty"`
+
+	// apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+	// server that can be used by components inside the cluster, like kubelets
+	// using the infrastructure rather than Kubernetes networking. These are the
+	// IPs for a self-hosted load balancer in front of the API servers.
+	// In dual stack clusters this list contains two IP addresses, one from the IPv4
+	// family and one from IPv6.
+	// In single stack clusters a single IP address is expected.
+	// When omitted, values from the status.apiServerInternalIPs will be used.
+	// Once set, the list cannot be completely removed (but its second entry can).
+	//
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
+	// +optional
+	APIServerInternalIPs []IP `json:"apiServerInternalIPs"`
+
+	// ingressIPs are the external IPs which route to the default ingress
+	// controller. The IPs are suitable targets of a wildcard DNS record used to
+	// resolve default route host names.
+ // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.ingressIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + IngressIPs []IP `json:"ingressIPs"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster + // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, + // for example "10.0.0.0/8" or "fd00::/8". + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" +type VSpherePlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. 
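+	// An illustrative dual stack value (both addresses come from documentation
+	// ranges): ["203.0.113.10", "2001:db8::10"].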
+	//
+	// +kubebuilder:validation:Format=ip
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
+	IngressIPs []string `json:"ingressIPs"`
+
+	// nodeDNSIP is the IP address for the internal DNS used by the
+	// nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+	// provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+	// vSphere deployments. In order to minimize necessary changes to the
+	// datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+	// to the nodes in the cluster.
+	NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+	// loadBalancer defines how the load balancer used by the cluster is configured.
+	// +default={"type": "OpenShiftManagedDefault"}
+	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+	// +optional
+	LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"`
+
+	// dnsRecordsType determines whether records for api, api-int, and ingress
+	// are provided by the internal DNS service or externally.
+	// Allowed values are `Internal`, `External`, and omitted.
+	// When set to `Internal`, records are provided by the internal infrastructure and
+	// no additional user configuration is required for the cluster to function.
+	// When set to `External`, records are not provided by the internal infrastructure
+	// and must be configured by the user on a DNS server outside the cluster.
+	// Cluster nodes must use this external server for their upstream DNS requests.
+	// This value may only be set when loadBalancer.type is set to UserManaged.
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `Internal`.
+	// +openshift:enable:FeatureGate=OnPremDNSRecords
+	// +optional
+	DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"`
+
+	// machineNetworks are IP networks used to connect all the OpenShift cluster nodes.
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=32
+	// +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+	// +optional
+	MachineNetworks []CIDR `json:"machineNetworks"`
+}
+
+// IBMCloudServiceEndpoint stores the configuration of a custom url to
+// override existing defaults of IBM Cloud Services.
+type IBMCloudServiceEndpoint struct {
+	// name is the name of the IBM Cloud service.
+	// Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC.
+	// For example, the IBM Cloud Private IAM service could be configured with the
+	// service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`,
+	// whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured
+	// with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`.
+	//
+	// +required
+	Name IBMCloudServiceName `json:"name"`
+
+	// url is a fully qualified URI with scheme https that overrides the default generated
+	// endpoint for a client.
+	// This must be provided and cannot be empty.
The path must follow the pattern
+	// /v[0-9]+ or /api/v[0-9]+
+	//
+	// +required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:MaxLength=300
+	// +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL"
+	// +openshift:validation:FeatureGateAwareXValidation:featureGate=DyanmicServiceEndpointIBMCloud,rule="url(self).getScheme() == \"https\"",message="url must use https scheme"
+	// +openshift:validation:FeatureGateAwareXValidation:featureGate=DyanmicServiceEndpointIBMCloud,rule=`matches((url(self).getEscapedPath()), '^/(api/)?v[0-9]+/{0,1}$')`,message="url path must match /v[0-9]+ or /api/v[0-9]+"
+	URL string `json:"url"`
+}
+
+// IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type IBMCloudPlatformSpec struct {
+	// serviceEndpoints is a list of custom endpoints which will override the default
+	// service endpoints of an IBM service. These endpoints are used by components
+	// within the cluster when trying to reach the IBM Cloud Services that have been
+	// overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each
+	// endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus
+	// are updated to reflect the same custom endpoints.
+	// A maximum of 13 service endpoint overrides is supported.
+	// +kubebuilder:validation:MaxItems=13
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	// +openshift:enable:FeatureGate=DyanmicServiceEndpointIBMCloud
+	ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.
+type IBMCloudPlatformStatus struct {
+	// location is where the cluster has been deployed
+	Location string `json:"location,omitempty"`
+
+	// resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.
+	ResourceGroupName string `json:"resourceGroupName,omitempty"`
+
+	// providerType indicates the type of cluster that was created
+	ProviderType IBMCloudProviderType `json:"providerType,omitempty"`
+
+	// cisInstanceCRN is the CRN of the Cloud Internet Services instance managing
+	// the DNS zone for the cluster's base domain
+	CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
+
+	// dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
+	// for the cluster's base domain
+	DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
+
+	// serviceEndpoints is a list of custom endpoints which will override the default
+	// service endpoints of an IBM service. These endpoints are used by components
+	// within the cluster when trying to reach the IBM Cloud Services that have been
+	// overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each
+	// endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus
+	// are updated to reflect the same custom endpoints.
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate=DyanmicServiceEndpointIBMCloud,maxItems=13
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type KubevirtPlatformSpec struct{}
+
+// KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.
+type KubevirtPlatformStatus struct {
+	// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+	// by components inside the cluster, like kubelets using the infrastructure rather
+	// than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+	// points to. It is the IP for a self-hosted load balancer in front of the API servers.
+	APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+	// ingressIP is an external IP which routes to the default ingress controller.
+	// The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+	IngressIP string `json:"ingressIP,omitempty"`
+}
+
+// EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type EquinixMetalPlatformSpec struct{}
+
+// EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.
+type EquinixMetalPlatformStatus struct {
+	// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+	// by components inside the cluster, like kubelets using the infrastructure rather
+	// than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+	// points to. It is the IP for a self-hosted load balancer in front of the API servers.
+	APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+	// ingressIP is an external IP which routes to the default ingress controller.
+	// The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+	IngressIP string `json:"ingressIP,omitempty"`
+}
+
+// PowerVSServiceEndpoint stores the configuration of a custom url to
+// override existing defaults of PowerVS Services.
+type PowerVSServiceEndpoint struct {
+	// name is the name of the Power VS service.
+	// A few of the services are:
+	// IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api
+	// ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller
+	// Power Cloud - https://cloud.ibm.com/apidocs/power-cloud
+	//
+	// +required
+	// +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;Power;ResourceController;ResourceManager;VPC
+	Name string `json:"name"`
+
+	// url is a fully qualified URI with scheme https that overrides the default generated
+	// endpoint for a client.
+	// This must be provided and cannot be empty.
+	//
+	// +required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:Format=uri
+	// +kubebuilder:validation:Pattern=`^https://`
+	URL string `json:"url"`
+}
+
+// PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type PowerVSPlatformSpec struct {
+	// serviceEndpoints is a list of custom endpoints which will override the default
+	// service endpoints of a Power VS service.
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.
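+// An illustrative serviceEndpoints entry (the URL shown is the private IAM
+// example used elsewhere in this file, not a default):
+//
+//	serviceEndpoints:
+//	- name: IAM
+//	  url: https://private.iam.cloud.ibm.com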
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceGroup) || has(self.resourceGroup)",message="cannot unset resourceGroup once set"
+type PowerVSPlatformStatus struct {
+	// region holds the default Power VS region for new Power VS resources created by the cluster.
+	Region string `json:"region"`
+
+	// zone holds the default zone for the new Power VS resources created by the cluster.
+	// Note: Currently only single-zone OCP clusters are supported
+	Zone string `json:"zone"`
+
+	// resourceGroup is the resource group name for new IBMCloud resources created for a cluster.
+	// The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry.
+	// More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs.
+	// When omitted, the image registry operator won't be able to configure storage,
+	// which results in the image registry cluster operator not being in an available state.
+	//
+	// +kubebuilder:validation:Pattern=^[a-zA-Z0-9-_ ]+$
+	// +kubebuilder:validation:MaxLength=40
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="resourceGroup is immutable once set"
+	// +optional
+	ResourceGroup string `json:"resourceGroup"`
+
+	// serviceEndpoints is a list of custom endpoints which will override the default
+	// service endpoints of a Power VS service.
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+	// cisInstanceCRN is the CRN of the Cloud Internet Services instance managing
+	// the DNS zone for the cluster's base domain
+	CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
+
+	// dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
+	// for the cluster's base domain
+	DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
+}
+
+// AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AlibabaCloudPlatformSpec struct{}
+
+// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.
+type AlibabaCloudPlatformStatus struct {
+	// region specifies the region for Alibaba Cloud resources created for the cluster.
+	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$`
+	// +required
+	Region string `json:"region"`
+	// resourceGroupID is the ID of the resource group for the cluster.
+	// +kubebuilder:validation:Pattern=`^(rg-[0-9A-Za-z]+)?$`
+	// +optional
+	ResourceGroupID string `json:"resourceGroupID,omitempty"`
+	// resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.
+	// +kubebuilder:validation:MaxItems=20
+	// +listType=map
+	// +listMapKey=key
+	// +optional
+	ResourceTags []AlibabaCloudResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AlibabaCloudResourceTag is a tag to apply to Alibaba Cloud resources created for the cluster.
+type AlibabaCloudResourceTag struct {
+	// key is the key of the tag.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=128
+	// +required
+	Key string `json:"key"`
+	// value is the value of the tag.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=128
+	// +required
+	Value string `json:"value"`
+}
+
+// NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.
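+// For example, a user-managed load balancer would be expressed as (illustrative):
+//
+//	loadBalancer:
+//	  type: UserManaged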
+// +union
+type NutanixPlatformLoadBalancer struct {
+	// type defines the type of load balancer used by the cluster on Nutanix platform
+	// which can be a user-managed or openshift-managed load balancer
+	// that is to be used for the OpenShift API and Ingress endpoints.
+	// When set to OpenShiftManagedDefault, the static pods in charge of API and Ingress traffic load-balancing
+	// defined in the machine config operator will be deployed.
+	// When set to UserManaged, these static pods will not be deployed and it is expected that
+	// the load balancer is configured out of band by the deployer.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default.
+	// The default value is OpenShiftManagedDefault.
+	// +default="OpenShiftManagedDefault"
+	// +kubebuilder:default:="OpenShiftManagedDefault"
+	// +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+	// +optional
+	// +unionDiscriminator
+	Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type NutanixPlatformSpec struct {
+	// prismCentral holds the endpoint address and port to access the Nutanix Prism Central.
+	// When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
+	// Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the
+	// proxy spec.noProxy list.
+	// +required
+	PrismCentral NutanixPrismEndpoint `json:"prismCentral"`
+
+	// prismElements holds the endpoint address and port data of one or more Nutanix
+	// Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one
+	// Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.)
+	// used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.)
+	// spread over multiple Prism Elements (clusters) of the Prism Central.
+	// +required
+	// +listType=map
+	// +listMapKey=name
+	PrismElements []NutanixPrismElementEndpoint `json:"prismElements"`
+
+	// failureDomains configures failure domain information for the Nutanix platform.
+	// When set, the failure domains defined here may be used to spread Machines across
+	// prism element clusters to improve fault tolerance of the cluster.
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	FailureDomains []NutanixFailureDomain `json:"failureDomains"`
+}
+
+// NutanixFailureDomain configures failure domain information for the Nutanix platform.
+type NutanixFailureDomain struct {
+	// name defines the unique name of a failure domain.
+	// Name is required and must be at most 64 characters in length.
+	// It must consist of only lower case alphanumeric characters and hyphens (-).
+	// It must start and end with an alphanumeric character.
+	// This value is arbitrary and is used to identify the failure domain within the platform.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=64
+	// +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?`
+	Name string `json:"name"`
+
+	// cluster identifies the cluster (the Prism Element under management of the Prism Central)
+	// in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained
+	// from the Prism Central console or using the prism_central API.
+	// +required
+	Cluster NutanixResourceIdentifier `json:"cluster"`
+
+	// subnets holds a list of identifiers (one or more) of the cluster's network subnets
+	// for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be
+	// obtained from the Prism Central console or using the prism_central API.
+	// If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured.
+	// +required
+	// +kubebuilder:validation:MinItems=1
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32
+	// +openshift:validation:FeatureGateAwareXValidation:featureGate=NutanixMultiSubnets,rule="self.all(x, self.exists_one(y, x == y))",message="each subnet must be unique"
+	// +listType=atomic
+	Subnets []NutanixResourceIdentifier `json:"subnets"`
+}
+
+// NutanixIdentifierType is an enumeration of different resource identifier types.
+// +kubebuilder:validation:Enum:=UUID;Name
+type NutanixIdentifierType string
+
+const (
+	// NutanixIdentifierUUID is a resource identifier identifying the object by UUID.
+	NutanixIdentifierUUID NutanixIdentifierType = "UUID"
+
+	// NutanixIdentifierName is a resource identifier identifying the object by Name.
+	NutanixIdentifierName NutanixIdentifierType = "Name"
+)
+
+// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'UUID' ? has(self.uuid) : !has(self.uuid)",message="uuid configuration is required when type is UUID, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Name' ? has(self.name) : !has(self.name)",message="name configuration is required when type is Name, and forbidden otherwise"
+// +union
+type NutanixResourceIdentifier struct {
+	// type is the identifier type to use for this resource.
+	// +unionDiscriminator
+	// +required
+	Type NutanixIdentifierType `json:"type"`
+
+	// uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.
+	// +optional
+	UUID *string `json:"uuid,omitempty"`
+
+	// name is the resource name in the PC. It cannot be empty if the type is Name.
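+	// For example (values are illustrative): {type: Name, name: pe-cluster-1}
+	// identifies a resource by name, while
+	// {type: UUID, uuid: 123e4567-e89b-12d3-a456-426614174000} identifies it by UUID.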
+	// +optional
+	Name *string `json:"name,omitempty"`
+}
+
+// NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)
+type NutanixPrismEndpoint struct {
+	// address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)
+	// +required
+	// +kubebuilder:validation:MaxLength=256
+	Address string `json:"address"`
+
+	// port is the port number to access the Nutanix Prism Central or Element (cluster)
+	// +required
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=65535
+	Port int32 `json:"port"`
+}
+
+// NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)
+type NutanixPrismElementEndpoint struct {
+	// name is the name of the Prism Element (cluster). This value will correspond with
+	// the cluster field configured on other resources (e.g. Machines, PVCs, etc.).
+	// +required
+	// +kubebuilder:validation:MaxLength=256
+	Name string `json:"name"`
+
+	// endpoint holds the endpoint address and port data of the Prism Element (cluster).
+	// When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
+	// Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the
+	// proxy spec.noProxy list.
+	// +required
+	Endpoint NutanixPrismEndpoint `json:"endpoint"`
+}
+
+// NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged"
+type NutanixPlatformStatus struct {
+	// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+	// by components inside the cluster, like kubelets using the infrastructure rather
+	// than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+	// points to. It is the IP for a self-hosted load balancer in front of the API servers.
+	//
+	// Deprecated: Use APIServerInternalIPs instead.
+	APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+	// apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+	// server that can be used by components inside the cluster, like kubelets
+	// using the infrastructure rather than Kubernetes networking. These are the
+	// IPs for a self-hosted load balancer in front of the API servers. In dual
+	// stack clusters this list contains two IPs otherwise only one.
+	//
+	// +kubebuilder:validation:Format=ip
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=set
+	APIServerInternalIPs []string `json:"apiServerInternalIPs"`
+
+	// ingressIP is an external IP which routes to the default ingress controller.
+	// The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+	//
+	// Deprecated: Use IngressIPs instead.
+	IngressIP string `json:"ingressIP,omitempty"`
+
+	// ingressIPs are the external IPs which route to the default ingress
+	// controller.
The IPs are suitable targets of a wildcard DNS record used to
+	// resolve default route host names. In dual stack clusters this list
+	// contains two IPs otherwise only one.
+	//
+	// +kubebuilder:validation:Format=ip
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=set
+	IngressIPs []string `json:"ingressIPs"`
+
+	// loadBalancer defines how the load balancer used by the cluster is configured.
+	// +default={"type": "OpenShiftManagedDefault"}
+	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+	// +optional
+	LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+
+	// dnsRecordsType determines whether records for api, api-int, and ingress
+	// are provided by the internal DNS service or externally.
+	// Allowed values are `Internal`, `External`, and omitted.
+	// When set to `Internal`, records are provided by the internal infrastructure and
+	// no additional user configuration is required for the cluster to function.
+	// When set to `External`, records are not provided by the internal infrastructure
+	// and must be configured by the user on a DNS server outside the cluster.
+	// Cluster nodes must use this external server for their upstream DNS requests.
+	// This value may only be set when loadBalancer.type is set to UserManaged.
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `Internal`.
+	// +openshift:enable:FeatureGate=OnPremDNSRecords
+	// +optional
+	DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InfrastructureList contains a list of Infrastructure resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InfrastructureList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Infrastructure `json:"items"`
+}
+
+// IP is an IP address (for example, "10.0.0.0" or "fd00::").
+// +kubebuilder:validation:XValidation:rule="isIP(self)",message="value must be a valid IP address"
+// +kubebuilder:validation:MaxLength:=39
+// +kubebuilder:validation:MinLength:=1
+type IP string
+
+// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8").
+// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address"
+// +kubebuilder:validation:MaxLength:=43
+// +kubebuilder:validation:MinLength:=1
+type CIDR string
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_ingress.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_ingress.go
new file mode 100644
index 0000000000..f70fe8f440
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -0,0 +1,332 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Ingress holds cluster-wide information about ingress, including the default ingress domain
+// used for routes. The canonical name is `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=ingresses,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type Ingress struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +required
+	Spec IngressSpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status IngressStatus `json:"status"`
+}
+
+type IngressSpec struct {
+	// domain is used to generate a default host name for a route when the
+	// route's host name is empty. The generated host name will follow this
+	// pattern: "<route name>.<route namespace>.<domain>".
+	//
+	// It is also used as the default wildcard domain suffix for ingress. The
+	// default ingresscontroller domain will follow this pattern: "*.<domain>".
+	//
+	// Once set, changing domain is not currently supported.
+	Domain string `json:"domain"`
+
+	// appsDomain is an optional domain to use instead of the one specified
+	// in the domain field when a Route is created without specifying an explicit
+	// host. If appsDomain is nonempty, this value is used to generate default
+	// host values for Route. Unlike domain, appsDomain may be modified after
+	// installation.
+	// This assumes a new ingresscontroller has been set up with a wildcard
+	// certificate.
+	// +optional
+	AppsDomain string `json:"appsDomain,omitempty"`
+
+	// componentRoutes is an optional list of routes that are managed by OpenShift components
+	// and for which a cluster-admin is able to configure the hostname and serving certificate.
+	// The namespace and name of each route in this list should match an existing entry in the
+	// status.componentRoutes list.
+	//
+	// To determine the set of configurable Routes, look at namespace and name of entries in the
+	// .status.componentRoutes list, where participating operators write the status of
+	// configurable routes.
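+	// An illustrative entry (the console route is shown only as an example of a
+	// configurable component route; the hostname is hypothetical):
+	//
+	//	componentRoutes:
+	//	- namespace: openshift-console
+	//	  name: console
+	//	  hostname: console.apps.example.com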
+	// +optional
+	// +listType=map
+	// +listMapKey=namespace
+	// +listMapKey=name
+	ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"`
+
+	// requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes
+	// matching the domainPattern/s and namespaceSelector/s that are specified in the policy.
+	// Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route
+	// annotation, and affect route admission.
+	//
+	// A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation:
+	// "haproxy.router.openshift.io/hsts_header"
+	// E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
+	//
+	// - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector,
+	// then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route
+	// is rejected.
+	// - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies
+	// determines the route's admission status.
+	// - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector,
+	// then it may use any HSTS Policy annotation.
+	//
+	// The HSTS policy configuration may be changed after routes have already been created. An update to a previously
+	// admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration.
+	// However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.
+	//
+	// Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.
+	// +optional
+	RequiredHSTSPolicies []RequiredHSTSPolicy `json:"requiredHSTSPolicies,omitempty"`
+
+	// loadBalancer contains load balancer details that are general, not only specific to the underlying infrastructure
+	// provider of the current cluster, and that are required for the Ingress Controller to work on OpenShift.
+	// +optional
+	LoadBalancer LoadBalancer `json:"loadBalancer,omitempty"`
+}
+
+// IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at spec-level for the underlying cluster, it
+// is expected that only one of the spec structs is set.
+// +union
+type IngressPlatformSpec struct {
+	// type is the underlying infrastructure provider for the cluster.
+	// Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+	// "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS",
+	// "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms,
+	// and must handle unrecognized platforms as None if they do not support that platform.
+	//
+	// +unionDiscriminator
+	Type PlatformType `json:"type"`
+
+	// aws contains settings specific to the Amazon Web Services infrastructure provider.
+	// +optional
+	AWS *AWSIngressSpec `json:"aws,omitempty"`
+}
+
+type LoadBalancer struct {
+	// platform holds configuration specific to the underlying
+	// infrastructure provider for the ingress load balancers.
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
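+	// For example, selecting an AWS Network Load Balancer for the default
+	// ingress controller would look like this (illustrative):
+	//
+	//	loadBalancer:
+	//	  platform:
+	//	    type: AWS
+	//	    aws:
+	//	      type: NLB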
+	// +optional
+	Platform IngressPlatformSpec `json:"platform,omitempty"`
+}
+
+// AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+// +union
+type AWSIngressSpec struct {
+	// type allows the user to set a load balancer type.
+	// When this field is set, the default ingresscontroller will get created using the specified LBType.
+	// If this field is not set then the default ingress controller of LBType Classic will be created.
+	// Valid values are:
+	//
+	// * "Classic": A Classic Load Balancer that makes routing decisions at either
+	// the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See
+	// the following for additional details:
+	//
+	// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb
+	//
+	// * "NLB": A Network Load Balancer that makes routing decisions at the
+	// transport layer (TCP/SSL). See the following for additional details:
+	//
+	// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:=NLB;Classic
+	// +required
+	Type AWSLBType `json:"type"`
+}
+
+type AWSLBType string
+
+const (
+	// NLB is the Network Load Balancer Type of AWS. Using NLB one can set NLB load balancer type for the default ingress controller.
+	NLB AWSLBType = "NLB"
+
+	// Classic is the Classic Load Balancer Type of AWS. Using Classic one can set Classic load balancer type for the default ingress controller.
+	Classic AWSLBType = "Classic"
+)
+
+// ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported.
+// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
+// +kubebuilder:validation:MinLength=1
+// +kubebuilder:validation:MaxLength=512
+type ConsumingUser string
+
+// Hostname is a host name as defined by RFC-1123.
+// + ---
+// + The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it
+// + allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric
+// + characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256.
+// + ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$
+// +
+// + The right operand of the | is a new pattern that mimics the current API route admission validation on hostname,
+// + except that it allows hostnames longer than the maximum length:
+// + ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$
+// +
+// + Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname
+// + was saved via validation by the incorrect left operand of the | operator.
+// + +// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$` +type Hostname string + +type IngressStatus struct { + // componentRoutes is where participating operators place the current route status for routes whose + // hostnames and serving certificates can be customized by the cluster-admin. + // +optional + // +listType=map + // +listMapKey=namespace + // +listMapKey=name + ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` + + // defaultPlacement is set at installation time to control which + // nodes will host the ingress router pods by default. The options are + // control-plane nodes or worker nodes. + // + // This field works by dictating how the Cluster Ingress Operator will + // consider unset replicas and nodePlacement fields in IngressController + // resources when creating the corresponding Deployments. + // + // See the documentation for the IngressController replicas and nodePlacement + // fields for more information. + // + // When omitted, the default value is Workers + // + // +kubebuilder:validation:Enum:="ControlPlane";"Workers";"" + // +optional + DefaultPlacement DefaultPlacement `json:"defaultPlacement"` +} + +// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. +type ComponentRouteSpec struct { + // namespace is the namespace of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + Name string `json:"name"` + + // hostname is the hostname that should be used by the route. + // +required + Hostname Hostname `json:"hostname"` + + // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. + // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"` +} + +// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. +type ComponentRouteStatus struct { + // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace + // ensures that no two components will conflict and the same component can be installed multiple times. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. 
+	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=63
+	// +required
+	Namespace string `json:"namespace"`
+
+	// name is the logical name of the route to customize. It does not have to be the actual name of a route resource
+	// but it cannot be renamed.
+	//
+	// The namespace and name of this componentRoute must match a corresponding
+	// entry in the list of spec.componentRoutes if the route is to be customized.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=256
+	// +required
+	Name string `json:"name"`
+
+	// defaultHostname is the hostname of this route prior to customization.
+	// +required
+	DefaultHostname Hostname `json:"defaultHostname"`
+
+	// consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.
+	// +kubebuilder:validation:MaxItems=5
+	// +optional
+	ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"`
+
+	// currentHostnames is the list of current names used by the route. Typically, this list should consist of a single
+	// hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.
+	// +kubebuilder:validation:MinItems=1
+	// +optional
+	CurrentHostnames []Hostname `json:"currentHostnames,omitempty"`
+
+	// conditions are used to communicate the state of the componentRoutes entry.
+	//
+	// Supported conditions include Available, Degraded and Progressing.
+	//
+	// If Available is true, the content served by the route can be accessed by users. This includes cases
+	// where a default may continue to serve content while the customized route specified by the cluster-admin
+	// is being configured.
+	//
+	// If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry.
+	// The currentHostnames field may or may not be in effect.
+	//
+	// If Progressing is true, that means the component is taking some action related to the componentRoutes entry.
+	// +optional
+	// +listType=map
+	// +listMapKey=type
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.
+	// +kubebuilder:validation:MinItems=1
+	// +required
+	RelatedObjects []ObjectReference `json:"relatedObjects"`
+}
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type IngressList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Ingress `json:"items"`
+}
+
+// DefaultPlacement defines the default placement of ingress router pods.
+type DefaultPlacement string
+
+const (
+	// "Workers" is for having router pods placed on worker nodes by default.
+	DefaultPlacementWorkers DefaultPlacement = "Workers"
+
+	// "ControlPlane" is for having router pods placed on control-plane nodes by default.
+	DefaultPlacementControlPlane DefaultPlacement = "ControlPlane"
+)
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_insights.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_insights.go
new file mode 100644
index 0000000000..710d4303da
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_insights.go
@@ -0,0 +1,231 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// InsightsDataGather provides data gather configuration options for the Insights Operator.
+//
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2448
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=InsightsConfig
+// +openshift:capability=Insights
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InsightsDataGather struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// spec holds user settable values for configuration
+	// +required
+	Spec InsightsDataGatherSpec `json:"spec,omitempty,omitzero"`
+}
+
+// InsightsDataGatherSpec contains the configuration for the data gathering.
+type InsightsDataGatherSpec struct {
+	// gatherConfig is a required spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.
+	// +required
+	GatherConfig GatherConfig `json:"gatherConfig,omitempty,omitzero"`
+}
+
+// GatherConfig provides data gathering configuration options.
+type GatherConfig struct {
+	// dataPolicy is an optional list of DataPolicyOptions that allows the user to enable additional obfuscation of the Insights archive data.
+	// It may not exceed 2 items and must not contain duplicates.
+	// Valid values are ObfuscateNetworking and WorkloadNames.
+	// When set to ObfuscateNetworking, the IP addresses and the cluster domain name are obfuscated.
+	// When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resource UIDs will be used instead.
+	// When omitted, no obfuscation is applied.
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))",message="dataPolicy items must be unique"
+	// +listType=atomic
+	// +optional
+	DataPolicy []DataPolicyOption `json:"dataPolicy,omitempty"`
+	// gatherers is a required field that specifies the configuration of the gatherers.
+	// +required
+	Gatherers Gatherers `json:"gatherers,omitempty,omitzero"`
+	// storage is an optional field that allows the user to define persistent storage for gathering jobs to store the Insights data archive.
+	// If omitted, the gathering job will use ephemeral storage.
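+	// An illustrative gatherConfig using persistent storage (the claim name is
+	// hypothetical):
+	//
+	//	gatherConfig:
+	//	  gatherers:
+	//	    mode: All
+	//	  storage:
+	//	    type: PersistentVolume
+	//	    persistentVolume:
+	//	      claim:
+	//	        name: insights-archive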
+
+// Gatherers specifies the configuration of the gatherers
+// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Custom' ? has(self.custom) : !has(self.custom)",message="custom is required when mode is Custom, and forbidden otherwise"
+// +union
+type Gatherers struct {
+	// mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom.
+	// When set to All, all gatherers will run and gather data.
+	// When set to None, all gatherers will be disabled and no data will be gathered.
+	// When set to Custom, the custom configuration from the custom field will be applied.
+	// +unionDiscriminator
+	// +required
+	Mode GatheringMode `json:"mode,omitempty"`
+	// custom provides gathering configuration.
+	// It is required when mode is Custom, and forbidden otherwise.
+	// Custom configuration allows the user to disable only a subset of gatherers.
+	// Gatherers that are not explicitly disabled in custom configuration will run.
+	// +unionMember
+	// +optional
+	Custom Custom `json:"custom,omitempty,omitzero"`
+}
+
+// Custom provides the custom configuration of gatherers
+type Custom struct {
+	// configs is a required list of gatherer configurations that can be used to enable or disable specific gatherers.
+	// It may not exceed 100 items and each gatherer can be present only once.
+	// It is possible to disable an entire set of gatherers while allowing a specific function within that set.
+	// The particular gatherer IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md.
+	// Run the following command to get the names of last active gatherers:
+	// "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'"
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=100
+	// +listType=map
+	// +listMapKey=name
+	// +required
+	Configs []GathererConfig `json:"configs,omitempty"`
+}
+
+// GatheringMode defines the valid gathering modes.
+// +kubebuilder:validation:Enum=All;None;Custom
+type GatheringMode string
+
+const (
+	// GatheringModeAll enables all gatherers
+	GatheringModeAll GatheringMode = "All"
+	// GatheringModeNone disables all gatherers
+	GatheringModeNone GatheringMode = "None"
+	// GatheringModeCustom applies the configuration from the custom field.
+	GatheringModeCustom GatheringMode = "Custom"
+)
+
+// DataPolicyOption declares valid data policy options
+// +kubebuilder:validation:Enum=ObfuscateNetworking;WorkloadNames
+type DataPolicyOption string
+
+const (
+	// IP addresses and cluster domain name are obfuscated
+	DataPolicyOptionObfuscateNetworking DataPolicyOption = "ObfuscateNetworking"
+	// Data from Deployment Validation Operator are obfuscated
+	DataPolicyOptionObfuscateWorkloadNames DataPolicyOption = "WorkloadNames"
+)
+
+// Storage provides persistent storage configuration options for gathering jobs.
+// If the type is set to PersistentVolume, then the PersistentVolume must be defined.
+// If the type is set to Ephemeral, then the PersistentVolume must not be defined.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'PersistentVolume' ? has(self.persistentVolume) : !has(self.persistentVolume)",message="persistentVolume is required when type is PersistentVolume, and forbidden otherwise"
+// +union
+type Storage struct {
+	// type is a required field that specifies the type of storage that will be used to store the Insights data archive.
+	// Valid values are "PersistentVolume" and "Ephemeral".
+	// When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job.
+	// When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field.
+	// +unionDiscriminator
+	// +required
+	Type StorageType `json:"type,omitempty"`
+	// persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive.
+	// The PersistentVolume must be created in the openshift-insights namespace.
+	// +unionMember
+	// +optional
+	PersistentVolume PersistentVolumeConfig `json:"persistentVolume,omitempty,omitzero"`
+}
+
+// StorageType declares valid storage types
+// +kubebuilder:validation:Enum=PersistentVolume;Ephemeral
+type StorageType string
+
+const (
+	// StorageTypePersistentVolume storage type
+	StorageTypePersistentVolume StorageType = "PersistentVolume"
+	// StorageTypeEphemeral storage type
+	StorageTypeEphemeral StorageType = "Ephemeral"
+)
+
+// PersistentVolumeConfig provides configuration options for PersistentVolume storage.
+type PersistentVolumeConfig struct {
+	// claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive.
+	// The PersistentVolumeClaim must be created in the openshift-insights namespace.
+	// +required
+	Claim PersistentVolumeClaimReference `json:"claim,omitempty,omitzero"`
+	// mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// The current default mount path is /var/lib/insights-operator
+	// The path may not exceed 1024 characters and must not contain a colon.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=1024
+	// +kubebuilder:validation:XValidation:rule="!self.contains(':')",message="mountPath must not contain a colon"
+	// +optional
+	MountPath string `json:"mountPath,omitempty"`
+}
+
+// PersistentVolumeClaimReference is a reference to a PersistentVolumeClaim.
+type PersistentVolumeClaimReference struct {
+	// name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive.
+	// It is a string that follows the DNS1123 subdomain format.
+	// It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.
+	// +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character."
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=253
+	// +required
+	Name string `json:"name,omitempty"`
+}
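+
+// A hypothetical storage stanza requesting a PersistentVolume (the claim name
+// and mount path are illustrative values, not defaults):
+//
+//	storage:
+//	  type: PersistentVolume
+//	  persistentVolume:
+//	    claim:
+//	      name: insights-archive
+//	    mountPath: /var/lib/insights-archive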
+
+// GathererConfig allows configuring specific gatherers
+type GathererConfig struct {
+	// name is the required name of a specific gatherer.
+	// It may not exceed 256 characters.
+	// The format for a gatherer name is: {gatherer}/{function} where the function is optional.
+	// The gatherer consists of lowercase letters only and may include underscores (_).
+	// The function consists of lowercase letters only, may include underscores (_), and is separated from the gatherer by a forward slash (/).
+	// The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md.
+	// Run the following command to get the names of last active gatherers:
+	// "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'"
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=256
+	// +kubebuilder:validation:XValidation:rule=`self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$")`,message=`gatherer name must be in the format of {gatherer}/{function} where the gatherer and function are lowercase letters only that may include underscores (_) and are separated by a forward slash (/) if the function is provided`
+	// +required
+	Name string `json:"name,omitempty"`
+	// state is a required field that allows you to configure a specific gatherer. Valid values are "Enabled" and "Disabled".
+	// When set to Enabled the gatherer will run.
+	// When set to Disabled the gatherer will not run.
+	// +required
+	State GathererState `json:"state,omitempty"`
+}
+
+// GathererState declares valid gatherer state types.
+// +kubebuilder:validation:Enum=Enabled;Disabled
+type GathererState string
+
+const (
+	// GathererStateEnabled gatherer state, which means that the gatherer will run.
+	GathererStateEnabled GathererState = "Enabled"
+	// GathererStateDisabled gatherer state, which means that the gatherer will not run.
+	GathererStateDisabled GathererState = "Disabled"
+)
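+
+// Hypothetical examples of names that satisfy the format above (illustrative
+// only; consult the gathered-data documentation for the authoritative list):
+//
+//	configs:
+//	- name: workloads                        # {gatherer} only
+//	  state: Enabled
+//	- name: clusterconfig/container_images   # {gatherer}/{function}
+//	  state: Disabled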
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InsightsDataGatherList is a collection of items
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InsightsDataGatherList struct {
+	metav1.TypeMeta `json:",inline"`
+	// metadata is the required standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +required
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// items is the required list of InsightsDataGather objects.
+	// It may not exceed 100 items.
+	// +kubebuilder:validation:MinItems=0
+	// +kubebuilder:validation:MaxItems=100
+	// +required
+	Items []InsightsDataGather `json:"items,omitempty"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go
new file mode 100644
index 0000000000..3293204fa4
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go
@@ -0,0 +1,55 @@
+package v1
+
+// KMSConfig defines the configuration for the KMS instance
+// that will be used with KMSEncryptionProvider encryption
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws config is required when kms provider type is AWS, and forbidden otherwise"
+// +union
+type KMSConfig struct {
+	// type defines the kind of platform for the KMS provider.
+	// Currently, AWS is the only available provider type.
+	//
+	// +unionDiscriminator
+	// +required
+	Type KMSProviderType `json:"type"`
+
+	// aws defines the key config for using an AWS KMS instance
+	// for the encryption. The AWS KMS instance is managed
+	// by the user outside the purview of the control plane.
+	//
+	// +unionMember
+	// +optional
+	AWS *AWSKMSConfig `json:"aws,omitempty"`
+}
+
+// AWSKMSConfig defines the KMS config specific to AWS KMS provider
+type AWSKMSConfig struct {
+	// keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption.
+	// The value must adhere to the format `arn:aws:kms:<region>:<account_id>:key/<key_id>`, where:
+	// - `<region>` is the AWS region consisting of lowercase letters and hyphens followed by a number.
+	// - `<account_id>` is a 12-digit numeric identifier for the AWS account.
+	// - `<key_id>` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens.
+	//
+	// +kubebuilder:validation:MaxLength=128
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:XValidation:rule="self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$')",message="keyARN must follow the format `arn:aws:kms:<region>:<account_id>:key/<key_id>`. The account ID must be a 12 digit number and the region and key ID should consist only of lowercase hexadecimal characters and hyphens (-)."
+	// +required
+	KeyARN string `json:"keyARN"`
+	// region specifies the AWS region where the KMS instance exists, and follows the format
+	// `<region-prefix>-<region-name>-<number>`, e.g.: `us-east-1`.
+	// Only lowercase letters and hyphens followed by numbers are allowed.
+	//
+	// +kubebuilder:validation:MaxLength=64
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]+(-[a-z0-9]+)*$')",message="region must be a valid AWS region, consisting of lowercase characters, digits and hyphens (-) only."
+	// +required
+	Region string `json:"region"`
+}
+
+// KMSProviderType is a specific supported KMS provider
+// +kubebuilder:validation:Enum=AWS
+type KMSProviderType string
+
+const (
+	// AWSKMSProvider represents a supported KMS provider for use with AWS KMS
+	AWSKMSProvider KMSProviderType = "AWS"
+)
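+
+// A hypothetical KMSConfig stanza using the fields above (the ARN and region
+// are made-up example values):
+//
+//	kms:
+//	  type: AWS
+//	  aws:
+//	    keyARN: arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab
+//	    region: us-east-1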
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_network.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_network.go
new file mode 100644
index 0000000000..fb8ed2fff7
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -0,0 +1,306 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc.
+// Please view network.spec for an explanation on what applies when configuring this resource.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:compatibility-gen:level=1
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=networks,scope=Cluster
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type Network struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration.
+	// As a general rule, this SHOULD NOT be read directly. Instead, you should
+	// consume the NetworkStatus, as it indicates the currently deployed configuration.
+	// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
+	// +required
+	Spec NetworkSpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status NetworkStatus `json:"status"`
+}
+
+// NetworkSpec is the desired network configuration.
+// As a general rule, this SHOULD NOT be read directly. Instead, you should
+// consume the NetworkStatus, as it indicates the currently deployed configuration.
+// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
+// +kubebuilder:validation:XValidation:rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled"
+type NetworkSpec struct {
+	// IP address pool to use for pod IPs.
+	// This field is immutable after installation.
+	// +listType=atomic
+	ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+	// IP address pool for services.
+	// Currently, we only support a single entry here.
+	// This field is immutable after installation.
+	// +listType=atomic
+	ServiceNetwork []string `json:"serviceNetwork"`
+
+	// networkType is the plugin that is to be deployed (e.g. OVNKubernetes).
+	// This should match a value that the cluster-network-operator understands,
+	// or else no networking will be installed.
+	// Currently supported values are:
+	// - OVNKubernetes
+	// This field is immutable after installation.
+	NetworkType string `json:"networkType"`
+
+	// externalIP defines configuration for controllers that
+	// affect Service.ExternalIP. If nil, then ExternalIP is
+	// not allowed to be set.
+	// +optional
+	ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
+
+	// The port range allowed for Services of type NodePort.
+	// If not specified, the default of 30000-32767 will be used.
+	// Such Services without a NodePort specified will have one
+	// automatically allocated from this range.
+	// This parameter can be updated after the cluster is
+	// installed.
+	// +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
+	ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"`
+
+	// networkDiagnostics defines network diagnostics configuration.
+	//
+	// Takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io.
+	// If networkDiagnostics is not specified or is empty,
+	// and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true,
+	// the network diagnostics feature will be disabled.
+	//
+	// +optional
+	NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"`
+}
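+
+// A hypothetical spec for the `cluster` Network resource (the values shown
+// are common installer defaults, used here purely as an illustration):
+//
+//	spec:
+//	  clusterNetwork:
+//	  - cidr: 10.128.0.0/14
+//	    hostPrefix: 23
+//	  serviceNetwork:
+//	  - 172.30.0.0/16
+//	  networkType: OVNKubernetes
+//	  serviceNodePortRange: 30000-32767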
+
+// NetworkStatus is the current network configuration.
+type NetworkStatus struct {
+	// IP address pool to use for pod IPs.
+	// +listType=atomic
+	// +optional
+	ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
+
+	// IP address pool for services.
+	// Currently, we only support a single entry here.
+	// +listType=atomic
+	// +optional
+	ServiceNetwork []string `json:"serviceNetwork,omitempty"`
+
+	// networkType is the plugin that is deployed (e.g. OVNKubernetes).
+	// +optional
+	NetworkType string `json:"networkType,omitempty"`
+
+	// clusterNetworkMTU is the MTU for inter-pod networking.
+	// +optional
+	ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
+
+	// migration contains the cluster network migration configuration.
+	// +optional
+	Migration *NetworkMigration `json:"migration,omitempty"`
+
+	// conditions represents the observations of a network.config current state.
+	// Known .status.conditions.type are: "NetworkDiagnosticsAvailable"
+	// +optional
+	// +listType=map
+	// +listMapKey=type
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs
+// are allocated.
+type ClusterNetworkEntry struct {
+	// The complete block for pod IPs.
+	CIDR string `json:"cidr"`
+
+	// The size (prefix) of block to allocate to each node. If this
+	// field is not used by the plugin, it can be left unset.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
+// of a Service resource.
+type ExternalIPConfig struct {
+	// policy is a set of restrictions applied to the ExternalIP field.
+	// If nil or empty, then ExternalIP is not allowed to be set.
+	// +optional
+	Policy *ExternalIPPolicy `json:"policy,omitempty"`
+
+	// autoAssignCIDRs is a list of CIDRs from which to automatically assign
+	// Service.ExternalIP. These are assigned when the service is of type
+	// LoadBalancer. In general, this is only useful for bare-metal clusters.
+	// In OpenShift 3.x, this was misleadingly called "IngressIPs".
+	// Automatically assigned External IPs are not affected by any
+	// ExternalIPPolicy rules.
+	// Currently, only one entry may be provided.
+	// +optional
+	// +listType=atomic
+	AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
+}
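+
+// A hypothetical externalIP stanza (all CIDRs are example values):
+//
+//	externalIP:
+//	  policy:
+//	    allowedCIDRs:
+//	    - 192.168.100.0/24
+//	    rejectedCIDRs:
+//	    - 192.168.100.128/25
+//	  autoAssignCIDRs:
+//	  - 192.168.200.0/24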
+
+// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
+// field in a Service. If the zero struct is supplied, then none are permitted.
+// The policy controller always allows automatically assigned external IPs.
+type ExternalIPPolicy struct {
+	// allowedCIDRs is the list of allowed CIDRs.
+	// +listType=atomic
+	AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
+
+	// rejectedCIDRs is the list of disallowed CIDRs. These take precedence
+	// over allowedCIDRs.
+	// +optional
+	// +listType=atomic
+	RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type NetworkList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Network `json:"items"`
+}
+
+// NetworkMigration represents the network migration status.
+type NetworkMigration struct {
+	// networkType is the target plugin that is being deployed.
+	// DEPRECATED: network type migration is no longer supported,
+	// so this should always be unset.
+	// +optional
+	NetworkType string `json:"networkType,omitempty"`
+
+	// mtu is the MTU configuration that is being deployed.
+	// +optional
+	MTU *MTUMigration `json:"mtu,omitempty"`
+}
+
+// MTUMigration contains information about MTU migration.
+type MTUMigration struct {
+	// network contains MTU migration configuration for the default network.
+	// +optional
+	Network *MTUMigrationValues `json:"network,omitempty"`
+
+	// machine contains MTU migration configuration for the machine's uplink.
+	// +optional
+	Machine *MTUMigrationValues `json:"machine,omitempty"`
+}
+
+// MTUMigrationValues contains the values for a MTU migration.
+type MTUMigrationValues struct {
+	// to is the MTU to migrate to.
+	// +kubebuilder:validation:Minimum=0
+	To *uint32 `json:"to"`
+
+	// from is the MTU to migrate from.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	From *uint32 `json:"from,omitempty"`
+}
+
+// NetworkDiagnosticsMode is an enumeration of the available network diagnostics modes
+// Valid values are "", "All", "Disabled".
+// +kubebuilder:validation:Enum:="";All;Disabled
+type NetworkDiagnosticsMode string
+
+const (
+	// NetworkDiagnosticsNoOpinion means that the user has no opinion and the platform is left
+	// to choose a reasonable default. The current default is All and is subject to change over time.
+	NetworkDiagnosticsNoOpinion NetworkDiagnosticsMode = ""
+	// NetworkDiagnosticsAll means that all network diagnostics checks are enabled
+	NetworkDiagnosticsAll NetworkDiagnosticsMode = "All"
+	// NetworkDiagnosticsDisabled means that network diagnostics is disabled
+	NetworkDiagnosticsDisabled NetworkDiagnosticsMode = "Disabled"
+)
+
+// NetworkDiagnostics defines network diagnostics configuration
+type NetworkDiagnostics struct {
+	// mode controls the network diagnostics mode
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is All.
+	//
+	// +optional
+	Mode NetworkDiagnosticsMode `json:"mode"`
+
+	// sourcePlacement controls the scheduling of network diagnostics source deployment
+	//
+	// See NetworkDiagnosticsSourcePlacement for more details about default values.
+	//
+	// +optional
+	SourcePlacement NetworkDiagnosticsSourcePlacement `json:"sourcePlacement"`
+
+	// targetPlacement controls the scheduling of network diagnostics target daemonset
+	//
+	// See NetworkDiagnosticsTargetPlacement for more details about default values.
+	//
+	// +optional
+	TargetPlacement NetworkDiagnosticsTargetPlacement `json:"targetPlacement"`
+}
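+
+// A hypothetical networkDiagnostics stanza (the node selector and toleration
+// are illustrative, not defaults):
+//
+//	networkDiagnostics:
+//	  mode: All
+//	  sourcePlacement:
+//	    nodeSelector:
+//	      node-role.kubernetes.io/worker: ""
+//	  targetPlacement:
+//	    tolerations:
+//	    - operator: Exists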
+
+// NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components
+type NetworkDiagnosticsSourcePlacement struct {
+	// nodeSelector is the node selector applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `kubernetes.io/os: linux`.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector"`
+
+	// tolerations is a list of tolerations applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is an empty list.
+	//
+	// +optional
+	// +listType=atomic
+	Tolerations []corev1.Toleration `json:"tolerations"`
+}
+
+// NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components
+type NetworkDiagnosticsTargetPlacement struct {
+	// nodeSelector is the node selector applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `kubernetes.io/os: linux`.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector"`
+
+	// tolerations is a list of tolerations applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `- operator: "Exists"` which means that all taints are tolerated.
+	//
+	// +optional
+	// +listType=atomic
+	Tolerations []corev1.Toleration `json:"tolerations"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_node.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_node.go
new file mode 100644
index 0000000000..2f627be11e
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_node.go
@@ -0,0 +1,141 @@
+package v1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Node holds cluster-wide information about node specific features.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1107
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=nodes,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type Node struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +required
+	Spec NodeSpec `json:"spec"`
+
+	// status holds observed values.
+	// +optional
+	Status NodeStatus `json:"status"`
+}
+
+type NodeSpec struct {
+	// cgroupMode determines the cgroups version on the node
+	// +optional
+	CgroupMode CgroupMode `json:"cgroupMode,omitempty"`
+
+	// workerLatencyProfile determines how fast the kubelet updates
+	// its status and the corresponding reaction of the cluster
+	// +optional
+	WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"`
+
+	// minimumKubeletVersion is the lowest version of a kubelet that can join the cluster.
+	// Specifically, the apiserver will deny most authorization requests of kubelets that are older
+	// than the specified version, only allowing the kubelet to get and update its node object, and perform
+	// subjectaccessreviews.
+	// This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads,
+	// and will eventually be marked as not ready.
+	// Its max length is 8, so maximum version allowed is either "9.999.99" or "99.99.99".
+	// Since the kubelet reports the version of the kubernetes release, not OpenShift, this field references
+	// the underlying kubernetes version this version of OpenShift is based off of.
+	// In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then
+	// they should set the minimumKubeletVersion to 1.30.0.
+	// When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version.
+	// Thus, a kubelet with version "1.0.0-ec.0" will be compatible with minimumKubeletVersion "1.0.0" or earlier.
+	// +kubebuilder:validation:XValidation:rule="self == \"\" || self.matches('^[0-9]*.[0-9]*.[0-9]*$')",message="minimumKubeletVersion must be in a semver compatible format of x.y.z, or empty"
+	// +kubebuilder:validation:MaxLength:=8
+	// +openshift:enable:FeatureGate=MinimumKubeletVersion
+	// +optional
+	MinimumKubeletVersion string `json:"minimumKubeletVersion"`
+}
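+
+// A hypothetical spec for the `cluster` Node configuration resource
+// (illustrative values; minimumKubeletVersion is gated by the
+// MinimumKubeletVersion feature gate):
+//
+//	spec:
+//	  cgroupMode: v2
+//	  workerLatencyProfile: MediumUpdateAverageReaction
+//	  minimumKubeletVersion: 1.30.0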
+
+type NodeStatus struct {
+	// conditions contain the details and the current state of the nodes.config object
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=v2;""
+type CgroupMode string
+
+const (
+	CgroupModeEmpty   CgroupMode = "" // Empty string indicates to honor user set value on the system that should not be overridden by OpenShift
+	CgroupModeV2      CgroupMode = "v2"
+	CgroupModeDefault CgroupMode = CgroupModeV2
+)
+
+// +kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction
+type WorkerLatencyProfileType string
+
+const (
+	// Medium Kubelet Update Frequency (heart-beat) and Average Reaction Time to unresponsive Node
+	MediumUpdateAverageReaction WorkerLatencyProfileType = "MediumUpdateAverageReaction"
+
+	// Low Kubelet Update Frequency (heart-beat) and Slow Reaction Time to unresponsive Node
+	LowUpdateSlowReaction WorkerLatencyProfileType = "LowUpdateSlowReaction"
+
+	// Default values of relevant Kubelet, Kube Controller Manager and Kube API Server
+	DefaultUpdateDefaultReaction WorkerLatencyProfileType = "Default"
+)
+
+const (
+	// DefaultNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultNodeStatusUpdateFrequency = 10 * time.Second
+	// DefaultNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultNodeMonitorGracePeriod = 40 * time.Second
+	// DefaultNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultNotReadyTolerationSeconds = 300
+	// DefaultUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultUnreachableTolerationSeconds = 300
+
+	// MediumNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+	MediumNodeStatusUpdateFrequency = 20 * time.Second
+	// MediumNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+	MediumNodeMonitorGracePeriod = 2 * time.Minute
+	// MediumNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+	MediumNotReadyTolerationSeconds = 60
+	// MediumUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+	MediumUnreachableTolerationSeconds = 60
+
+	// LowNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of LowUpdateSlowReaction WorkerLatencyProfile type
+	LowNodeStatusUpdateFrequency = 1 *
time.Minute + // LowNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNodeMonitorGracePeriod = 5 * time.Minute + // LowNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNotReadyTolerationSeconds = 60 + // LowUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowUnreachableTolerationSeconds = 60 +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type NodeList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Node `json:"items"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_oauth.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_oauth.go new file mode 100644 index 0000000000..20845e4dbe --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -0,0 +1,597 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// OAuth Server and Identity Provider Config + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. +// It is used to configure the integrated OAuth server. +// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=oauths,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type OAuth struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + // spec holds user settable values for configuration + // +required + Spec OAuthSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OAuthStatus `json:"status"` +} + +// OAuthSpec contains desired cluster auth configuration +type OAuthSpec struct { + // identityProviders is an ordered list of ways for a user to identify themselves. + // When this list is empty, no identities are provisioned for users. 
+	// +optional
+	// +listType=atomic
+	IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
+
+	// tokenConfig contains options for authorization and access tokens
+	TokenConfig TokenConfig `json:"tokenConfig"`
+
+	// templates allow you to customize pages like the login page.
+	// +optional
+	Templates OAuthTemplates `json:"templates"`
+}
+
+// OAuthStatus shows current known state of OAuth server in the cluster
+type OAuthStatus struct {
+	// TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+	// accessTokenMaxAgeSeconds defines the maximum age of access tokens
+	AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
+
+	// accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.
+	// +optional
+	AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+
+	// accessTokenInactivityTimeout defines the token inactivity timeout
+	// for tokens granted by any client.
+	// The value represents the maximum amount of time that can occur between
+	// consecutive uses of the token. Tokens become invalid if they are not
+	// used within this temporal window. The user will need to acquire a new
+	// token to regain access once a token times out. Takes valid time
+	// duration string such as "5m", "1.5h" or "2h45m". The minimum allowed
+	// value for duration is 300s (5 minutes). If the timeout is configured
+	// per client, then that value takes precedence. If the timeout value is
+	// not specified and the client does not override the value, then tokens
+	// remain valid for their full lifetime.
+	//
+	// WARNING: existing tokens' timeout will not be affected (lowered) by changing this value
+	// +optional
+	AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
+}
+
+const (
+	// LoginTemplateKey is the key of the login template in a secret
+	LoginTemplateKey = "login.html"
+
+	// ProviderSelectionTemplateKey is the key for the provider selection template in a secret
+	ProviderSelectionTemplateKey = "providers.html"
+
+	// ErrorsTemplateKey is the key for the errors template in a secret
+	ErrorsTemplateKey = "errors.html"
+
+	// BindPasswordKey is the key for the LDAP bind password in a secret
+	BindPasswordKey = "bindPassword"
+
+	// ClientSecretKey is the key for the oauth client secret data in a secret
+	ClientSecretKey = "clientSecret"
+
+	// HTPasswdDataKey is the key for the htpasswd file data in a secret
+	HTPasswdDataKey = "htpasswd"
+)
+
+// OAuthTemplates allow for customization of pages like the login page
+type OAuthTemplates struct {
+	// login is the name of a secret that specifies a go template to use to render the login page.
+	// The key "login.html" is used to locate the template data.
+	// If specified and the secret or expected key is not found, the default login page is used.
+	// If the specified template is not valid, the default login page is used.
+	// If unspecified, the default login page is used.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	Login SecretNameReference `json:"login"`
+
+	// providerSelection is the name of a secret that specifies a go template to use to render
+	// the provider selection page.
+	// The key "providers.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default provider selection page is used. + // If the specified template is not valid, the default provider selection page is used. + // If unspecified, the default provider selection page is used. + // The namespace for this secret is openshift-config. + // +optional + ProviderSelection SecretNameReference `json:"providerSelection"` + + // error is the name of a secret that specifies a go template to use to render error pages + // during the authentication or grant flow. + // The key "errors.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default error page is used. + // If the specified template is not valid, the default error page is used. + // If unspecified, the default error page is used. + // The namespace for this secret is openshift-config. + // +optional + Error SecretNameReference `json:"error"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // name is used to qualify the identities returned by this provider. + // - It MUST be unique and not shared by any other identity provider used + // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" + // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName + Name string `json:"name"` + + // mappingMethod determines how identities from this provider are mapped to users + // Defaults to "claim" + // +optional + MappingMethod MappingMethodType `json:"mappingMethod,omitempty"` + + IdentityProviderConfig `json:",inline"` +} + +// MappingMethodType specifies how new identities should be mapped to users when they log in +type MappingMethodType string + +const ( + // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user + // with that user name is already mapped to another identity. + // Default. + MappingMethodClaim MappingMethodType = "claim" + + // MappingMethodLookup looks up existing users already mapped to an identity but does not + // automatically provision users or identities. Requires identities and users be set up + // manually or using an external process. + MappingMethodLookup MappingMethodType = "lookup" + + // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with + // that user name already exists, the identity is mapped to the existing user, adding to any + // existing identity mappings for the user. 
+	MappingMethodAdd MappingMethodType = "add"
+)
+
+type IdentityProviderType string
+
+const (
+	// IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
+	IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
+
+	// IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
+	IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
+
+	// IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
+	IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
+
+	// IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
+	IdentityProviderTypeGoogle IdentityProviderType = "Google"
+
+	// IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
+	IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
+
+	// IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
+	IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
+
+	// IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
+	IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
+
+	// IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
+	IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
+
+	// IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
+	IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
+)
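+
+// A hypothetical identityProviders entry for an OAuth spec (the provider and
+// secret names are illustrative):
+//
+//	identityProviders:
+//	- name: my_htpasswd_provider
+//	  mappingMethod: claim
+//	  type: HTPasswd
+//	  htpasswd:
+//	    fileData:
+//	      name: htpass-secret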
+
+// IdentityProviderConfig contains configuration for using a specific identity provider
+type IdentityProviderConfig struct {
+	// type identifies the identity provider type for this entry.
+	Type IdentityProviderType `json:"type"`
+
+	// Provider-specific configuration
+	// The json tag MUST match the `Type` specified above, case-insensitively
+	// e.g. For `Type: "LDAP"`, the `ldap` configuration should be provided
+
+	// basicAuth contains configuration options for the BasicAuth IdP
+	// +optional
+	BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
+
+	// github enables user authentication using GitHub credentials
+	// +optional
+	GitHub *GitHubIdentityProvider `json:"github,omitempty"`
+
+	// gitlab enables user authentication using GitLab credentials
+	// +optional
+	GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
+
+	// google enables user authentication using Google credentials
+	// +optional
+	Google *GoogleIdentityProvider `json:"google,omitempty"`
+
+	// htpasswd enables user authentication using an HTPasswd file to validate credentials
+	// +optional
+	HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
+
+	// keystone enables user authentication using keystone password credentials
+	// +optional
+	Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
+
+	// ldap enables user authentication using LDAP credentials
+	// +optional
+	LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
+
+	// openID enables user authentication using OpenID credentials
+	// +optional
+	OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
+
+	// requestHeader enables user authentication using request header credentials
+	// +optional
+	RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
+}
+
+// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
+type BasicAuthIdentityProvider struct {
+	// OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
+	OAuthRemoteConnectionInfo `json:",inline"`
+}
+
+// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
+type OAuthRemoteConnectionInfo struct {
+	// url is the remote URL to connect to
+	URL string `json:"url"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+	// The namespace for this config map is openshift-config.
+	// +optional
+	CA ConfigMapNameReference `json:"ca"`
+
+	// tlsClientCert is an optional reference to a secret by name that contains the
+	// PEM-encoded TLS client certificate to present when connecting to the server.
+	// The key "tls.crt" is used to locate the data.
+	// If specified and the secret or expected key is not found, the identity provider is not honored.
+	// If the specified certificate data is not valid, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	TLSClientCert SecretNameReference `json:"tlsClientCert"`
+
+	// tlsClientKey is an optional reference to a secret by name that contains the
+	// PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
+	// The key "tls.key" is used to locate the data.
+	// If specified and the secret or expected key is not found, the identity provider is not honored.
+	// If the specified certificate data is not valid, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	TLSClientKey SecretNameReference `json:"tlsClientKey"`
+}
+
+// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials
+type HTPasswdIdentityProvider struct {
+	// fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
+	// The key "htpasswd" is used to locate the data.
+	// If the secret or expected key is not found, the identity provider is not honored.
+	// If the specified htpasswd data is not valid, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	FileData SecretNameReference `json:"fileData"`
+}
+
+// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials
+type LDAPIdentityProvider struct {
+	// url is an RFC 2255 URL which specifies the LDAP search parameters to use.
+	// The syntax of the URL is:
+	// ldap://host:port/basedn?attribute?scope?filter
+	URL string `json:"url"`
+
+	// bindDN is an optional DN to bind with during the search phase.
+	// +optional
+	BindDN string `json:"bindDN"`
+
+	// bindPassword is an optional reference to a secret by name
+	// containing a password to bind with during the search phase.
+	// The key "bindPassword" is used to locate the data.
+	// If specified and the secret or expected key is not found, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	BindPassword SecretNameReference `json:"bindPassword"`
+
+	// insecure, if true, indicates the connection should not use TLS
+	// WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
+	// attempt to connect using TLS, even when `insecure` is set to `true`
+	// When `true`, "ldap://" URLS connect insecurely. When `false`, "ldap://" URLs are upgraded to
+	// a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
+	Insecure bool `json:"insecure"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+	// The namespace for this config map is openshift-config.
+	// +optional
+	CA ConfigMapNameReference `json:"ca"`
+
+	// attributes maps LDAP attributes to identities
+	Attributes LDAPAttributeMapping `json:"attributes"`
+}
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+	// id is the list of attributes whose values should be used as the user ID. Required.
+	// First non-empty attribute is used. At least one attribute is required. If none of the listed
+	// attributes have a value, authentication fails.
+	// LDAP standard identity attribute is "dn"
+	ID []string `json:"id"`
+
+	// preferredUsername is the list of attributes whose values should be used as the preferred username.
+	// LDAP standard login attribute is "uid"
+	// +optional
+	PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+	// name is the list of attributes whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity + // LDAP standard display name attribute is "cn" + // +optional + Name []string `json:"name,omitempty"` + + // email is the list of attributes whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + // +optional + Email []string `json:"email,omitempty"` +} + +// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials +type KeystoneIdentityProvider struct { + // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server + OAuthRemoteConnectionInfo `json:",inline"` + + // domainName is required for keystone v3 + DomainName string `json:"domainName"` + + // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration + // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID + // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility + // +optional + // UseUsernameIdentity bool `json:"useUsernameIdentity"` +} + +// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials +type RequestHeaderIdentityProvider struct { + // loginURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + // Required when login is set to true. + LoginURL string `json:"loginURL"` + + // challengeURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be + // redirected here. + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + // Required when challenge is set to true. + ChallengeURL string `json:"challengeURL"` + + // ca is a required reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // Specifically, it allows verification of incoming requests to prevent header spoofing. + // The key "ca.crt" is used to locate the data. + // If the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // The namespace for this config map is openshift-config. + ClientCA ConfigMapNameReference `json:"ca"` + + // clientCommonNames is an optional list of common names to require a match from. If empty, any + // client certificate validated against the clientCA bundle is considered authoritative. 
+	// +optional
+	ClientCommonNames []string `json:"clientCommonNames,omitempty"`
+
+	// headers is the set of headers to check for identity information
+	Headers []string `json:"headers"`
+
+	// preferredUsernameHeaders is the set of headers to check for the preferred username
+	PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+
+	// nameHeaders is the set of headers to check for the display name
+	NameHeaders []string `json:"nameHeaders"`
+
+	// emailHeaders is the set of headers to check for the email address
+	EmailHeaders []string `json:"emailHeaders"`
+}
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+type GitHubIdentityProvider struct {
+	// clientID is the oauth client ID
+	ClientID string `json:"clientID"`
+
+	// clientSecret is a required reference to the secret by name containing the oauth client secret.
+	// The key "clientSecret" is used to locate the data.
+	// If the secret or expected key is not found, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	ClientSecret SecretNameReference `json:"clientSecret"`
+
+	// organizations optionally restricts which organizations are allowed to log in
+	// +optional
+	Organizations []string `json:"organizations,omitempty"`
+
+	// teams optionally restricts which teams are allowed to log in. Format is `<org>/<team>`.
+	// +optional
+	Teams []string `json:"teams,omitempty"`
+
+	// hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
+	// GitHub Enterprise.
+	// It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
+	// +optional
+	Hostname string `json:"hostname"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+	// This can only be configured when hostname is set to a non-empty value.
+	// The namespace for this config map is openshift-config.
+	// +optional
+	CA ConfigMapNameReference `json:"ca"`
+}
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+type GitLabIdentityProvider struct {
+	// clientID is the oauth client ID
+	ClientID string `json:"clientID"`
+
+	// clientSecret is a required reference to the secret by name containing the oauth client secret.
+	// The key "clientSecret" is used to locate the data.
+	// If the secret or expected key is not found, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	ClientSecret SecretNameReference `json:"clientSecret"`
+
+	// url is the oauth server base URL
+	URL string `json:"url"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` +} + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +type GoogleIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + // +optional + HostedDomain string `json:"hostedDomain"` +} + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +type OpenIDIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + // +optional + ExtraScopes []string `json:"extraScopes,omitempty"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. + // +optional + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"` + + // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. + // It must use the https scheme with no query or fragment component. + Issuer string `json:"issuer"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// UserIDClaim is the claim used to provide a stable identifier for OIDC identities. +// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability +// +// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can +// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique +// and never reassigned within the Issuer for a particular End-User, as described in Section 2. +// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the +// iss Claim and the sub Claim." 
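
Because only the iss/sub pair is a guaranteed stable identifier, the claim lists defined just below in OpenIDClaims are resolved by preference order, with the first non-empty value winning (the groups field documents this explicitly). A minimal sketch of that rule, assuming claims arrive as a flat string map; resolveClaim is hypothetical, not the OAuth server's code.

    package main

    import "fmt"

    // resolveClaim returns the first claim in the preference list that has a
    // non-empty value, mirroring the documented "first non-empty wins" behavior.
    func resolveClaim(claims map[string]string, preference []string) (string, bool) {
        for _, name := range preference {
            if v := claims[name]; v != "" {
                return v, true
            }
        }
        return "", false
    }

    func main() {
        claims := map[string]string{"email": "jane@example.com", "sub": "248289761001"}
        v, _ := resolveClaim(claims, []string{"preferred_username", "email", "sub"})
        fmt.Println(v) // "jane@example.com" — first non-empty in preference order
    }
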
+const UserIDClaim = "sub" + +// OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo +// responses +// +kubebuilder:validation:MinLength=1 +type OpenIDClaim string + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // preferredUsername is the list of claims whose values should be used as the preferred username. + // If unspecified, the preferred username is determined from the value of the sub claim + // +listType=atomic + // +optional + PreferredUsername []string `json:"preferredUsername,omitempty"` + + // name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // +listType=atomic + // +optional + Name []string `json:"name,omitempty"` + + // email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + // +listType=atomic + // +optional + Email []string `json:"email,omitempty"` + + // groups is the list of claims value of which should be used to synchronize groups + // from the OIDC provider to OpenShift for the user. + // If multiple claims are specified, the first one with a non-empty value is used. + // +listType=atomic + // +optional + Groups []OpenIDClaim `json:"groups,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []OAuth `json:"items"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_operatorhub.go new file mode 100644 index 0000000000..a4971a20c5 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -0,0 +1,97 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OperatorHubSpec defines the desired state of OperatorHub +type OperatorHubSpec struct { + // disableAllDefaultSources allows you to disable all the default hub + // sources. If this is true, a specific entry in sources can be used to + // enable a default source. If this is false, a specific entry in + // sources can be used to disable or enable a default source. + // +optional + DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"` + // sources is the list of default hub sources and their configuration. + // If the list is empty, it implies that the default hub sources are + // enabled on the cluster unless disableAllDefaultSources is true. + // If disableAllDefaultSources is true and sources is not empty, + // the configuration present in sources will take precedence. The list of + // default hub sources and their current state will always be reflected in + // the status block. + // +optional + Sources []HubSource `json:"sources,omitempty"` +} + +// OperatorHubStatus defines the observed state of OperatorHub. The current +// state of the default hub sources will always be reflected here. 
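
The precedence rules described on OperatorHubSpec above (an explicit sources entry wins; otherwise disableAllDefaultSources applies) reduce to a small amount of logic. A sketch under stated assumptions: the defaults list, the local hubSource type, and effectiveDisabled are hypothetical, not taken from the marketplace operator.

    package main

    import "fmt"

    type hubSource struct {
        Name     string
        Disabled bool
    }

    // effectiveDisabled starts every default source at the disableAllDefaultSources
    // baseline, then lets explicit per-source entries override it.
    func effectiveDisabled(defaults []string, disableAll bool, sources []hubSource) map[string]bool {
        out := make(map[string]bool, len(defaults))
        for _, name := range defaults {
            out[name] = disableAll
        }
        for _, s := range sources {
            out[s.Name] = s.Disabled
        }
        return out
    }

    func main() {
        defaults := []string{"redhat-operators", "community-operators"}
        overrides := []hubSource{{Name: "community-operators", Disabled: false}}
        // disableAllDefaultSources=true plus one re-enabling entry:
        fmt.Println(effectiveDisabled(defaults, true, overrides))
        // map[community-operators:false redhat-operators:true]
    }
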
+type OperatorHubStatus struct { + // sources encapsulates the result of applying the configuration for each + // hub source + // +optional + Sources []HubSourceStatus `json:"sources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHub is the Schema for the operatorhubs API. It can be used to change +// the state of the default hub sources for OperatorHub on the cluster from +// enabled to disabled and vice versa. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=operatorhubs,scope=Cluster +// +kubebuilder:subresource:status +// +genclient +// +genclient:nonNamespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=marketplace,operatorOrdering=01 +// +openshift:capability=marketplace +// +openshift:compatibility-gen:level=1 +type OperatorHub struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + Spec OperatorHubSpec `json:"spec"` + Status OperatorHubStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHubList contains a list of OperatorHub +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OperatorHubList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []OperatorHub `json:"items"` +} + +// HubSource is used to specify the hub source and its configuration +type HubSource struct { + // name is the name of one of the default hub sources + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:Required + Name string `json:"name"` + // disabled is used to disable a default hub source on cluster + // +kubebuilder:Required + Disabled bool `json:"disabled"` +} + +// HubSourceStatus is used to reflect the current state of applying the +// configuration to a default source +type HubSourceStatus struct { + HubSource `json:",omitempty"` + // status indicates success or failure in applying the configuration + Status string `json:"status,omitempty"` + // message provides more information regarding failures + Message string `json:"message,omitempty"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_project.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_project.go new file mode 100644 index 0000000000..3d219862be --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_project.go @@ -0,0 +1,70 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Project holds cluster-wide information about Project. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=projects,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Project struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ProjectSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProjectStatus `json:"status"` +} + +// TemplateReference references a template in a specific namespace. +// The namespace must be specified at the point of use. +type TemplateReference struct { + // name is the metadata.name of the referenced project request template + Name string `json:"name"` +} + +// ProjectSpec holds the project creation configuration. +type ProjectSpec struct { + // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + // +optional + ProjectRequestMessage string `json:"projectRequestMessage"` + + // projectRequestTemplate is the template to use for creating projects in response to projectrequest. + // This must point to a template in 'openshift-config' namespace. It is optional. + // If it is not specified, a default template is used. + // + // +optional + ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"` +} + +type ProjectStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Project `json:"items"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_proxy.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_proxy.go new file mode 100644 index 0000000000..ed40176ce3 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -0,0 +1,110 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=proxies,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Proxy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user-settable values for the proxy configuration + // +required + Spec ProxySpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProxyStatus `json:"status"` +} + +// ProxySpec contains cluster proxy creation configuration. +type ProxySpec struct { + // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. + // Empty means unset and will not result in an env var. + // +optional + NoProxy string `json:"noProxy,omitempty"` + + // readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + // +optional + ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` + + // trustedCA is a reference to a ConfigMap containing a CA certificate bundle. + // The trustedCA field should only be consumed by a proxy validator. The + // validator is responsible for reading the certificate bundle from the required + // key "ca-bundle.crt", merging it with the system default trust bundle, + // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" + // in the "openshift-config-managed" namespace. Clients that expect to make + // proxy connections must use the trusted-ca-bundle for all HTTPS requests to + // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as + // well. + // + // The namespace for the ConfigMap referenced by trustedCA is + // "openshift-config". Here is an example ConfigMap (in yaml): + // + // apiVersion: v1 + // kind: ConfigMap + // metadata: + // name: user-ca-bundle + // namespace: openshift-config + // data: + // ca-bundle.crt: | + // -----BEGIN CERTIFICATE----- + // Custom CA certificate bundle. + // -----END CERTIFICATE----- + // + // +optional + TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"` +} + +// ProxyStatus shows current known state of the cluster proxy. +type ProxyStatus struct { + // httpProxy is the URL of the proxy for HTTP requests. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. 
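
The httpProxy/httpsProxy/noProxy comments above all state that an empty value "means unset and will not result in an env var". A minimal sketch of that contract; buildProxyEnv is a hypothetical consumer, not code from the cluster network operator.

    package main

    import "fmt"

    // buildProxyEnv emits an env var only for fields that are actually set,
    // matching the "empty means unset" wording on ProxySpec.
    func buildProxyEnv(httpProxy, httpsProxy, noProxy string) map[string]string {
        env := map[string]string{}
        if httpProxy != "" {
            env["HTTP_PROXY"] = httpProxy
        }
        if httpsProxy != "" {
            env["HTTPS_PROXY"] = httpsProxy
        }
        if noProxy != "" {
            env["NO_PROXY"] = noProxy
        }
        return env
    }

    func main() {
        // httpsProxy left empty: no HTTPS_PROXY key appears at all.
        fmt.Println(buildProxyEnv("http://proxy.example.com:3128", "", "localhost,10.0.0.0/8"))
    }
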
+	// +optional
+	NoProxy string `json:"noProxy,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ProxyList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Proxy `json:"items"`
+}
diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_scheduling.go
new file mode 100644
index 0000000000..a81ed9f30c
--- /dev/null
+++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_scheduling.go
@@ -0,0 +1,146 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
+// and influence its placement decisions. The canonical name for this config is `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=schedulers,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type Scheduler struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +required
+	Spec SchedulerSpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status SchedulerStatus `json:"status"`
+}
+
+type SchedulerSpec struct {
+	// DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release.
+	// policy is a reference to a ConfigMap containing scheduler policy which has
+	// user specified predicates and priorities. If this ConfigMap is not available
+	// scheduler will default to use DefaultAlgorithmProvider.
+	// The namespace for this configmap is openshift-config.
+	// +optional
+	Policy ConfigMapNameReference `json:"policy,omitempty"`
+	// profile sets which scheduling profile should be set in order to configure scheduling
+	// decisions for new pods.
+	//
+	// Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring"
+	// Defaults to "LowNodeUtilization"
+	// +optional
+	Profile SchedulerProfile `json:"profile,omitempty"`
+	// profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.
+	// Deprecated: no longer needed, since DRA is GA starting with 4.21, and
+	// is enabled by default in the cluster, this field will be removed in 4.24.
+	// +openshift:enable:FeatureGate=HyperShiftOnlyDynamicResourceAllocation
+	// +optional
+	ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"`
+	// defaultNodeSelector helps set the cluster-wide default node selector to
+	// restrict pod placement to specific nodes. This is applied to the pods
+	// created in all namespaces and creates an intersection with any existing
+	// nodeSelectors already set on a pod, additionally constraining that pod's selector.
+	// For example,
+	// defaultNodeSelector: "type=user-node,region=east" would set nodeSelector
+	// field in pod spec to "type=user-node,region=east" to all pods created
+	// in all namespaces. Namespaces having project-wide node selectors won't be
+	// impacted even if this field is set. This adds an annotation section to
+	// the namespace.
+	// For example, if a new namespace is created with
+	// node-selector='type=user-node,region=east',
+	// the annotation openshift.io/node-selector: type=user-node,region=east
+	// gets added to the project. When the openshift.io/node-selector annotation
+	// is set on the project the value is used in preference to the value we are setting
+	// for defaultNodeSelector field.
+	// For instance,
+	// openshift.io/node-selector: "type=user-node,region=west" means
+	// that the default of "type=user-node,region=east" set in defaultNodeSelector
+	// would not be applied.
+	// +optional
+	DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
+	// mastersSchedulable allows master nodes to be schedulable. When this flag is
+	// turned on, all the master nodes in the cluster will be made schedulable,
+	// so that workload pods can run on them. The default value for this field is false,
+	// meaning none of the master nodes are schedulable.
+	// Important Note: Once the workload pods start running on the master nodes,
+	// extreme care must be taken to ensure that cluster-critical control plane components
+	// are not impacted.
+	// Please turn on this field after doing due diligence.
+	// +optional
+	MastersSchedulable bool `json:"mastersSchedulable"`
+}
+
+// +kubebuilder:validation:Enum="";LowNodeUtilization;HighNodeUtilization;NoScoring
+type SchedulerProfile string
+
+var (
+	// LowNodeUtilization is the default, and defines a scheduling profile which prefers to
+	// spread pods evenly among nodes targeting low resource consumption on each node.
+	LowNodeUtilization SchedulerProfile = "LowNodeUtilization"
+
+	// HighNodeUtilization defines a scheduling profile which packs as many pods as possible onto
+	// as few nodes as possible targeting a small node count but high resource usage on each node.
+	HighNodeUtilization SchedulerProfile = "HighNodeUtilization"
+
+	// NoScoring defines a scheduling profile which tries to provide lower-latency scheduling
+	// at the expense of potentially less optimal pod placement decisions.
+	NoScoring SchedulerProfile = "NoScoring"
+)
+
+// ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles
+type ProfileCustomizations struct {
+	// dynamicResourceAllocation allows enabling or disabling dynamic resource allocation within the scheduler.
+	// Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod.
+	// Third-party resource drivers are responsible for tracking and allocating resources.
+	// Different kinds of resources support arbitrary parameters for defining requirements and initialization.
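
The defaultNodeSelector text above amounts to: parse a "key=value,key=value" string and combine it with whatever selector the pod already carries, further constraining placement. A rough sketch of that combination, assuming plain equality selectors; parseSelector is hypothetical, and the project-wide annotation override described above is ignored here.

    package main

    import (
        "fmt"
        "strings"
    )

    // parseSelector turns "type=user-node,region=east" into a label map.
    func parseSelector(s string) map[string]string {
        out := map[string]string{}
        for _, pair := range strings.Split(s, ",") {
            if k, v, ok := strings.Cut(pair, "="); ok {
                out[strings.TrimSpace(k)] = strings.TrimSpace(v)
            }
        }
        return out
    }

    func main() {
        pod := map[string]string{"disktype": "ssd"} // the pod's own nodeSelector
        for k, v := range parseSelector("type=user-node,region=east") {
            if _, exists := pod[k]; !exists {
                pod[k] = v // defaults constrain further but don't override the pod
            }
        }
        fmt.Println(pod) // map[disktype:ssd region:east type:user-node]
    }
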
+ // Valid values are Enabled, Disabled and omitted. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. + // The current default is Disabled. + // +optional + DynamicResourceAllocation DRAEnablement `json:"dynamicResourceAllocation"` +} + +// +kubebuilder:validation:Enum:="";"Enabled";"Disabled" +type DRAEnablement string + +var ( + // DRAEnablementEnabled enables dynamic resource allocation feature + DRAEnablementEnabled DRAEnablement = "Enabled" + // DRAEnablementDisabled disables dynamic resource allocation feature + DRAEnablementDisabled DRAEnablement = "Disabled" +) + +type SchedulerStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SchedulerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Scheduler `json:"items"` +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_testreporting.go new file mode 100644 index 0000000000..00953957f4 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_testreporting.go @@ -0,0 +1,45 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into +// the payload for later analysis on a per-payload basis. +// This doesn't need any CRD because it's never stored in the cluster. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:internal +type TestReporting struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec TestReportingSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status TestReportingStatus `json:"status"` +} + +type TestReportingSpec struct { + // testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. + TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"` +} + +type FeatureGateTests struct { + // featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. + FeatureGate string `json:"featureGate"` + + // tests contains an item for every TestName + Tests []TestDetails `json:"tests"` +} + +type TestDetails struct { + // testName is the name of the test as it appears in junit XMLs. + // It does not include the suite name since the same test can be executed in many suites. 
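
Stepping back to the DRAEnablement type defined in types_scheduling.go above: dynamicResourceAllocation is a deliberate tri-state, where Enabled and Disabled are explicit opinions and the empty string ("omitted") defers to the platform default. A tiny sketch of resolving that tri-state; draEnablement here is a local stand-in for the vendored type.

    package main

    import "fmt"

    type draEnablement string

    // enabled resolves the documented tri-state: explicit values win, and the
    // empty string falls back to the platform default, which the comment above
    // states is currently Disabled (and may change over time).
    func enabled(v draEnablement) bool {
        switch v {
        case "Enabled":
            return true
        case "Disabled":
            return false
        default:
            return false // platform default today; subject to change
        }
    }

    func main() {
        fmt.Println(enabled(""), enabled("Enabled"), enabled("Disabled")) // false true false
    }
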
+ TestName string `json:"testName"` +} + +type TestReportingStatus struct { +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go new file mode 100644 index 0000000000..1e5189796e --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -0,0 +1,270 @@ +package v1 + +// TLSSecurityProfile defines the schema for a TLS security profile. This object +// is used by operators to apply TLS security settings to operands. +// +union +type TLSSecurityProfile struct { + // type is one of Old, Intermediate, Modern or Custom. Custom provides the + // ability to specify individual TLS security profile parameters. + // + // The profiles are currently based on version 5.0 of the Mozilla Server Side TLS + // configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for + // forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + // + // The profiles are intent based, so they may change over time as new ciphers are + // developed and existing ciphers are found to be insecure. Depending on + // precisely which ciphers are available to a process, the list may be reduced. + // + // +unionDiscriminator + // +optional + Type TLSProfileType `json:"type"` + + // old is a TLS profile for use when services need to be accessed by very old + // clients or libraries and should be used only as a last resort. + // + // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed + // by the "old" profile ciphers. + // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS10 + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // - DHE-RSA-CHACHA20-POLY1305 + // - ECDHE-ECDSA-AES128-SHA256 + // - ECDHE-RSA-AES128-SHA256 + // - ECDHE-ECDSA-AES128-SHA + // - ECDHE-RSA-AES128-SHA + // - ECDHE-ECDSA-AES256-SHA384 + // - ECDHE-RSA-AES256-SHA384 + // - ECDHE-ECDSA-AES256-SHA + // - ECDHE-RSA-AES256-SHA + // - DHE-RSA-AES128-SHA256 + // - DHE-RSA-AES256-SHA256 + // - AES128-GCM-SHA256 + // - AES256-GCM-SHA384 + // - AES128-SHA256 + // - AES256-SHA256 + // - AES128-SHA + // - AES256-SHA + // - DES-CBC3-SHA + // + // +optional + // +nullable + Old *OldTLSProfile `json:"old,omitempty"` + + // intermediate is a TLS profile for use when you do not need compatibility with + // legacy clients and want to remain highly secure while being compatible with + // most clients currently in use. + // + // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed + // by the "intermediate" profile ciphers. 
+ // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS12 + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // + // +optional + // +nullable + Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` + + // modern is a TLS security profile for use with clients that support TLS 1.3 and + // do not need backward compatibility for older clients. + // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS13 + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // + // +optional + // +nullable + Modern *ModernTLSProfile `json:"modern,omitempty"` + + // custom is a user-defined TLS security profile. Be extremely careful using a custom + // profile as invalid configurations can be catastrophic. An example custom profile + // looks like this: + // + // minTLSVersion: VersionTLS11 + // ciphers: + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // + // +optional + // +nullable + Custom *CustomTLSProfile `json:"custom,omitempty"` +} + +// OldTLSProfile is a TLS security profile based on the "old" configuration of +// the Mozilla Server Side TLS configuration guidelines. +type OldTLSProfile struct{} + +// IntermediateTLSProfile is a TLS security profile based on the "intermediate" +// configuration of the Mozilla Server Side TLS configuration guidelines. +type IntermediateTLSProfile struct{} + +// ModernTLSProfile is a TLS security profile based on the "modern" configuration +// of the Mozilla Server Side TLS configuration guidelines. +type ModernTLSProfile struct{} + +// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful +// using a custom TLS profile as invalid configurations can be catastrophic. +type CustomTLSProfile struct { + TLSProfileSpec `json:",inline"` +} + +// TLSProfileType defines a TLS security profile type. +// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom +type TLSProfileType string + +const ( + // TLSProfileOldType sets parameters based on the "old" configuration of + // the Mozilla Server Side TLS configuration guidelines. + TLSProfileOldType TLSProfileType = "Old" + + // TLSProfileIntermediateType sets parameters based on the "intermediate" + // configuration of the Mozilla Server Side TLS configuration guidelines. + TLSProfileIntermediateType TLSProfileType = "Intermediate" + + // TLSProfileModernType sets parameters based on the "modern" configuration + // of the Mozilla Server Side TLS configuration guidelines. + TLSProfileModernType TLSProfileType = "Modern" + + // TLSProfileCustomType is a TLS security profile that allows for user-defined parameters. + TLSProfileCustomType TLSProfileType = "Custom" +) + +// TLSProfileSpec is the desired behavior of a TLSSecurityProfile. +type TLSProfileSpec struct { + // ciphers is used to specify the cipher algorithms that are negotiated + // during the TLS handshake. Operators may remove entries their operands + // do not support. 
For example, to use DES-CBC3-SHA (yaml): + // + // ciphers: + // - DES-CBC3-SHA + // + // +listType=atomic + Ciphers []string `json:"ciphers"` + // minTLSVersion is used to specify the minimal version of the TLS protocol + // that is negotiated during the TLS handshake. For example, to use TLS + // versions 1.1, 1.2 and 1.3 (yaml): + // + // minTLSVersion: VersionTLS11 + // + MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"` +} + +// TLSProtocolVersion is a way to specify the protocol version used for TLS connections. +// Protocol versions are based on the following most common TLS configurations: +// +// https://ssl-config.mozilla.org/ +// +// Note that SSLv3.0 is not a supported protocol version due to well known +// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE +// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13 +type TLSProtocolVersion string + +const ( + // VersionTLSv10 is version 1.0 of the TLS security protocol. + VersionTLS10 TLSProtocolVersion = "VersionTLS10" + // VersionTLSv11 is version 1.1 of the TLS security protocol. + VersionTLS11 TLSProtocolVersion = "VersionTLS11" + // VersionTLSv12 is version 1.2 of the TLS security protocol. + VersionTLS12 TLSProtocolVersion = "VersionTLS12" + // VersionTLSv13 is version 1.3 of the TLS security protocol. + VersionTLS13 TLSProtocolVersion = "VersionTLS13" +) + +// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec. +// +// These profiles are based on version 5.0 of the Mozilla Server Side TLS +// configuration guidelines (2019-06-28) with TLS 1.3 cipher suites prepended for +// forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json +// +// NOTE: The caller needs to make sure to check that these constants are valid +// for their binary. Not all entries map to values for all binaries. In the case +// of ties, the kube-apiserver wins. Do not fail, just be sure to include only +// valid entries and everything will be ok. 
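
A consumer typically resolves the TLSSecurityProfile union into a concrete TLSProfileSpec (the discriminator selects an entry from the TLSProfiles map defined next, except Custom, which carries its own inline spec) and then translates it into terms its binary understands, per the caveat above. A hedged sketch against the vendored package; resolveProfile, the Intermediate fallback, and the partial crypto/tls mapping are assumptions, not a documented API, and cipher-name translation is elided because it is binary-specific.

    package main

    import (
        "crypto/tls"
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    // resolveProfile picks the predefined spec for the discriminator, letting a
    // Custom profile supply its own inline spec; nil/unknown fall back to Intermediate.
    func resolveProfile(p *configv1.TLSSecurityProfile) *configv1.TLSProfileSpec {
        if p == nil {
            return configv1.TLSProfiles[configv1.TLSProfileIntermediateType]
        }
        if p.Type == configv1.TLSProfileCustomType && p.Custom != nil {
            return &p.Custom.TLSProfileSpec
        }
        if spec, ok := configv1.TLSProfiles[p.Type]; ok {
            return spec
        }
        return configv1.TLSProfiles[configv1.TLSProfileIntermediateType]
    }

    // tlsVersions maps the API's version names onto crypto/tls constants; cipher
    // names would need a similar, binary-specific translation (not shown).
    var tlsVersions = map[configv1.TLSProtocolVersion]uint16{
        configv1.VersionTLS10: tls.VersionTLS10,
        configv1.VersionTLS11: tls.VersionTLS11,
        configv1.VersionTLS12: tls.VersionTLS12,
        configv1.VersionTLS13: tls.VersionTLS13,
    }

    func main() {
        spec := resolveProfile(&configv1.TLSSecurityProfile{Type: configv1.TLSProfileModernType})
        cfg := &tls.Config{MinVersion: tlsVersions[spec.MinTLSVersion]}
        fmt.Println(cfg.MinVersion == tls.VersionTLS13) // true
    }
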
+var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ + TLSProfileOldType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + "DHE-RSA-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA256", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "ECDHE-ECDSA-AES256-SHA384", + "ECDHE-RSA-AES256-SHA384", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "DHE-RSA-AES128-SHA256", + "DHE-RSA-AES256-SHA256", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA256", + "AES256-SHA256", + "AES128-SHA", + "AES256-SHA", + "DES-CBC3-SHA", + }, + MinTLSVersion: VersionTLS10, + }, + TLSProfileIntermediateType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + }, + MinTLSVersion: VersionTLS12, + }, + TLSProfileModernType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + }, + MinTLSVersion: VersionTLS13, + }, +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..30b85b78e9 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,6919 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServer) DeepCopyInto(out *APIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. +func (in *APIServer) DeepCopy() *APIServer { + if in == nil { + return nil + } + out := new(APIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { + *out = *in + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption. 
+func (in *APIServerEncryption) DeepCopy() *APIServerEncryption { + if in == nil { + return nil + } + out := new(APIServerEncryption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerList) DeepCopyInto(out *APIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList. +func (in *APIServerList) DeepCopy() *APIServerList { + if in == nil { + return nil + } + out := new(APIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ServingCertificate = in.ServingCertificate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert. +func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert { + if in == nil { + return nil + } + out := new(APIServerNamedServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) { + *out = *in + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]APIServerNamedServingCert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts. +func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts { + if in == nil { + return nil + } + out := new(APIServerServingCerts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { + *out = *in + in.ServingCerts.DeepCopyInto(&out.ServingCerts) + out.ClientCA = in.ClientCA + if in.AdditionalCORSAllowedOrigins != nil { + in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Encryption.DeepCopyInto(&out.Encryption) + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + in.Audit.DeepCopyInto(&out.Audit) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec. 
+func (in *APIServerSpec) DeepCopy() *APIServerSpec { + if in == nil { + return nil + } + out := new(APIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus. +func (in *APIServerStatus) DeepCopy() *APIServerStatus { + if in == nil { + return nil + } + out := new(APIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSDNSSpec) DeepCopyInto(out *AWSDNSSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDNSSpec. +func (in *AWSDNSSpec) DeepCopy() *AWSDNSSpec { + if in == nil { + return nil + } + out := new(AWSDNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSIngressSpec) DeepCopyInto(out *AWSIngressSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIngressSpec. +func (in *AWSIngressSpec) DeepCopy() *AWSIngressSpec { + if in == nil { + return nil + } + out := new(AWSIngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSKMSConfig) DeepCopyInto(out *AWSKMSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSConfig. +func (in *AWSKMSConfig) DeepCopy() *AWSKMSConfig { + if in == nil { + return nil + } + out := new(AWSKMSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. +func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec { + if in == nil { + return nil + } + out := new(AWSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus. 
+func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { + if in == nil { + return nil + } + out := new(AWSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. +func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { + if in == nil { + return nil + } + out := new(AWSResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint. +func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { + if in == nil { + return nil + } + out := new(AWSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceptRisk) DeepCopyInto(out *AcceptRisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceptRisk. +func (in *AcceptRisk) DeepCopy() *AcceptRisk { + if in == nil { + return nil + } + out := new(AcceptRisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { + *out = *in + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]AdmissionPluginConfig, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.EnabledAdmissionPlugins != nil { + in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledAdmissionPlugins != nil { + in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. +func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { + if in == nil { + return nil + } + out := new(AdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { + *out = *in + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. +func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { + if in == nil { + return nil + } + out := new(AdmissionPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformSpec) DeepCopyInto(out *AlibabaCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformSpec. 
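
These generated bodies exist because plain struct assignment in Go copies slice headers, not slice contents, so a shallow copy still shares backing arrays with the original. A sketch of the difference, assuming the vendored package is importable and that AWSServiceEndpoint exposes a Name field as it does upstream:

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        orig := &configv1.AWSPlatformStatus{
            ServiceEndpoints: []configv1.AWSServiceEndpoint{{Name: "ec2"}}, // Name assumed per upstream
        }
        shallow := *orig        // copies the slice header; backing array is shared
        deep := orig.DeepCopy() // generated deep copy; fully independent

        shallow.ServiceEndpoints[0].Name = "sts"
        fmt.Println(orig.ServiceEndpoints[0].Name) // "sts" — mutated through the alias
        fmt.Println(deep.ServiceEndpoints[0].Name) // "ec2" — unaffected
    }
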
+func (in *AlibabaCloudPlatformSpec) DeepCopy() *AlibabaCloudPlatformSpec { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformStatus) DeepCopyInto(out *AlibabaCloudPlatformStatus) { + *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AlibabaCloudResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformStatus. +func (in *AlibabaCloudPlatformStatus) DeepCopy() *AlibabaCloudPlatformStatus { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudResourceTag) DeepCopyInto(out *AlibabaCloudResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudResourceTag. +func (in *AlibabaCloudResourceTag) DeepCopy() *AlibabaCloudResourceTag { + if in == nil { + return nil + } + out := new(AlibabaCloudResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Audit) DeepCopyInto(out *Audit) { + *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]AuditCustomRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit. +func (in *Audit) DeepCopy() *Audit { + if in == nil { + return nil + } + out := new(Audit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditCustomRule) DeepCopyInto(out *AuditCustomRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditCustomRule. +func (in *AuditCustomRule) DeepCopy() *AuditCustomRule { + if in == nil { + return nil + } + out := new(AuditCustomRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. 
+func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + out.OAuthMetadata = in.OAuthMetadata + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]DeprecatedWebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + if in.WebhookTokenAuthenticator != nil { + in, out := &in.WebhookTokenAuthenticator, &out.WebhookTokenAuthenticator + *out = new(WebhookTokenAuthenticator) + **out = **in + } + if in.OIDCProviders != nil { + in, out := &in.OIDCProviders, &out.OIDCProviders + *out = make([]OIDCProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata + if in.OIDCClients != nil { + in, out := &in.OIDCClients, &out.OIDCClients + *out = make([]OIDCClientStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. +func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { + if in == nil { + return nil + } + out := new(AzurePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { + *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AzureResourceTag, len(*in)) + copy(*out, *in) + } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus. +func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus { + if in == nil { + return nil + } + out := new(AzurePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureResourceTag) DeepCopyInto(out *AzureResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceTag. +func (in *AzureResourceTag) DeepCopy() *AzureResourceTag { + if in == nil { + return nil + } + out := new(AzureResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformLoadBalancer) DeepCopyInto(out *BareMetalPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformLoadBalancer. +func (in *BareMetalPlatformLoadBalancer) DeepCopy() *BareMetalPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(BareMetalPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformSpec. +func (in *BareMetalPlatformSpec) DeepCopy() *BareMetalPlatformSpec { + if in == nil { + return nil + } + out := new(BareMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(BareMetalPlatformLoadBalancer) + **out = **in + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus. +func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus { + if in == nil { + return nil + } + out := new(BareMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider. +func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) { + *out = *in + if in.DefaultProxy != nil { + in, out := &in.DefaultProxy, &out.DefaultProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.GitProxy != nil { + in, out := &in.GitProxy, &out.GitProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults. +func (in *BuildDefaults) DeepCopy() *BuildDefaults { + if in == nil { + return nil + } + out := new(BuildDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
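+// NOTE (illustrative, not generated code): APIServerInternalIPs, IngressIPs,
+// and MachineNetworks above are slices of IP and CIDR, defined types whose
+// underlying type is string. String-kinded elements hold no pointers, which is
+// why the generator can use the built-in copy() as a full deep copy:
+//
+//	nets := []CIDR{"10.0.0.0/24"}
+//	clone := make([]CIDR, len(nets))
+//	copy(clone, nets) // clone[0] can be reassigned without touching nets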
+func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) { + *out = *in + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForcePull != nil { + in, out := &in.ForcePull, &out.ForcePull + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides. +func (in *BuildOverrides) DeepCopy() *BuildOverrides { + if in == nil { + return nil + } + out := new(BuildOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.BuildDefaults.DeepCopyInto(&out.BuildDefaults) + in.BuildOverrides.DeepCopyInto(&out.BuildOverrides) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertInfo) DeepCopyInto(out *CertInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo. +func (in *CertInfo) DeepCopy() *CertInfo { + if in == nil { + return nil + } + out := new(CertInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides. +func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { + if in == nil { + return nil + } + out := new(ClientConnectionOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CloudControllerManagerStatus) DeepCopyInto(out *CloudControllerManagerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerStatus. +func (in *CloudControllerManagerStatus) DeepCopy() *CloudControllerManagerStatus { + if in == nil { + return nil + } + out := new(CloudControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudLoadBalancerConfig) DeepCopyInto(out *CloudLoadBalancerConfig) { + *out = *in + if in.ClusterHosted != nil { + in, out := &in.ClusterHosted, &out.ClusterHosted + *out = new(CloudLoadBalancerIPs) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerConfig. +func (in *CloudLoadBalancerConfig) DeepCopy() *CloudLoadBalancerConfig { + if in == nil { + return nil + } + out := new(CloudLoadBalancerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudLoadBalancerIPs) DeepCopyInto(out *CloudLoadBalancerIPs) { + *out = *in + if in.APIIntLoadBalancerIPs != nil { + in, out := &in.APIIntLoadBalancerIPs, &out.APIIntLoadBalancerIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.APILoadBalancerIPs != nil { + in, out := &in.APILoadBalancerIPs, &out.APILoadBalancerIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressLoadBalancerIPs != nil { + in, out := &in.IngressLoadBalancerIPs, &out.IngressLoadBalancerIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerIPs. +func (in *CloudLoadBalancerIPs) DeepCopy() *CloudLoadBalancerIPs { + if in == nil { + return nil + } + out := new(CloudLoadBalancerIPs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + if in.PromQL != nil { + in, out := &in.PromQL, &out.PromQL + *out = new(PromQLClusterCondition) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicy. +func (in *ClusterImagePolicy) DeepCopy() *ClusterImagePolicy { + if in == nil { + return nil + } + out := new(ClusterImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyList) DeepCopyInto(out *ClusterImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyList. +func (in *ClusterImagePolicyList) DeepCopy() *ClusterImagePolicyList { + if in == nil { + return nil + } + out := new(ClusterImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicySpec) DeepCopyInto(out *ClusterImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicySpec. +func (in *ClusterImagePolicySpec) DeepCopy() *ClusterImagePolicySpec { + if in == nil { + return nil + } + out := new(ClusterImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyStatus. +func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus { + if in == nil { + return nil + } + out := new(ClusterImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator. 
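+// NOTE (illustrative, not generated code): DeepCopyObject is what lets these
+// types satisfy k8s.io/apimachinery/pkg/runtime.Object, so generic machinery
+// such as informer caches can clone an object without knowing its concrete
+// type. The nil guard keeps a nil *ClusterImagePolicy receiver from being
+// returned as a non-nil runtime.Object interface value:
+//
+//	var obj runtime.Object = &ClusterImagePolicy{}
+//	_ = obj.DeepCopyObject().(*ClusterImagePolicy) // clone, typed again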
+func (in *ClusterOperator) DeepCopy() *ClusterOperator { + if in == nil { + return nil + } + out := new(ClusterOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList. +func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList { + if in == nil { + return nil + } + out := new(ClusterOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec. +func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec { + if in == nil { + return nil + } + out := new(ClusterOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]OperandVersion, len(*in)) + copy(*out, *in) + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + in.Extension.DeepCopyInto(&out.Extension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus. +func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus { + if in == nil { + return nil + } + out := new(ClusterOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition. 
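+// NOTE (illustrative, not generated code): LastTransitionTime above is cloned
+// through metav1.Time's own DeepCopyInto, which is also why condition slices in
+// this file are copied element by element rather than with a single copy():
+//
+//	c := ClusterOperatorStatusCondition{LastTransitionTime: metav1.Now()}
+//	_ = c.DeepCopy() // the timestamp is cloned via metav1.Time's DeepCopyInto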
+func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition { + if in == nil { + return nil + } + out := new(ClusterOperatorStatusCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion. +func (in *ClusterVersion) DeepCopy() *ClusterVersion { + if in == nil { + return nil + } + out := new(ClusterVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionCapabilitiesSpec) DeepCopyInto(out *ClusterVersionCapabilitiesSpec) { + *out = *in + if in.AdditionalEnabledCapabilities != nil { + in, out := &in.AdditionalEnabledCapabilities, &out.AdditionalEnabledCapabilities + *out = make([]ClusterVersionCapability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesSpec. +func (in *ClusterVersionCapabilitiesSpec) DeepCopy() *ClusterVersionCapabilitiesSpec { + if in == nil { + return nil + } + out := new(ClusterVersionCapabilitiesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionCapabilitiesStatus) DeepCopyInto(out *ClusterVersionCapabilitiesStatus) { + *out = *in + if in.EnabledCapabilities != nil { + in, out := &in.EnabledCapabilities, &out.EnabledCapabilities + *out = make([]ClusterVersionCapability, len(*in)) + copy(*out, *in) + } + if in.KnownCapabilities != nil { + in, out := &in.KnownCapabilities, &out.KnownCapabilities + *out = make([]ClusterVersionCapability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesStatus. +func (in *ClusterVersionCapabilitiesStatus) DeepCopy() *ClusterVersionCapabilitiesStatus { + if in == nil { + return nil + } + out := new(ClusterVersionCapabilitiesStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList. 
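+// NOTE (illustrative, not generated code): every slice and map copy in this
+// file is wrapped in a nil check, so a nil field stays nil and an
+// empty-but-non-nil field stays empty. That distinction is visible to API
+// consumers and survives a copy:
+//
+//	a := ClusterVersionCapabilitiesStatus{} // nil slices
+//	b := ClusterVersionCapabilitiesStatus{EnabledCapabilities: []ClusterVersionCapability{}}
+//	_ = a.DeepCopy().EnabledCapabilities == nil // true
+//	_ = b.DeepCopy().EnabledCapabilities == nil // false: empty slice preserved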
+func (in *ClusterVersionList) DeepCopy() *ClusterVersionList { + if in == nil { + return nil + } + out := new(ClusterVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { + *out = *in + if in.DesiredUpdate != nil { + in, out := &in.DesiredUpdate, &out.DesiredUpdate + *out = new(Update) + (*in).DeepCopyInto(*out) + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(ClusterVersionCapabilitiesSpec) + (*in).DeepCopyInto(*out) + } + if in.SignatureStores != nil { + in, out := &in.SignatureStores, &out.SignatureStores + *out = make([]SignatureStore, len(*in)) + copy(*out, *in) + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ComponentOverride, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec. +func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec { + if in == nil { + return nil + } + out := new(ClusterVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { + *out = *in + in.Desired.DeepCopyInto(&out.Desired) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]UpdateHistory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Capabilities.DeepCopyInto(&out.Capabilities) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailableUpdates != nil { + in, out := &in.AvailableUpdates, &out.AvailableUpdates + *out = make([]Release, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConditionalUpdates != nil { + in, out := &in.ConditionalUpdates, &out.ConditionalUpdates + *out = make([]ConditionalUpdate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConditionalUpdateRisks != nil { + in, out := &in.ConditionalUpdateRisks, &out.ConditionalUpdateRisks + *out = make([]ConditionalUpdateRisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus. +func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus { + if in == nil { + return nil + } + out := new(ClusterVersionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride. 
+func (in *ComponentOverride) DeepCopy() *ComponentOverride { + if in == nil { + return nil + } + out := new(ComponentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) { + *out = *in + out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec. +func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec { + if in == nil { + return nil + } + out := new(ComponentRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) { + *out = *in + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.CurrentHostnames != nil { + in, out := &in.CurrentHostnames, &out.CurrentHostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus. +func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { + if in == nil { + return nil + } + out := new(ComponentRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) { + *out = *in + in.Release.DeepCopyInto(&out.Release) + if in.RiskNames != nil { + in, out := &in.RiskNames, &out.RiskNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Risks != nil { + in, out := &in.Risks, &out.Risks + *out = make([]ConditionalUpdateRisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdate. +func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate { + if in == nil { + return nil + } + out := new(ConditionalUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchingRules != nil { + in, out := &in.MatchingRules, &out.MatchingRules + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdateRisk. +func (in *ConditionalUpdateRisk) DeepCopy() *ConditionalUpdateRisk { + if in == nil { + return nil + } + out := new(ConditionalUpdateRisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference. +func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference { + if in == nil { + return nil + } + out := new(ConfigMapFileReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference. +func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference { + if in == nil { + return nil + } + out := new(ConfigMapNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication. +func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication { + if in == nil { + return nil + } + out := new(ConsoleAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
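+// NOTE (illustrative, not generated code): for scalar-only structs such as
+// ConfigMapFileReference and ConfigMapNameReference above, the shallow
+// assignment *out = *in is already a deep copy, since there are no references
+// to share. The DeepCopy wrapper is still emitted so every type has the same
+// call surface (field names below assumed as in openshift/api config/v1):
+//
+//	ref := &ConfigMapFileReference{Name: "ca-bundle", Key: "ca.crt"}
+//	dup := ref.DeepCopy()
+//	_ = dup == ref // false: separate allocations with identical contents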
+func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + out.Authentication = in.Authentication + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Custom) DeepCopyInto(out *Custom) { + *out = *in + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make([]GathererConfig, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Custom. +func (in *Custom) DeepCopy() *Custom { + if in == nil { + return nil + } + out := new(Custom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]FeatureGateName, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]FeatureGateName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates. +func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates { + if in == nil { + return nil + } + out := new(CustomFeatureGates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) { + *out = *in + in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile. 
+func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile { + if in == nil { + return nil + } + out := new(CustomTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSPlatformSpec) DeepCopyInto(out *DNSPlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSDNSSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSPlatformSpec. +func (in *DNSPlatformSpec) DeepCopy() *DNSPlatformSpec { + if in == nil { + return nil + } + out := new(DNSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.PublicZone != nil { + in, out := &in.PublicZone, &out.PublicZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + if in.PrivateZone != nil { + in, out := &in.PrivateZone, &out.PrivateZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + in.Platform.DeepCopyInto(&out.Platform) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. +func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. 
+func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZone) DeepCopyInto(out *DNSZone) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone. +func (in *DNSZone) DeepCopy() *DNSZone { + if in == nil { + return nil + } + out := new(DNSZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication. +func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication { + if in == nil { + return nil + } + out := new(DelegatedAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization. +func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { + if in == nil { + return nil + } + out := new(DelegatedAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopyInto(out *DeprecatedWebhookTokenAuthenticator) { + *out = *in + out.KubeConfig = in.KubeConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedWebhookTokenAuthenticator. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopy() *DeprecatedWebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(DeprecatedWebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalPlatformSpec) DeepCopyInto(out *EquinixMetalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformSpec. +func (in *EquinixMetalPlatformSpec) DeepCopy() *EquinixMetalPlatformSpec { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalPlatformStatus) DeepCopyInto(out *EquinixMetalPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformStatus. 
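+// NOTE (illustrative, not generated code): DNSZone.Tags above shows the map
+// idiom. Keys and values are plain strings, so rebuilding the map key by key
+// is a complete deep copy; a value type holding references would instead be
+// cloned inside the loop. The hand-written equivalent of the generated shape:
+//
+//	dst := make(map[string]string, len(src))
+//	for k, v := range src {
+//		dst[k] = v
+//	}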
+func (in *EquinixMetalPlatformStatus) DeepCopy() *EquinixMetalPlatformStatus { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { + *out = *in + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. +func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { + if in == nil { + return nil + } + out := new(EtcdConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { + *out = *in + in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. +func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { + if in == nil { + return nil + } + out := new(EtcdStorageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) { + *out = *in + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(ExternalIPPolicy) + (*in).DeepCopyInto(*out) + } + if in.AutoAssignCIDRs != nil { + in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig. +func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig { + if in == nil { + return nil + } + out := new(ExternalIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) { + *out = *in + if in.AllowedCIDRs != nil { + in, out := &in.AllowedCIDRs, &out.AllowedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RejectedCIDRs != nil { + in, out := &in.RejectedCIDRs, &out.RejectedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy. +func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy { + if in == nil { + return nil + } + out := new(ExternalIPPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalPlatformSpec) DeepCopyInto(out *ExternalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformSpec. +func (in *ExternalPlatformSpec) DeepCopy() *ExternalPlatformSpec { + if in == nil { + return nil + } + out := new(ExternalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
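+// NOTE (illustrative, not generated code): embedded value structs follow the
+// same rule as any other field. EtcdConnectionInfo copies its flat CertInfo
+// member by plain assignment, while EtcdStorageConfig must recurse into its
+// embedded EtcdConnectionInfo because of the URLs slice:
+//
+//	src := EtcdStorageConfig{EtcdConnectionInfo: EtcdConnectionInfo{URLs: []string{"https://etcd-0:2379"}}}
+//	dst := *src.DeepCopy()
+//	dst.URLs[0] = "https://etcd-1:2379" // src.URLs[0] is unchanged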
+func (in *ExternalPlatformStatus) DeepCopyInto(out *ExternalPlatformStatus) { + *out = *in + out.CloudControllerManager = in.CloudControllerManager + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformStatus. +func (in *ExternalPlatformStatus) DeepCopy() *ExternalPlatformStatus { + if in == nil { + return nil + } + out := new(ExternalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. +func (in *ExtraMapping) DeepCopy() *ExtraMapping { + if in == nil { + return nil + } + out := new(ExtraMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate. +func (in *FeatureGate) DeepCopy() *FeatureGate { + if in == nil { + return nil + } + out := new(FeatureGate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateAttributes) DeepCopyInto(out *FeatureGateAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateAttributes. +func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes { + if in == nil { + return nil + } + out := new(FeatureGateAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]FeatureGateAttributes, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]FeatureGateAttributes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDetails. +func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails { + if in == nil { + return nil + } + out := new(FeatureGateDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FeatureGate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList. +func (in *FeatureGateList) DeepCopy() *FeatureGateList { + if in == nil { + return nil + } + out := new(FeatureGateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) { + *out = *in + if in.CustomNoUpgrade != nil { + in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade + *out = new(CustomFeatureGates) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection. +func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection { + if in == nil { + return nil + } + out := new(FeatureGateSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) { + *out = *in + in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec. +func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec { + if in == nil { + return nil + } + out := new(FeatureGateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]FeatureGateDetails, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus. +func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { + if in == nil { + return nil + } + out := new(FeatureGateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateTests) DeepCopyInto(out *FeatureGateTests) { + *out = *in + if in.Tests != nil { + in, out := &in.Tests, &out.Tests + *out = make([]TestDetails, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateTests. 
+func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { + if in == nil { + return nil + } + out := new(FeatureGateTests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformSpec. +func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec { + if in == nil { + return nil + } + out := new(GCPPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { + *out = *in + if in.ResourceLabels != nil { + in, out := &in.ResourceLabels, &out.ResourceLabels + *out = make([]GCPResourceLabel, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]GCPResourceTag, len(*in)) + copy(*out, *in) + } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus. +func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus { + if in == nil { + return nil + } + out := new(GCPPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPResourceLabel) DeepCopyInto(out *GCPResourceLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceLabel. +func (in *GCPResourceLabel) DeepCopy() *GCPResourceLabel { + if in == nil { + return nil + } + out := new(GCPResourceLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPResourceTag) DeepCopyInto(out *GCPResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceTag. +func (in *GCPResourceTag) DeepCopy() *GCPResourceTag { + if in == nil { + return nil + } + out := new(GCPResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { + *out = *in + if in.DataPolicy != nil { + in, out := &in.DataPolicy, &out.DataPolicy + *out = make([]DataPolicyOption, len(*in)) + copy(*out, *in) + } + in.Gatherers.DeepCopyInto(&out.Gatherers) + out.Storage = in.Storage + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherConfig. +func (in *GatherConfig) DeepCopy() *GatherConfig { + if in == nil { + return nil + } + out := new(GatherConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GathererConfig) DeepCopyInto(out *GathererConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererConfig. 
+func (in *GathererConfig) DeepCopy() *GathererConfig { + if in == nil { + return nil + } + out := new(GathererConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gatherers) DeepCopyInto(out *Gatherers) { + *out = *in + in.Custom.DeepCopyInto(&out.Custom) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gatherers. +func (in *Gatherers) DeepCopy() *Gatherers { + if in == nil { + return nil + } + out := new(Gatherers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + if in.CORSAllowedOrigins != nil { + in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AuditConfig.DeepCopyInto(&out.AuditConfig) + in.StorageConfig.DeepCopyInto(&out.StorageConfig) + in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig) + out.KubeClientConfig = in.KubeClientConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig. +func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { + if in == nil { + return nil + } + out := new(GenericAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + out.LeaderElection = in.LeaderElection + out.Authentication = in.Authentication + out.Authorization = in.Authorization + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig. +func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig { + if in == nil { + return nil + } + out := new(GenericControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. 
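+// NOTE (illustrative, not generated code): DeepCopyInto and DeepCopy differ
+// only in who allocates the top-level value. Into writes into a caller-provided
+// struct (nested slices and maps are still freshly allocated inside), and
+// DeepCopy is the allocating convenience wrapper built on top of it:
+//
+//	src := &GitHubIdentityProvider{Organizations: []string{"openshift"}}
+//	dst := new(GitHubIdentityProvider)
+//	src.DeepCopyInto(dst) // same result as dst := src.DeepCopy()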
+func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) { + *out = *in + out.FileData = in.FileData + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider. +func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. +func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { + if in == nil { + return nil + } + out := new(HTTPServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSource) DeepCopyInto(out *HubSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource. +func (in *HubSource) DeepCopy() *HubSource { + if in == nil { + return nil + } + out := new(HubSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) { + *out = *in + out.HubSource = in.HubSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus. +func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { + if in == nil { + return nil + } + out := new(HubSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]IBMCloudServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec. +func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec { + if in == nil { + return nil + } + out := new(IBMCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
+// in must be non-nil.
+func (in *IBMCloudPlatformStatus) DeepCopyInto(out *IBMCloudPlatformStatus) {
+	*out = *in
+	if in.ServiceEndpoints != nil {
+		in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+		*out = make([]IBMCloudServiceEndpoint, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformStatus.
+func (in *IBMCloudPlatformStatus) DeepCopy() *IBMCloudPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudServiceEndpoint) DeepCopyInto(out *IBMCloudServiceEndpoint) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudServiceEndpoint.
+func (in *IBMCloudServiceEndpoint) DeepCopy() *IBMCloudServiceEndpoint {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudServiceEndpoint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
+	*out = *in
+	in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
+func (in *IdentityProvider) DeepCopy() *IdentityProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(IdentityProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) {
+	*out = *in
+	if in.BasicAuth != nil {
+		in, out := &in.BasicAuth, &out.BasicAuth
+		*out = new(BasicAuthIdentityProvider)
+		**out = **in
+	}
+	if in.GitHub != nil {
+		in, out := &in.GitHub, &out.GitHub
+		*out = new(GitHubIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GitLab != nil {
+		in, out := &in.GitLab, &out.GitLab
+		*out = new(GitLabIdentityProvider)
+		**out = **in
+	}
+	if in.Google != nil {
+		in, out := &in.Google, &out.Google
+		*out = new(GoogleIdentityProvider)
+		**out = **in
+	}
+	if in.HTPasswd != nil {
+		in, out := &in.HTPasswd, &out.HTPasswd
+		*out = new(HTPasswdIdentityProvider)
+		**out = **in
+	}
+	if in.Keystone != nil {
+		in, out := &in.Keystone, &out.Keystone
+		*out = new(KeystoneIdentityProvider)
+		**out = **in
+	}
+	if in.LDAP != nil {
+		in, out := &in.LDAP, &out.LDAP
+		*out = new(LDAPIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OpenID != nil {
+		in, out := &in.OpenID, &out.OpenID
+		*out = new(OpenIDIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RequestHeader != nil {
+		in, out := &in.RequestHeader, &out.RequestHeader
+		*out = new(RequestHeaderIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig.
+func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(IdentityProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicy) DeepCopyInto(out *ImageContentPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicy. +func (in *ImageContentPolicy) DeepCopy() *ImageContentPolicy { + if in == nil { + return nil + } + out := new(ImageContentPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicyList) DeepCopyInto(out *ImageContentPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageContentPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicyList. +func (in *ImageContentPolicyList) DeepCopy() *ImageContentPolicyList { + if in == nil { + return nil + } + out := new(ImageContentPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicySpec) DeepCopyInto(out *ImageContentPolicySpec) { + *out = *in + if in.RepositoryDigestMirrors != nil { + in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors + *out = make([]RepositoryDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicySpec. +func (in *ImageContentPolicySpec) DeepCopy() *ImageContentPolicySpec { + if in == nil { + return nil + } + out := new(ImageContentPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageDigestMirrorSet) DeepCopyInto(out *ImageDigestMirrorSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSet. +func (in *ImageDigestMirrorSet) DeepCopy() *ImageDigestMirrorSet { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageDigestMirrorSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrorSetList) DeepCopyInto(out *ImageDigestMirrorSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageDigestMirrorSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetList. +func (in *ImageDigestMirrorSetList) DeepCopy() *ImageDigestMirrorSetList { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageDigestMirrorSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrorSetSpec) DeepCopyInto(out *ImageDigestMirrorSetSpec) { + *out = *in + if in.ImageDigestMirrors != nil { + in, out := &in.ImageDigestMirrors, &out.ImageDigestMirrors + *out = make([]ImageDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetSpec. +func (in *ImageDigestMirrorSetSpec) DeepCopy() *ImageDigestMirrorSetSpec { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrorSetStatus) DeepCopyInto(out *ImageDigestMirrorSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetStatus. +func (in *ImageDigestMirrorSetStatus) DeepCopy() *ImageDigestMirrorSetStatus { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrors) DeepCopyInto(out *ImageDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]ImageMirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrors. 
+func (in *ImageDigestMirrors) DeepCopy() *ImageDigestMirrors { + if in == nil { + return nil + } + out := new(ImageDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicy) DeepCopyInto(out *ImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicy. +func (in *ImagePolicy) DeepCopy() *ImagePolicy { + if in == nil { + return nil + } + out := new(ImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopyInto(out *ImagePolicyFulcioCAWithRekorRootOfTrust) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyFulcioCAWithRekorRootOfTrust. +func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopy() *ImagePolicyFulcioCAWithRekorRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyFulcioCAWithRekorRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyList. +func (in *ImagePolicyList) DeepCopy() *ImagePolicyList { + if in == nil { + return nil + } + out := new(ImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyPKIRootOfTrust) DeepCopyInto(out *ImagePolicyPKIRootOfTrust) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPKIRootOfTrust. +func (in *ImagePolicyPKIRootOfTrust) DeepCopy() *ImagePolicyPKIRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPKIRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopyInto(out *ImagePolicyPublicKeyRootOfTrust) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPublicKeyRootOfTrust. +func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopy() *ImagePolicyPublicKeyRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPublicKeyRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicySpec. +func (in *ImagePolicySpec) DeepCopy() *ImagePolicySpec { + if in == nil { + return nil + } + out := new(ImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyStatus. +func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { + if in == nil { + return nil + } + out := new(ImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSigstoreVerificationPolicy) DeepCopyInto(out *ImageSigstoreVerificationPolicy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + if in.SignedIdentity != nil { + in, out := &in.SignedIdentity, &out.SignedIdentity + *out = new(PolicyIdentity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSigstoreVerificationPolicy. +func (in *ImageSigstoreVerificationPolicy) DeepCopy() *ImageSigstoreVerificationPolicy { + if in == nil { + return nil + } + out := new(ImageSigstoreVerificationPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.RegistrySources.DeepCopyInto(&out.RegistrySources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSet) DeepCopyInto(out *ImageTagMirrorSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSet. 
+func (in *ImageTagMirrorSet) DeepCopy() *ImageTagMirrorSet { + if in == nil { + return nil + } + out := new(ImageTagMirrorSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagMirrorSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSetList) DeepCopyInto(out *ImageTagMirrorSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageTagMirrorSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetList. +func (in *ImageTagMirrorSetList) DeepCopy() *ImageTagMirrorSetList { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagMirrorSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSetSpec) DeepCopyInto(out *ImageTagMirrorSetSpec) { + *out = *in + if in.ImageTagMirrors != nil { + in, out := &in.ImageTagMirrors, &out.ImageTagMirrors + *out = make([]ImageTagMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetSpec. +func (in *ImageTagMirrorSetSpec) DeepCopy() *ImageTagMirrorSetSpec { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSetStatus) DeepCopyInto(out *ImageTagMirrorSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetStatus. +func (in *ImageTagMirrorSetStatus) DeepCopy() *ImageTagMirrorSetStatus { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrors) DeepCopyInto(out *ImageTagMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]ImageMirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrors. +func (in *ImageTagMirrors) DeepCopy() *ImageTagMirrors { + if in == nil { + return nil + } + out := new(ImageTagMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. +func (in *Infrastructure) DeepCopy() *Infrastructure { + if in == nil { + return nil + } + out := new(Infrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infrastructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infrastructure, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. +func (in *InfrastructureList) DeepCopy() *InfrastructureList { + if in == nil { + return nil + } + out := new(InfrastructureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfrastructureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { + *out = *in + out.CloudConfig = in.CloudConfig + in.PlatformSpec.DeepCopyInto(&out.PlatformSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. +func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { + if in == nil { + return nil + } + out := new(InfrastructureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { + *out = *in + if in.PlatformStatus != nil { + in, out := &in.PlatformStatus, &out.PlatformStatus + *out = new(PlatformStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. +func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { + if in == nil { + return nil + } + out := new(InfrastructureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. 
+func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressPlatformSpec) DeepCopyInto(out *IngressPlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSIngressSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPlatformSpec. +func (in *IngressPlatformSpec) DeepCopy() *IngressPlatformSpec { + if in == nil { + return nil + } + out := new(IngressPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteSpec, len(*in)) + copy(*out, *in) + } + if in.RequiredHSTSPolicies != nil { + in, out := &in.RequiredHSTSPolicies, &out.RequiredHSTSPolicies + *out = make([]RequiredHSTSPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGather. +func (in *InsightsDataGather) DeepCopy() *InsightsDataGather { + if in == nil { + return nil + } + out := new(InsightsDataGather) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGather) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherList) DeepCopyInto(out *InsightsDataGatherList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsDataGather, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherList. +func (in *InsightsDataGatherList) DeepCopy() *InsightsDataGatherList { + if in == nil { + return nil + } + out := new(InsightsDataGatherList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGatherList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherSpec) DeepCopyInto(out *InsightsDataGatherSpec) { + *out = *in + in.GatherConfig.DeepCopyInto(&out.GatherConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherSpec. +func (in *InsightsDataGatherSpec) DeepCopy() *InsightsDataGatherSpec { + if in == nil { + return nil + } + out := new(InsightsDataGatherSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. +func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { + if in == nil { + return nil + } + out := new(IntermediateTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSKMSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfig. +func (in *KMSConfig) DeepCopy() *KMSConfig { + if in == nil { + return nil + } + out := new(KMSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider. +func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider { + if in == nil { + return nil + } + out := new(KeystoneIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { + *out = *in + out.ConnectionOverrides = in.ConnectionOverrides + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. +func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { + if in == nil { + return nil + } + out := new(KubeClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec. +func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec { + if in == nil { + return nil + } + out := new(KubevirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformStatus) DeepCopyInto(out *KubevirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformStatus. +func (in *KubevirtPlatformStatus) DeepCopy() *KubevirtPlatformStatus { + if in == nil { + return nil + } + out := new(KubevirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) { + *out = *in + out.BindPassword = in.BindPassword + out.CA = in.CA + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider. 
+func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElection) DeepCopyInto(out *LeaderElection) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection. +func (in *LeaderElection) DeepCopy() *LeaderElection { + if in == nil { + return nil + } + out := new(LeaderElection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { + *out = *in + in.Platform.DeepCopyInto(&out.Platform) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. +func (in *LoadBalancer) DeepCopy() *LoadBalancer { + if in == nil { + return nil + } + out := new(LoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + if in.Machine != nil { + in, out := &in.Machine, &out.Machine + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration. +func (in *MTUMigration) DeepCopy() *MTUMigration { + if in == nil { + return nil + } + out := new(MTUMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(uint32) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues. +func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues { + if in == nil { + return nil + } + out := new(MTUMigrationValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaxAgePolicy) DeepCopyInto(out *MaxAgePolicy) { + *out = *in + if in.LargestMaxAge != nil { + in, out := &in.LargestMaxAge, &out.LargestMaxAge + *out = new(int32) + **out = **in + } + if in.SmallestMaxAge != nil { + in, out := &in.SmallestMaxAge, &out.SmallestMaxAge + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaxAgePolicy. +func (in *MaxAgePolicy) DeepCopy() *MaxAgePolicy { + if in == nil { + return nil + } + out := new(MaxAgePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile. +func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile { + if in == nil { + return nil + } + out := new(ModernTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. +func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnostics) DeepCopyInto(out *NetworkDiagnostics) { + *out = *in + in.SourcePlacement.DeepCopyInto(&out.SourcePlacement) + in.TargetPlacement.DeepCopyInto(&out.TargetPlacement) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnostics. +func (in *NetworkDiagnostics) DeepCopy() *NetworkDiagnostics { + if in == nil { + return nil + } + out := new(NetworkDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnosticsSourcePlacement) DeepCopyInto(out *NetworkDiagnosticsSourcePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsSourcePlacement. +func (in *NetworkDiagnosticsSourcePlacement) DeepCopy() *NetworkDiagnosticsSourcePlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsSourcePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkDiagnosticsTargetPlacement) DeepCopyInto(out *NetworkDiagnosticsTargetPlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsTargetPlacement. +func (in *NetworkDiagnosticsTargetPlacement) DeepCopy() *NetworkDiagnosticsTargetPlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsTargetPlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(MTUMigration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExternalIP != nil { + in, out := &in.ExternalIP, &out.ExternalIP + *out = new(ExternalIPConfig) + (*in).DeepCopyInto(*out) + } + in.NetworkDiagnostics.DeepCopyInto(&out.NetworkDiagnostics) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeList) DeepCopyInto(out *NodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. +func (in *NodeList) DeepCopy() *NodeList { + if in == nil { + return nil + } + out := new(NodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. +func (in *NodeSpec) DeepCopy() *NodeSpec { + if in == nil { + return nil + } + out := new(NodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. 
+func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixFailureDomain) DeepCopyInto(out *NutanixFailureDomain) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixFailureDomain. +func (in *NutanixFailureDomain) DeepCopy() *NutanixFailureDomain { + if in == nil { + return nil + } + out := new(NutanixFailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformLoadBalancer) DeepCopyInto(out *NutanixPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformLoadBalancer. +func (in *NutanixPlatformLoadBalancer) DeepCopy() *NutanixPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(NutanixPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) { + *out = *in + out.PrismCentral = in.PrismCentral + if in.PrismElements != nil { + in, out := &in.PrismElements, &out.PrismElements + *out = make([]NutanixPrismElementEndpoint, len(*in)) + copy(*out, *in) + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]NutanixFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformSpec. +func (in *NutanixPlatformSpec) DeepCopy() *NutanixPlatformSpec { + if in == nil { + return nil + } + out := new(NutanixPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformStatus) DeepCopyInto(out *NutanixPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(NutanixPlatformLoadBalancer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformStatus. +func (in *NutanixPlatformStatus) DeepCopy() *NutanixPlatformStatus { + if in == nil { + return nil + } + out := new(NutanixPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NutanixPrismElementEndpoint) DeepCopyInto(out *NutanixPrismElementEndpoint) { + *out = *in + out.Endpoint = in.Endpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismElementEndpoint. +func (in *NutanixPrismElementEndpoint) DeepCopy() *NutanixPrismElementEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismElementEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPrismEndpoint) DeepCopyInto(out *NutanixPrismEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismEndpoint. +func (in *NutanixPrismEndpoint) DeepCopy() *NutanixPrismEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) { + *out = *in + if in.UUID != nil { + in, out := &in.UUID, &out.UUID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier. +func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { + if in == nil { + return nil + } + out := new(NutanixResourceIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuth) DeepCopyInto(out *OAuth) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth. +func (in *OAuth) DeepCopy() *OAuth { + if in == nil { + return nil + } + out := new(OAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuth) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthList) DeepCopyInto(out *OAuthList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList. +func (in *OAuthList) DeepCopy() *OAuthList { + if in == nil { + return nil + } + out := new(OAuthList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) { + *out = *in + out.CA = in.CA + out.TLSClientCert = in.TLSClientCert + out.TLSClientKey = in.TLSClientKey + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo. +func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo { + if in == nil { + return nil + } + out := new(OAuthRemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) { + *out = *in + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + out.Templates = in.Templates + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec. +func (in *OAuthSpec) DeepCopy() *OAuthSpec { + if in == nil { + return nil + } + out := new(OAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus. +func (in *OAuthStatus) DeepCopy() *OAuthStatus { + if in == nil { + return nil + } + out := new(OAuthStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + out.Login = in.Login + out.ProviderSelection = in.ProviderSelection + out.Error = in.Error + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCClientConfig) DeepCopyInto(out *OIDCClientConfig) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientConfig. +func (in *OIDCClientConfig) DeepCopy() *OIDCClientConfig { + if in == nil { + return nil + } + out := new(OIDCClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCClientReference) DeepCopyInto(out *OIDCClientReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientReference. +func (in *OIDCClientReference) DeepCopy() *OIDCClientReference { + if in == nil { + return nil + } + out := new(OIDCClientReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OIDCClientStatus) DeepCopyInto(out *OIDCClientStatus) { + *out = *in + if in.CurrentOIDCClients != nil { + in, out := &in.CurrentOIDCClients, &out.CurrentOIDCClients + *out = make([]OIDCClientReference, len(*in)) + copy(*out, *in) + } + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientStatus. +func (in *OIDCClientStatus) DeepCopy() *OIDCClientStatus { + if in == nil { + return nil + } + out := new(OIDCClientStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) { + *out = *in + in.Issuer.DeepCopyInto(&out.Issuer) + if in.OIDCClients != nil { + in, out := &in.OIDCClients, &out.OIDCClients + *out = make([]OIDCClientConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) + if in.ClaimValidationRules != nil { + in, out := &in.ClaimValidationRules, &out.ClaimValidationRules + *out = make([]TokenClaimValidationRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserValidationRules != nil { + in, out := &in.UserValidationRules, &out.UserValidationRules + *out = make([]TokenUserValidationRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCProvider. +func (in *OIDCProvider) DeepCopy() *OIDCProvider { + if in == nil { + return nil + } + out := new(OIDCProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile. +func (in *OldTLSProfile) DeepCopy() *OldTLSProfile { + if in == nil { + return nil + } + out := new(OldTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]OpenIDClaim, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformLoadBalancer) DeepCopyInto(out *OpenStackPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformLoadBalancer. +func (in *OpenStackPlatformLoadBalancer) DeepCopy() *OpenStackPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(OpenStackPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec. +func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec { + if in == nil { + return nil + } + out := new(OpenStackPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(OpenStackPlatformLoadBalancer) + **out = **in + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus. +func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus { + if in == nil { + return nil + } + out := new(OpenStackPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperandVersion) DeepCopyInto(out *OperandVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion. +func (in *OperandVersion) DeepCopy() *OperandVersion { + if in == nil { + return nil + } + out := new(OperandVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHub) DeepCopyInto(out *OperatorHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub. +func (in *OperatorHub) DeepCopy() *OperatorHub { + if in == nil { + return nil + } + out := new(OperatorHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatorHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList. +func (in *OperatorHubList) DeepCopy() *OperatorHubList { + if in == nil { + return nil + } + out := new(OperatorHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec. +func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec { + if in == nil { + return nil + } + out := new(OperatorHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSourceStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus. +func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { + if in == nil { + return nil + } + out := new(OperatorHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformLoadBalancer) DeepCopyInto(out *OvirtPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformLoadBalancer. +func (in *OvirtPlatformLoadBalancer) DeepCopy() *OvirtPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(OvirtPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformSpec. +func (in *OvirtPlatformSpec) DeepCopy() *OvirtPlatformSpec { + if in == nil { + return nil + } + out := new(OvirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(OvirtPlatformLoadBalancer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus. +func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { + if in == nil { + return nil + } + out := new(OvirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKICertificateSubject. 
+func (in *PKICertificateSubject) DeepCopy() *PKICertificateSubject { + if in == nil { + return nil + } + out := new(PKICertificateSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimReference) DeepCopyInto(out *PersistentVolumeClaimReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimReference. +func (in *PersistentVolumeClaimReference) DeepCopy() *PersistentVolumeClaimReference { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeConfig) DeepCopyInto(out *PersistentVolumeConfig) { + *out = *in + out.Claim = in.Claim + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeConfig. +func (in *PersistentVolumeConfig) DeepCopy() *PersistentVolumeConfig { + if in == nil { + return nil + } + out := new(PersistentVolumeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformSpec) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformSpec) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformSpec) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformSpec) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformSpec) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformSpec) + **out = **in + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(NutanixPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalPlatformSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. 
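+// Illustrative note (hand-written, not emitted by deepcopy-gen): PlatformSpec
+// is a discriminated-union style struct with one pointer per platform, and
+// DeepCopyInto above clones every non-nil branch, so the copy never aliases
+// the original. A minimal sketch:
+//
+//	in := &PlatformSpec{AWS: &AWSPlatformSpec{}}
+//	out := in.DeepCopy()
+//	_ = out.AWS == in.AWS // false: the AWS branch was itself deep-copied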
+func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformStatus) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformStatus) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(NutanixPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalPlatformStatus) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus. +func (in *PlatformStatus) DeepCopy() *PlatformStatus { + if in == nil { + return nil + } + out := new(PlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyFulcioSubject. +func (in *PolicyFulcioSubject) DeepCopy() *PolicyFulcioSubject { + if in == nil { + return nil + } + out := new(PolicyFulcioSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyIdentity) DeepCopyInto(out *PolicyIdentity) { + *out = *in + if in.PolicyMatchExactRepository != nil { + in, out := &in.PolicyMatchExactRepository, &out.PolicyMatchExactRepository + *out = new(PolicyMatchExactRepository) + **out = **in + } + if in.PolicyMatchRemapIdentity != nil { + in, out := &in.PolicyMatchRemapIdentity, &out.PolicyMatchRemapIdentity + *out = new(PolicyMatchRemapIdentity) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyIdentity. +func (in *PolicyIdentity) DeepCopy() *PolicyIdentity { + if in == nil { + return nil + } + out := new(PolicyIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchExactRepository) DeepCopyInto(out *PolicyMatchExactRepository) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchExactRepository. +func (in *PolicyMatchExactRepository) DeepCopy() *PolicyMatchExactRepository { + if in == nil { + return nil + } + out := new(PolicyMatchExactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchRemapIdentity) DeepCopyInto(out *PolicyMatchRemapIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchRemapIdentity. +func (in *PolicyMatchRemapIdentity) DeepCopy() *PolicyMatchRemapIdentity { + if in == nil { + return nil + } + out := new(PolicyMatchRemapIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(ImagePolicyPublicKeyRootOfTrust) + (*in).DeepCopyInto(*out) + } + if in.FulcioCAWithRekor != nil { + in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor + *out = new(ImagePolicyFulcioCAWithRekorRootOfTrust) + (*in).DeepCopyInto(*out) + } + if in.PKI != nil { + in, out := &in.PKI, &out.PKI + *out = new(ImagePolicyPKIRootOfTrust) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRootOfTrust. +func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { + if in == nil { + return nil + } + out := new(PolicyRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec. +func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec { + if in == nil { + return nil + } + out := new(PowerVSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PowerVSPlatformStatus) DeepCopyInto(out *PowerVSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformStatus. +func (in *PowerVSPlatformStatus) DeepCopy() *PowerVSPlatformStatus { + if in == nil { + return nil + } + out := new(PowerVSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSServiceEndpoint) DeepCopyInto(out *PowerVSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSServiceEndpoint. +func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint { + if in == nil { + return nil + } + out := new(PowerVSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) { + *out = *in + out.TokenClaimMapping = in.TokenClaimMapping + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping. +func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping { + if in == nil { + return nil + } + out := new(PrefixedClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileCustomizations) DeepCopyInto(out *ProfileCustomizations) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileCustomizations. +func (in *ProfileCustomizations) DeepCopy() *ProfileCustomizations { + if in == nil { + return nil + } + out := new(ProfileCustomizations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. 
+func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + out.ProjectRequestTemplate = in.ProjectRequestTemplate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromQLClusterCondition) DeepCopyInto(out *PromQLClusterCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromQLClusterCondition. +func (in *PromQLClusterCondition) DeepCopy() *PromQLClusterCondition { + if in == nil { + return nil + } + out := new(PromQLClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Proxy) DeepCopyInto(out *Proxy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy. +func (in *Proxy) DeepCopy() *Proxy { + if in == nil { + return nil + } + out := new(Proxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Proxy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyList) DeepCopyInto(out *ProxyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Proxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList. +func (in *ProxyList) DeepCopy() *ProxyList { + if in == nil { + return nil + } + out := new(ProxyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
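+// Illustrative note (hand-written, not emitted by deepcopy-gen):
+// DeepCopyObject is what makes these root and list types satisfy
+// k8s.io/apimachinery/pkg/runtime.Object, letting informer caches and typed
+// clients hand out safe copies, e.g.:
+//
+//	var obj runtime.Object = list.DeepCopyObject()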
+func (in *ProxyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxySpec) DeepCopyInto(out *ProxySpec) { + *out = *in + if in.ReadinessEndpoints != nil { + in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.TrustedCA = in.TrustedCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec. +func (in *ProxySpec) DeepCopy() *ProxySpec { + if in == nil { + return nil + } + out := new(ProxySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus. +func (in *ProxyStatus) DeepCopy() *ProxyStatus { + if in == nil { + return nil + } + out := new(ProxyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrySources) DeepCopyInto(out *RegistrySources) { + *out = *in + if in.InsecureRegistries != nil { + in, out := &in.InsecureRegistries, &out.InsecureRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BlockedRegistries != nil { + in, out := &in.BlockedRegistries, &out.BlockedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedRegistries != nil { + in, out := &in.AllowedRegistries, &out.AllowedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerRuntimeSearchRegistries != nil { + in, out := &in.ContainerRuntimeSearchRegistries, &out.ContainerRuntimeSearchRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources. +func (in *RegistrySources) DeepCopy() *RegistrySources { + if in == nil { + return nil + } + out := new(RegistrySources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Release) DeepCopyInto(out *Release) { + *out = *in + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. +func (in *Release) DeepCopy() *Release { + if in == nil { + return nil + } + out := new(Release) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. +func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]Mirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors. +func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors { + if in == nil { + return nil + } + out := new(RepositoryDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.ClientCA = in.ClientCA + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredHSTSPolicy) DeepCopyInto(out *RequiredHSTSPolicy) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.DomainPatterns != nil { + in, out := &in.DomainPatterns, &out.DomainPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.MaxAge.DeepCopyInto(&out.MaxAge) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredHSTSPolicy. +func (in *RequiredHSTSPolicy) DeepCopy() *RequiredHSTSPolicy { + if in == nil { + return nil + } + out := new(RequiredHSTSPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Scheduler) DeepCopyInto(out *Scheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler. +func (in *Scheduler) DeepCopy() *Scheduler { + if in == nil { + return nil + } + out := new(Scheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerList) DeepCopyInto(out *SchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Scheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList. +func (in *SchedulerList) DeepCopy() *SchedulerList { + if in == nil { + return nil + } + out := new(SchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) { + *out = *in + out.Policy = in.Policy + out.ProfileCustomizations = in.ProfileCustomizations + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec. +func (in *SchedulerSpec) DeepCopy() *SchedulerSpec { + if in == nil { + return nil + } + out := new(SchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus. +func (in *SchedulerStatus) DeepCopy() *SchedulerStatus { + if in == nil { + return nil + } + out := new(SchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference. +func (in *SecretNameReference) DeepCopy() *SecretNameReference { + if in == nil { + return nil + } + out := new(SecretNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServingInfo) DeepCopyInto(out *ServingInfo) { + *out = *in + out.CertInfo = in.CertInfo + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]NamedCertificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo. +func (in *ServingInfo) DeepCopy() *ServingInfo { + if in == nil { + return nil + } + out := new(ServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureStore) DeepCopyInto(out *SignatureStore) { + *out = *in + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureStore. +func (in *SignatureStore) DeepCopy() *SignatureStore { + if in == nil { + return nil + } + out := new(SignatureStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.PersistentVolume = in.PersistentVolume + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSource) DeepCopyInto(out *StringSource) { + *out = *in + out.StringSourceSpec = in.StringSourceSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource. +func (in *StringSource) DeepCopy() *StringSource { + if in == nil { + return nil + } + out := new(StringSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec. +func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { + if in == nil { + return nil + } + out := new(StringSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) { + *out = *in + if in.Ciphers != nil { + in, out := &in.Ciphers, &out.Ciphers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec. +func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec { + if in == nil { + return nil + } + out := new(TLSProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
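+// Illustrative note (hand-written, not emitted by deepcopy-gen): of the four
+// TLS profile branches below, only Custom carries nested data (a TLSProfileSpec
+// with a Ciphers slice) and so needs a recursive DeepCopyInto; Old,
+// Intermediate, and Modern are empty marker structs, for which the plain
+// dereferencing assignment (**out = **in) is already a deep copy.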
+func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) { + *out = *in + if in.Old != nil { + in, out := &in.Old, &out.Old + *out = new(OldTLSProfile) + **out = **in + } + if in.Intermediate != nil { + in, out := &in.Intermediate, &out.Intermediate + *out = new(IntermediateTLSProfile) + **out = **in + } + if in.Modern != nil { + in, out := &in.Modern, &out.Modern + *out = new(ModernTLSProfile) + **out = **in + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomTLSProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile. +func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile { + if in == nil { + return nil + } + out := new(TLSSecurityProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateReference) DeepCopyInto(out *TemplateReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference. +func (in *TemplateReference) DeepCopy() *TemplateReference { + if in == nil { + return nil + } + out := new(TemplateReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestDetails) DeepCopyInto(out *TestDetails) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestDetails. +func (in *TestDetails) DeepCopy() *TestDetails { + if in == nil { + return nil + } + out := new(TestDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReporting) DeepCopyInto(out *TestReporting) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReporting. +func (in *TestReporting) DeepCopy() *TestReporting { + if in == nil { + return nil + } + out := new(TestReporting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReportingSpec) DeepCopyInto(out *TestReportingSpec) { + *out = *in + if in.TestsForFeatureGates != nil { + in, out := &in.TestsForFeatureGates, &out.TestsForFeatureGates + *out = make([]FeatureGateTests, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingSpec. +func (in *TestReportingSpec) DeepCopy() *TestReportingSpec { + if in == nil { + return nil + } + out := new(TestReportingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReportingStatus) DeepCopyInto(out *TestReportingStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingStatus. 
+func (in *TestReportingStatus) DeepCopy() *TestReportingStatus { + if in == nil { + return nil + } + out := new(TestReportingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMapping. +func (in *TokenClaimMapping) DeepCopy() *TokenClaimMapping { + if in == nil { + return nil + } + out := new(TokenClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + out.Groups = in.Groups + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(TokenClaimOrExpressionMapping) + **out = **in + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make([]ExtraMapping, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings. +func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { + if in == nil { + return nil + } + out := new(TokenClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimOrExpressionMapping) DeepCopyInto(out *TokenClaimOrExpressionMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimOrExpressionMapping. +func (in *TokenClaimOrExpressionMapping) DeepCopy() *TokenClaimOrExpressionMapping { + if in == nil { + return nil + } + out := new(TokenClaimOrExpressionMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationCELRule) DeepCopyInto(out *TokenClaimValidationCELRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationCELRule. +func (in *TokenClaimValidationCELRule) DeepCopy() *TokenClaimValidationCELRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationCELRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { + *out = *in + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = new(TokenRequiredClaim) + **out = **in + } + out.CEL = in.CEL + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule. +func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeout != nil { + in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]TokenAudience, len(*in)) + copy(*out, *in) + } + out.CertificateAuthority = in.CertificateAuthority + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer. +func (in *TokenIssuer) DeepCopy() *TokenIssuer { + if in == nil { + return nil + } + out := new(TokenIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim. +func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { + if in == nil { + return nil + } + out := new(TokenRequiredClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenUserValidationRule) DeepCopyInto(out *TokenUserValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenUserValidationRule. +func (in *TokenUserValidationRule) DeepCopy() *TokenUserValidationRule { + if in == nil { + return nil + } + out := new(TokenUserValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Update) DeepCopyInto(out *Update) { + *out = *in + if in.AcceptRisks != nil { + in, out := &in.AcceptRisks, &out.AcceptRisks + *out = make([]AcceptRisk, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update. +func (in *Update) DeepCopy() *Update { + if in == nil { + return nil + } + out := new(Update) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) { + *out = *in + in.StartedTime.DeepCopyInto(&out.StartedTime) + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory. +func (in *UpdateHistory) DeepCopy() *UpdateHistory { + if in == nil { + return nil + } + out := new(UpdateHistory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(UsernamePrefix) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping. +func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping { + if in == nil { + return nil + } + out := new(UsernameClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernamePrefix) DeepCopyInto(out *UsernamePrefix) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernamePrefix. +func (in *UsernamePrefix) DeepCopy() *UsernamePrefix { + if in == nil { + return nil + } + out := new(UsernamePrefix) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainHostGroup) DeepCopyInto(out *VSphereFailureDomainHostGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainHostGroup. +func (in *VSphereFailureDomainHostGroup) DeepCopy() *VSphereFailureDomainHostGroup { + if in == nil { + return nil + } + out := new(VSphereFailureDomainHostGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainRegionAffinity) DeepCopyInto(out *VSphereFailureDomainRegionAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainRegionAffinity. +func (in *VSphereFailureDomainRegionAffinity) DeepCopy() *VSphereFailureDomainRegionAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainRegionAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainZoneAffinity) DeepCopyInto(out *VSphereFailureDomainZoneAffinity) { + *out = *in + if in.HostGroup != nil { + in, out := &in.HostGroup, &out.HostGroup + *out = new(VSphereFailureDomainHostGroup) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainZoneAffinity. +func (in *VSphereFailureDomainZoneAffinity) DeepCopy() *VSphereFailureDomainZoneAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainZoneAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) { + *out = *in + if in.RegionAffinity != nil { + in, out := &in.RegionAffinity, &out.RegionAffinity + *out = new(VSphereFailureDomainRegionAffinity) + **out = **in + } + if in.ZoneAffinity != nil { + in, out := &in.ZoneAffinity, &out.ZoneAffinity + *out = new(VSphereFailureDomainZoneAffinity) + (*in).DeepCopyInto(*out) + } + in.Topology.DeepCopyInto(&out.Topology) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformFailureDomainSpec. 
+func (in *VSpherePlatformFailureDomainSpec) DeepCopy() *VSpherePlatformFailureDomainSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformFailureDomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformLoadBalancer) DeepCopyInto(out *VSpherePlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformLoadBalancer. +func (in *VSpherePlatformLoadBalancer) DeepCopy() *VSpherePlatformLoadBalancer { + if in == nil { + return nil + } + out := new(VSpherePlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformNodeNetworking) DeepCopyInto(out *VSpherePlatformNodeNetworking) { + *out = *in + in.External.DeepCopyInto(&out.External) + in.Internal.DeepCopyInto(&out.Internal) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworking. +func (in *VSpherePlatformNodeNetworking) DeepCopy() *VSpherePlatformNodeNetworking { + if in == nil { + return nil + } + out := new(VSpherePlatformNodeNetworking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformNodeNetworkingSpec) DeepCopyInto(out *VSpherePlatformNodeNetworkingSpec) { + *out = *in + if in.NetworkSubnetCIDR != nil { + in, out := &in.NetworkSubnetCIDR, &out.NetworkSubnetCIDR + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludeNetworkSubnetCIDR != nil { + in, out := &in.ExcludeNetworkSubnetCIDR, &out.ExcludeNetworkSubnetCIDR + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworkingSpec. +func (in *VSpherePlatformNodeNetworkingSpec) DeepCopy() *VSpherePlatformNodeNetworkingSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformNodeNetworkingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformSpec) DeepCopyInto(out *VSpherePlatformSpec) { + *out = *in + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]VSpherePlatformVCenterSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]VSpherePlatformFailureDomainSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.NodeNetworking.DeepCopyInto(&out.NodeNetworking) + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformSpec. 
+func (in *VSpherePlatformSpec) DeepCopy() *VSpherePlatformSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(VSpherePlatformLoadBalancer) + **out = **in + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformStatus. +func (in *VSpherePlatformStatus) DeepCopy() *VSpherePlatformStatus { + if in == nil { + return nil + } + out := new(VSpherePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformTopology) DeepCopyInto(out *VSpherePlatformTopology) { + *out = *in + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformTopology. +func (in *VSpherePlatformTopology) DeepCopy() *VSpherePlatformTopology { + if in == nil { + return nil + } + out := new(VSpherePlatformTopology) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformVCenterSpec) DeepCopyInto(out *VSpherePlatformVCenterSpec) { + *out = *in + if in.Datacenters != nil { + in, out := &in.Datacenters, &out.Datacenters + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformVCenterSpec. +func (in *VSpherePlatformVCenterSpec) DeepCopy() *VSpherePlatformVCenterSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformVCenterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { + *out = *in + out.KubeConfig = in.KubeConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. 
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(WebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000..eb7c485e03 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,593 @@ +apiservers.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: apiservers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - KMSEncryption + - KMSEncryptionProvider + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: APIServer + Labels: {} + PluralName: apiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +authentications.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: authentications.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ExternalOIDC + - ExternalOIDCWithUIDAndExtraClaimMappings + - ExternalOIDCWithUpstreamParity + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Authentication + Labels: {} + PluralName: authentications + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +builds.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: builds.config.openshift.io + Capability: Build + Category: "" + FeatureGates: [] + FilenameOperatorName: openshift-controller-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Build + Labels: {} + PluralName: builds + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + +clusteroperators.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/497 + CRDName: clusteroperators.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterOperator + Labels: {} + 
PluralName: clusteroperators + PrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + Scope: Cluster + ShortNames: + - co + TopLevelFeatureGates: [] + Version: v1 + +clusterversions.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/495 + CRDName: clusterversions.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ClusterUpdateAcceptRisks + - ImageStreamImportMode + - SignatureStores + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterVersion + Labels: {} + PluralName: clusterversions + PrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +consoles.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: consoles.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Console + Labels: {} + PluralName: consoles + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +dnses.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: dnses.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: DNS + Labels: {} + PluralName: dnses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +featuregates.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: featuregates.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + 
GroupName: config.openshift.io + HasStatus: true + KindName: FeatureGate + Labels: {} + PluralName: featuregates + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +images.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: images.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ImageStreamImportMode + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Image + Labels: {} + PluralName: images + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagecontentpolicies.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/874 + CRDName: imagecontentpolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageContentPolicy + Labels: {} + PluralName: imagecontentpolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagedigestmirrorsets.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagedigestmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageDigestMirrorSet + Labels: {} + PluralName: imagedigestmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - idms + TopLevelFeatureGates: [] + Version: v1 + +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + +imagetagmirrorsets.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagetagmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageTagMirrorSet + Labels: {} + PluralName: imagetagmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - itms + TopLevelFeatureGates: [] + Version: v1 + +infrastructures.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: infrastructures.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AWSClusterHostedDNSInstall + - AWSDualStackInstall + - 
AzureClusterHostedDNSInstall + - AzureDualStackInstall + - DualReplica + - DyanmicServiceEndpointIBMCloud + - GCPClusterHostedDNSInstall + - HighlyAvailableArbiter + - HighlyAvailableArbiter+DualReplica + - NutanixMultiSubnets + - OnPremDNSRecords + - VSphereHostVMGroupZonal + - VSphereMultiNetworks + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Infrastructure + Labels: {} + PluralName: infrastructures + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +ingresses.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: ingresses.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Ingress + Labels: {} + PluralName: ingresses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +insightsdatagathers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2448 + CRDName: insightsdatagathers.config.openshift.io + Capability: Insights + Category: "" + FeatureGates: + - InsightsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: false + KindName: InsightsDataGather + Labels: {} + PluralName: insightsdatagathers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - InsightsConfig + Version: v1 + +networks.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: networks.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: false + KindName: Network + Labels: {} + PluralName: networks + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +nodes.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1107 + CRDName: nodes.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - MinimumKubeletVersion + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Node + Labels: {} + PluralName: nodes + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +oauths.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: oauths.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: OAuth + Labels: {} + PluralName: oauths + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +operatorhubs.config.openshift.io: + Annotations: {} + ApprovedPRNumber: 
https://github.com/openshift/api/pull/470 + CRDName: operatorhubs.config.openshift.io + Capability: marketplace + Category: "" + FeatureGates: [] + FilenameOperatorName: marketplace + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: OperatorHub + Labels: {} + PluralName: operatorhubs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +projects.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: projects.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Project + Labels: {} + PluralName: projects + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +proxies.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: proxies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: Proxy + Labels: {} + PluralName: proxies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +schedulers.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: schedulers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - HyperShiftOnlyDynamicResourceAllocation + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Scheduler + Labels: {} + PluralName: schedulers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..7f0018950a --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,3028 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AdmissionConfig = map[string]string{ + "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. 
This is fairly uncommon and can result in performance penalties and unexpected behavior.", + "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.", +} + +func (AdmissionConfig) SwaggerDoc() map[string]string { + return map_AdmissionConfig +} + +var map_AdmissionPluginConfig = map[string]string{ + "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", + "location": "location is the path to a configuration file that contains the plugin's configuration", + "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", +} + +func (AdmissionPluginConfig) SwaggerDoc() map[string]string { + return map_AdmissionPluginConfig +} + +var map_AuditConfig = map[string]string{ + "": "AuditConfig holds configuration for the audit capabilities", + "enabled": "If this flag is set, the audit log will be printed in the logs. The log contains the method, user, and requested URL.", + "auditFilePath": "All requests coming to the apiserver will be logged to this file.", + "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", + "maximumRetainedFiles": "Maximum number of old log files to retain.", + "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", + "policyFile": "policyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "logFormat": "Format of saved audits (legacy or json).", + "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", + "webHookMode": "Strategy for sending audit events (block or batch).", +} + +func (AuditConfig) SwaggerDoc() map[string]string { + return map_AuditConfig +} + +var map_CertInfo = map[string]string{ + "": "CertInfo relates a certificate with a private key", + "certFile": "certFile is a file containing a PEM-encoded certificate", + "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", +} + +func (CertInfo) SwaggerDoc() map[string]string { + return map_CertInfo +} + +var map_ClientConnectionOverrides = map[string]string{ + "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", + "contentType": "contentType is the content type used when sending data to the server from this client.", + "qps": "qps controls the number of queries per second allowed for this connection.", + "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.", +} + +func (ClientConnectionOverrides) SwaggerDoc() map[string]string { + return map_ClientConnectionOverrides +} + +var map_ConfigMapFileReference = map[string]string{ + "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "key": "key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references.", +} + +func (ConfigMapFileReference) SwaggerDoc() map[string]string { + return map_ConfigMapFileReference +} + +var map_ConfigMapNameReference = map[string]string{ + "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced config map", +} + +func (ConfigMapNameReference) SwaggerDoc() map[string]string { + return map_ConfigMapNameReference +} + +var map_DelegatedAuthentication = map[string]string{ + "": "DelegatedAuthentication allows authentication to be disabled.", + "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.", +} + +func (DelegatedAuthentication) SwaggerDoc() map[string]string { + return map_DelegatedAuthentication +} + +var map_DelegatedAuthorization = map[string]string{ + "": "DelegatedAuthorization allows authorization to be disabled.", + "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.", +} + +func (DelegatedAuthorization) SwaggerDoc() map[string]string { + return map_DelegatedAuthorization +} + +var map_EtcdConnectionInfo = map[string]string{ + "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + "urls": "urls are the URLs for etcd", + "ca": "ca is a file containing trusted roots for the etcd server certificates", +} + +func (EtcdConnectionInfo) SwaggerDoc() map[string]string { + return map_EtcdConnectionInfo +} + +var map_EtcdStorageConfig = map[string]string{ + "storagePrefix": "storagePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", +} + +func (EtcdStorageConfig) SwaggerDoc() map[string]string { + return map_EtcdStorageConfig +} + +var map_GenericAPIServerConfig = map[string]string{ + "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", + "servingInfo": "servingInfo describes how to start serving", + "corsAllowedOrigins": "corsAllowedOrigins", + "auditConfig": "auditConfig describes how to configure audit information", + "storageConfig": "storageConfig contains information about how to use", + "admission": "admissionConfig holds information about how to configure admission.", +} + +func (GenericAPIServerConfig) SwaggerDoc() map[string]string { + return map_GenericAPIServerConfig +} + +var map_GenericControllerConfig = map[string]string{ + "": "GenericControllerConfig provides information to configure a controller", + "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", + "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need", + "authentication": "authentication allows configuration of authentication for the endpoints", + "authorization": "authorization allows configuration of authentication for the endpoints", +} + +func (GenericControllerConfig) SwaggerDoc() map[string]string { + return map_GenericControllerConfig +} + +var map_HTTPServingInfo = map[string]string{ + "": "HTTPServingInfo holds configuration for serving HTTP", + "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. 
If zero, no limit.", + "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", +} + +func (HTTPServingInfo) SwaggerDoc() map[string]string { + return map_HTTPServingInfo +} + +var map_KubeClientConfig = map[string]string{ + "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config", + "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.", +} + +func (KubeClientConfig) SwaggerDoc() map[string]string { + return map_KubeClientConfig +} + +var map_LeaderElection = map[string]string{ + "": "LeaderElection provides information to elect a leader", + "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.", + "namespace": "namespace indicates which namespace the resource is in", + "name": "name indicates what name to use for the resource", + "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.", + "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", + "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.", +} + +func (LeaderElection) SwaggerDoc() map[string]string { + return map_LeaderElection +} + +var map_MaxAgePolicy = map[string]string{ + "": "MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy", + "largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced.", + "smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. 
This value can be left unspecified, in which case no lower limit is enforced.", +} + +func (MaxAgePolicy) SwaggerDoc() map[string]string { + return map_MaxAgePolicy +} + +var map_NamedCertificate = map[string]string{ + "": "NamedCertificate specifies a certificate/key, and the names it should be served for", + "names": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", +} + +func (NamedCertificate) SwaggerDoc() map[string]string { + return map_NamedCertificate +} + +var map_RemoteConnectionInfo = map[string]string{ + "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "url is the remote URL to connect to", + "ca": "ca is the CA for verifying TLS connections", +} + +func (RemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_RemoteConnectionInfo +} + +var map_RequiredHSTSPolicy = map[string]string{ + "namespaceSelector": "namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything.", + "domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.", + "maxAge": "maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client.", + "preloadPolicy": "preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent).", + "includeSubDomainsPolicy": "includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com", +} + +func (RequiredHSTSPolicy) SwaggerDoc() map[string]string { + return map_RequiredHSTSPolicy +} + +var map_SecretNameReference = map[string]string{ + "": "SecretNameReference references a secret in a specific namespace. 
The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced secret", +} + +func (SecretNameReference) SwaggerDoc() map[string]string { + return map_SecretNameReference +} + +var map_ServingInfo = map[string]string{ + "": "ServingInfo holds information about serving web pages", + "bindAddress": "bindAddress is the ip:port to serve on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", +} + +func (ServingInfo) SwaggerDoc() map[string]string { + return map_ServingInfo +} + +var map_StringSource = map[string]string{ + "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.", +} + +func (StringSource) SwaggerDoc() map[string]string { + return map_StringSource +} + +var map_StringSourceSpec = map[string]string{ + "": "StringSourceSpec specifies a string value, or external location", + "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "keyFile references a file containing the key to use to decrypt the value.", +} + +func (StringSourceSpec) SwaggerDoc() map[string]string { + return map_StringSourceSpec +} + +var map_APIServer = map[string]string{ + "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (APIServer) SwaggerDoc() map[string]string { + return map_APIServer +} + +var map_APIServerEncryption = map[string]string{ + "": "APIServerEncryption is used to encrypt sensitive resources on the cluster.", + "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. 
Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io", + "kms": "kms defines the configuration for the external KMS instance that manages the encryption keys; when KMS encryption is enabled, sensitive resources will be encrypted using keys managed by an externally configured KMS instance.\n\nThe Key Management Service (KMS) instance provides symmetric encryption and is responsible for managing the lifecycle of the encryption keys outside of the control plane. This allows integration with an external provider to manage the data encryption keys securely.", +} + +func (APIServerEncryption) SwaggerDoc() map[string]string { + return map_APIServerEncryption +} + +var map_APIServerList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (APIServerList) SwaggerDoc() map[string]string { + return map_APIServerList +} + +var map_APIServerNamedServingCert = map[string]string{ + "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.", + "names": "names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.", + "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.", +} + +func (APIServerNamedServingCert) SwaggerDoc() map[string]string { + return map_APIServerNamedServingCert +} + +var map_APIServerServingCerts = map[string]string{ + "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.", +} + +func (APIServerServingCerts) SwaggerDoc() map[string]string { + return map_APIServerServingCerts +} + +var map_APIServerSpec = map[string]string{ + "servingCerts": "servingCerts is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.", + "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. 
The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", + "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", + "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nWhen omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is the Intermediate profile.", + "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", +} + +func (APIServerSpec) SwaggerDoc() map[string]string { + return map_APIServerSpec +} + +var map_Audit = map[string]string{ + "profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.\n\nIf unset, the 'Default' profile is used as the default.", + "customRules": "customRules specify profiles per group. These profiles take precedence over the top-level profile field if they apply. They are evaluated from top to bottom, and the first one that matches applies.", +} + +func (Audit) SwaggerDoc() map[string]string { + return map_Audit +} + +var map_AuditCustomRule = map[string]string{ + "": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.", + "group": "group is the name of a group that a request user must be a member of in order for this profile to apply.", + "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). 
- AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nIf unset, the 'Default' profile is used as the default.", +} + +func (AuditCustomRule) SwaggerDoc() map[string]string { + return map_AuditCustomRule +} + +var map_Authentication = map[string]string{ + "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Authentication) SwaggerDoc() map[string]string { + return map_Authentication +} + +var map_AuthenticationList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (AuthenticationList) SwaggerDoc() map[string]string { + return map_AuthenticationList +} + +var map_AuthenticationSpec = map[string]string{ + "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.", + "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.", + "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", + "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".", + "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. 
This allows internal components to transition to use a new service account issuer without service disruption.", + "oidcProviders": "oidcProviders are OIDC identity providers that can issue tokens for this cluster. Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", +} + +func (AuthenticationSpec) SwaggerDoc() map[string]string { + return map_AuthenticationSpec +} + +var map_AuthenticationStatus = map[string]string{ + "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.", + "oidcClients": "oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", +} + +func (AuthenticationStatus) SwaggerDoc() map[string]string { + return map_AuthenticationStatus +} + +var map_DeprecatedWebhookTokenAuthenticator = map[string]string{ + "": "deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.", + "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.", +} + +func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_DeprecatedWebhookTokenAuthenticator +} + +var map_ExtraMapping = map[string]string{ + "": "ExtraMapping allows specifying a key and CEL expression to evaluate the key's value. It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token.", + "key": "key is a required field that specifies the string to use as the extra attribute key.\n\nkey must be a domain-prefix path (e.g. 'example.org/foo'). key must not exceed 510 characters in length. key must contain the '/' character, separating the domain and path characters. key must not be empty.\n\nThe domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. It must not exceed 253 characters in length. It must start and end with an alphanumeric character. It must only contain lower case alphanumeric characters and '-' or '.'.
It must not use the reserved domains, or be subdomains of, \"kubernetes.io\", \"k8s.io\", and \"openshift.io\".\n\nThe path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length.", + "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. \"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 1024 characters in length. valueExpression must not be empty.", +} + +func (ExtraMapping) SwaggerDoc() map[string]string { + return map_ExtraMapping +} + +var map_OIDCClientConfig = map[string]string{ + "": "OIDCClientConfig configures how platform clients interact with identity providers as an authentication method.", + "componentName": "componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode.\n\nIt is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running.\n\nIt is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "clientID": "clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode.\n\nclientID must not be an empty string (\"\").", + "clientSecret": "clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider.\n\nWhen not specified, no client secret will be used when making authentication requests to the identity provider.\n\nWhen specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field.\n\nThe client secret will be used when making authentication requests to the identity provider.\n\nPublic clients do not require a client secret but private clients do require a client secret to work with the identity provider.", + "extraScopes": "extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. 
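A minimal sketch of an extra mapping as documented above; the `example.org/roles` key and the `roles` claim are hypothetical:

```yaml
extra:
# key must be a domain-prefixed path; the domain here is illustrative.
- key: example.org/roles
  # CEL expression over the token claims; 'claims' is the documented variable.
  valueExpression: claims.roles
```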
This is useful if you have configured claim mappings that require specific scopes to be requested beyond the standard OIDC scopes.\n\nWhen omitted, no additional scopes are requested.", +} + +func (OIDCClientConfig) SwaggerDoc() map[string]string { + return map_OIDCClientConfig +} + +var map_OIDCClientReference = map[string]string{ + "": "OIDCClientReference is a reference to a platform component client configuration.", + "oidcProviderName": "oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with.\n\noidcProviderName must not be an empty string (\"\").", + "issuerURL": "issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against.\n\nissuerURL must use the 'https' scheme.", + "clientID": "clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider.\n\nclientID must not be empty.", +} + +func (OIDCClientReference) SwaggerDoc() map[string]string { + return map_OIDCClientReference +} + +var map_OIDCClientStatus = map[string]string{ + "": "OIDCClientStatus represents the current state of platform components and how they interact with the configured identity providers.", + "componentName": "componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running.\n\nIt is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "currentOIDCClients": "currentOIDCClients is an optional list of clients that the component is currently using.\n\nEntries must have unique issuerURL/clientID pairs.", + "consumingUsers": "consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret.\n\nconsumingUsers must not exceed 5 entries.", + "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", +} + +func (OIDCClientStatus) SwaggerDoc() map[string]string { + return map_OIDCClientStatus +} + +var map_OIDCProvider = map[string]string{ + "name": "name is a required field that configures the unique human-readable identifier associated with the identity provider.
It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics.\n\nname must not be an empty string (\"\").", + "issuer": "issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server.", + "oidcClients": "oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs.", + "claimMappings": "claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity.", + "claimValidationRules": "claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider.\n\nValidation rules are joined via an AND operation.", + "userValidationRules": "userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. If any rule in the chain of rules evaluates to 'false', authentication will fail. When specified, at least one rule must be specified and no more than 64 rules may be specified.", +} + +func (OIDCProvider) SwaggerDoc() map[string]string { + return map_OIDCProvider +} + +var map_PrefixedClaimMapping = map[string]string{ + "": "PrefixedClaimMapping configures a claim mapping that allows for an optional prefix.", + "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted (\"\"), no prefix is applied to the cluster identity attribute.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of strings \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", +} + +func (PrefixedClaimMapping) SwaggerDoc() map[string]string { + return map_PrefixedClaimMapping +} + +var map_TokenClaimMapping = map[string]string{ + "": "TokenClaimMapping allows specifying a JWT token claim to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping.", +} + +func (TokenClaimMapping) SwaggerDoc() map[string]string { + return map_TokenClaimMapping +} + +var map_TokenClaimMappings = map[string]string{ + "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", + "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.\n\nWhen referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (',').\n\nFor example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", + "uid": "uid is an
optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time.\n\nThe current default is to use the 'sub' claim.", + "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity.\n\nkey values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided.", +} + +func (TokenClaimMappings) SwaggerDoc() map[string]string { + return map_TokenClaimMappings +} + +var map_TokenClaimOrExpressionMapping = map[string]string{ + "": "TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is an optional field for specifying the JWT token claim that is used in the mapping. The value of this claim will be assigned to the field in which this mapping is associated.\n\nPrecisely one of claim or expression must be set. claim must not be specified when expression is set. When specified, claim must be at least 1 character in length and must not exceed 256 characters in length.", + "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length.", +} + +func (TokenClaimOrExpressionMapping) SwaggerDoc() map[string]string { + return map_TokenClaimOrExpressionMapping +} + +var map_TokenClaimValidationCELRule = map[string]string{ + "expression": "expression is a CEL expression evaluated against token claims. expression is required, must be at least 1 character in length and must not exceed 1024 characters. The expression must return a boolean value where 'true' signals a valid token and 'false' an invalid one.", + "message": "message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters.", +} + +func (TokenClaimValidationCELRule) SwaggerDoc() map[string]string { + return map_TokenClaimValidationCELRule +} + +var map_TokenClaimValidationRule = map[string]string{ + "": "TokenClaimValidationRule represents a validation rule based on token claims. If type is RequiredClaim, requiredClaim must be set. 
If Type is CEL, CEL must be set and RequiredClaim must be omitted.", + "type": "type is an optional field that configures the type of the validation rule.\n\nAllowed values are \"RequiredClaim\" and \"CEL\".\n\nWhen set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value.\n\nWhen set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression.", + "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value. This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider.", + "cel": "cel holds the CEL expression and message for validation. Must be set when Type is \"CEL\", and forbidden otherwise.", +} + +func (TokenClaimValidationRule) SwaggerDoc() map[string]string { + return map_TokenClaimValidationRule +} + +var map_TokenIssuer = map[string]string{ + "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nMust be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.", + "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", + "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", + "discoveryURL": "discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` as \"{issuerURL}/.well-known/openid-configuration\".\n\nThe discoveryURL must be a valid absolute HTTPS URL. It must not contain query parameters, user information, or fragments. Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters.", +} + +func (TokenIssuer) SwaggerDoc() map[string]string { + return map_TokenIssuer +} + +var map_TokenRequiredClaim = map[string]string{ + "claim": "claim is a required field that configures the name of the required claim. When taken from the JWT claims, claim must be a string value.\n\nclaim must not be an empty string (\"\").", + "requiredValue": "requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. 
If the value in the JWT claims does not match, the token will be rejected for authentication.\n\nrequiredValue must not be an empty string (\"\").", +} + +func (TokenRequiredClaim) SwaggerDoc() map[string]string { + return map_TokenRequiredClaim +} + +var map_TokenUserValidationRule = map[string]string{ + "": "TokenUserValidationRule provides a CEL-based rule used to validate a token subject. Each rule contains a CEL expression that is evaluated against the token’s claims.", + "expression": "expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc.\n\nThe expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. expression must be at least 1 character in length and must not exceed 1024 characters.", + "message": "message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters.", +} + +func (TokenUserValidationRule) SwaggerDoc() map[string]string { + return map_TokenUserValidationRule +} + +var map_UsernameClaimMapping = map[string]string{ + "claim": "claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", + "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim.\n\nThe prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. 
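Pulling the issuer, claim mapping, and validation fields together, a hedged sketch of an Authentication resource in OIDC mode; the issuer URL, audience, and claim names are all hypothetical:

```yaml
apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: OIDC
  oidcProviders:
  - name: my-oidc            # hypothetical provider name
    issuer:
      issuerURL: https://myoidc.tld
      audiences:
      - openshift-aud        # must match the 'aud' claim in issued JWTs
    claimMappings:
      username:
        claim: email
        prefixPolicy: NoPrefix
    claimValidationRules:
    - type: RequiredClaim
      requiredClaim:
        claim: email_verified
        requiredValue: "true"
```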
Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'.\n\nAs an example, consider the following scenario:\n\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefix": "prefix configures the prefix that should be prepended to the value of the JWT claim.\n\nprefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.", +} + +func (UsernameClaimMapping) SwaggerDoc() map[string]string { + return map_UsernameClaimMapping +} + +var map_UsernamePrefix = map[string]string{ + "": "UsernamePrefix configures the string that should be used as a prefix for username claim mappings.", + "prefixString": "prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes.\n\nprefixString must not be an empty string (\"\").", +} + +func (UsernamePrefix) SwaggerDoc() map[string]string { + return map_UsernamePrefix +} + +var map_WebhookTokenAuthenticator = map[string]string{ + "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator.", + "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +var map_Build = map[string]string{ + "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user-settable values for the build controller configuration", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildDefaults = map[string]string{ + "defaultProxy": "defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", + "gitProxy": "gitProxy contains the proxy settings for git operations only.
If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. A user can override a default label by providing a label with the same name in their Build/BuildConfig.", + "resources": "resources defines resource requirements to execute the build.", +} + +func (BuildDefaults) SwaggerDoc() map[string]string { + return map_BuildDefaults +} + +var map_BuildList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (BuildList) SwaggerDoc() map[string]string { + return map_BuildList +} + +var map_BuildOverrides = map[string]string{ + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. If a user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", + "forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", +} + +func (BuildOverrides) SwaggerDoc() map[string]string { + return map_BuildOverrides +} + +var map_BuildSpec = map[string]string{ + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", + "buildDefaults": "buildDefaults controls the default information for Builds", + "buildOverrides": "buildOverrides controls override settings for builds", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_ImageLabel = map[string]string{ + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + +var map_ClusterImagePolicy = map[string]string{ + "": "ClusterImagePolicy holds cluster-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata.
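A sketch of a Build config combining the defaults and overrides documented above; the proxy host and label values are hypothetical, and defaultProxy is assumed to reuse the standard httpProxy/httpsProxy/noProxy proxy fields:

```yaml
apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildDefaults:
    defaultProxy:
      httpProxy: http://proxy.example.com:3128   # hypothetical proxy
      noProxy: .cluster.local
    imageLabels:
    - name: vendor          # default label; a build may override it by name
      value: example
  buildOverrides:
    forcePull: true         # force pull for all builds, regardless of build settings
    nodeSelector:
      node-role.kubernetes.io/worker: ""
```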
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the configuration for the cluster image policy.", + "status": "status contains the observed state of the resource.", +} + +func (ClusterImagePolicy) SwaggerDoc() map[string]string { + return map_ClusterImagePolicy +} + +var map_ClusterImagePolicyList = map[string]string{ + "": "ClusterImagePolicyList is a list of ClusterImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterImagePolicies", +} + +func (ClusterImagePolicyList) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyList +} + +var map_ClusterImagePolicySpec = map[string]string{ + "": "ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
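To make the scope semantics concrete, a hedged sketch of a ClusterImagePolicy; the repository, wildcard, and key material are hypothetical, and the `policy` sub-fields follow the containers/image verification model referenced below, so they should be checked against the actual schema:

```yaml
apiVersion: config.openshift.io/v1
kind: ClusterImagePolicy
metadata:
  name: example-policy
spec:
  scopes:
  - quay.io/myorg/myrepo         # repository scope: matches all tags and digests
  - '*.registry.example.com'     # wildcard scope: subdomain matching only
  policy:
    rootOfTrust:
      policyType: PublicKey
      publicKey:
        keyData: LS0tLS1CRUdJTg==   # hypothetical base64-encoded PEM key
```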
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ClusterImagePolicySpec) SwaggerDoc() map[string]string { + return map_ClusterImagePolicySpec +} + +var map_ClusterImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource.", +} + +func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyStatus +} + +var map_ClusterOperator = map[string]string{ + "": "ClusterOperator holds the status of a core or optional OpenShift component managed by the Cluster Version Operator (CVO). This object is used by operators to convey their state to the rest of the cluster. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds configuration that could apply to any operator.", + "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.", +} + +func (ClusterOperator) SwaggerDoc() map[string]string { + return map_ClusterOperator +} + +var map_ClusterOperatorList = map[string]string{ + "": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ClusterOperatorList) SwaggerDoc() map[string]string { + return map_ClusterOperatorList +} + +var map_ClusterOperatorSpec = map[string]string{ + "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".", +} + +func (ClusterOperatorSpec) SwaggerDoc() map[string]string { + return map_ClusterOperatorSpec +} + +var map_ClusterOperatorStatus = map[string]string{ + "": "ClusterOperatorStatus provides information about the status of the operator.", + "conditions": "conditions describes the state of the operator's managed and monitored components.", + "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.", + "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. 
operand namespaces", + "extension": "extension contains any additional status information specific to the operator which owns this status object.", +} + +func (ClusterOperatorStatus) SwaggerDoc() map[string]string { + return map_ClusterOperatorStatus +} + +var map_ClusterOperatorStatusCondition = map[string]string{ + "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.", + "type": "type specifies the aspect reported by this condition.", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.", + "reason": "reason is the CamelCase reason for the condition's current status.", + "message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", +} + +func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string { + return map_ClusterOperatorStatusCondition +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "group": "group of the referent.", + "resource": "resource of the referent.", + "namespace": "namespace of the referent.", + "name": "name of the referent.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_OperandVersion = map[string]string{ + "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.", + "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out 1.1.0.", +} + +func (OperandVersion) SwaggerDoc() map[string]string { + return map_OperandVersion +} + +var map_AcceptRisk = map[string]string{ + "": "AcceptRisk represents a risk that is considered acceptable.", + "name": "name is the name of the acceptable risk. It must be a non-empty string and must not exceed 256 characters.", +} + +func (AcceptRisk) SwaggerDoc() map[string]string { + return map_AcceptRisk +} + +var map_ClusterCondition = map[string]string{ + "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", + "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.", + "promql": "promql represents a cluster condition based on PromQL.", +} + +func (ClusterCondition) SwaggerDoc() map[string]string { + return map_ClusterCondition +} + +var map_ClusterVersion = map[string]string{ + "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.", + "status": "status contains information about the available updates and any in-progress updates.", +} + +func (ClusterVersion) SwaggerDoc() map[string]string { + return map_ClusterVersion +} + +var map_ClusterVersionCapabilitiesSpec = map[string]string{ + "": "ClusterVersionCapabilitiesSpec selects the managed set of optional, core cluster components.", + "baselineCapabilitySet": "baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent.", + "additionalEnabledCapabilities": "additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set.", +} + +func (ClusterVersionCapabilitiesSpec) SwaggerDoc() map[string]string { + return map_ClusterVersionCapabilitiesSpec +} + +var map_ClusterVersionCapabilitiesStatus = map[string]string{ + "": "ClusterVersionCapabilitiesStatus describes the state of optional, core cluster components.", + "enabledCapabilities": "enabledCapabilities lists all the capabilities that are currently managed.", + "knownCapabilities": "knownCapabilities lists all the capabilities known to the current cluster.", +} + +func (ClusterVersionCapabilitiesStatus) SwaggerDoc() map[string]string { + return map_ClusterVersionCapabilitiesStatus +} + +var map_ClusterVersionList = map[string]string{ + "": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ClusterVersionList) SwaggerDoc() map[string]string { + return map_ClusterVersionList +} + +var map_ClusterVersionSpec = map[string]string{ + "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", + "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. 
image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted if the previous version is within the current minor version. Not all rollbacks will succeed, and some may unrecoverably break the cluster.", + "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", + "channel": "channel is an identifier for explicitly requesting a non-default set of updates to be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.", + "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", + "signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.", + "overrides": "overrides is a list of overrides for components that are managed by the cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", +} + +func (ClusterVersionSpec) SwaggerDoc() map[string]string { + return map_ClusterVersionSpec +} + +var map_ClusterVersionStatus = map[string]string{ + "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", + "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", + "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency.
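As an illustration of case 6 above (version set, image and architecture unset), a sketch of a ClusterVersion spec; the UUID, channel, and version are hypothetical:

```yaml
apiVersion: config.openshift.io/v1
kind: ClusterVersion
metadata:
  name: version
spec:
  clusterID: 9a1e2f34-5b6c-4d7e-8f90-a1b2c3d4e5f6   # hypothetical RFC4122 UUID
  channel: stable-4.22
  desiredUpdate:
    version: 4.22.1   # image omitted: version + current architecture select the image
```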
Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", + "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", + "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", + "capabilities": "capabilities describes the state of optional, core cluster components.", + "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", + "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", + "conditionalUpdateRisks": "conditionalUpdateRisks contains the list of risks associated with conditionalUpdates. When performing a conditional update, all its associated risks will be compared with the set of accepted risks in the spec.desiredUpdate.acceptRisks field. If all risks for a conditional update are included in the spec.desiredUpdate.acceptRisks set, the conditional update can proceed, otherwise it is blocked. The risk names in the list must be unique. conditionalUpdateRisks must not contain more than 500 entries.", +} + +func (ClusterVersionStatus) SwaggerDoc() map[string]string { + return map_ClusterVersionStatus +} + +var map_ComponentOverride = map[string]string{ + "": "ComponentOverride allows overriding cluster version operator's behavior for a component.", + "kind": "kind identifies which object to override.", + "group": "group identifies the API group that the kind is in.", + "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.", + "name": "name is the component's name.", + "unmanaged": "unmanaged controls if the cluster version operator should stop managing the resources in this cluster.
Default: false", +} + +func (ComponentOverride) SwaggerDoc() map[string]string { + return map_ComponentOverride +} + +var map_ConditionalUpdate = map[string]string{ + "": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.", + "release": "release is the target of the update.", + "riskNames": "riskNames represents the set of the names of conditionalUpdateRisks that are relevant to this update for some clusters. The Applies condition of each conditionalUpdateRisks entry declares if that risk applies to this cluster. A conditional update is accepted only if each of its risks either does not apply to the cluster or is considered acceptable by the cluster administrator. The latter means that the risk names are included in the value of the spec.desiredUpdate.acceptRisks field. Entries must be unique and must not exceed 256 characters. riskNames must not contain more than 500 entries.", + "risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.", + "conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Recommended, for whether the update is recommended for the current cluster.", +} + +func (ConditionalUpdate) SwaggerDoc() map[string]string { + return map_ConditionalUpdate +} + +var map_ConditionalUpdateRisk = map[string]string{ + "": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.", + "conditions": "conditions represents the observations of the conditional update risk's current status. Known types are: * Applies, for whether the risk applies to the current cluster. The condition's types in the list must be unique. conditions must not contain more than one entry.", + "url": "url contains information about this risk.", + "name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.", + "message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", + "matchingRules": "matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended.", +} + +func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { + return map_ConditionalUpdateRisk +} + +var map_PromQLClusterCondition = map[string]string{ + "": "PromQLClusterCondition represents a cluster condition based on PromQL.", + "promql": "promql is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case.
Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", +} + +func (PromQLClusterCondition) SwaggerDoc() map[string]string { + return map_PromQLClusterCondition +} + +var map_Release = map[string]string{ + "": "Release represents an OpenShift release image and associated metadata.", + "architecture": "architecture is an optional field that indicates the value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. Valid values are 'Multi' and empty.", + "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.", + "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", +} + +func (Release) SwaggerDoc() map[string]string { + return map_Release +} + +var map_SignatureStore = map[string]string{ + "": "SignatureStore represents the URL of custom Signature Store", + "url": "url contains the upstream custom signature store URL. url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. This must be provided and cannot be empty.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the signature store is not honored. If the specified ca data is not valid, the signature store is not honored. If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. The namespace for this config map is openshift-config.", +} + +func (SignatureStore) SwaggerDoc() map[string]string { + return map_SignatureStore +} + +var map_Update = map[string]string{ + "": "Update represents an administrator update request.", + "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.", + "version": "version is a semantic version identifying the update version. version is required if architecture is specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", + "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, architecture cannot be specified. 
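A sketch of a custom signature store entry as described above; the URL and ConfigMap name are hypothetical, and the referenced ConfigMap lives in openshift-config with the CA bundle under the "ca.crt" key:

```yaml
spec:
  signatureStores:
  - url: https://mirror.example.com/signatures   # hypothetical upstream store
    ca:
      name: signature-store-ca                   # ConfigMap in openshift-config
```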
If both version and image are set, the version extracted from the referenced image must match the specified version.", + "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks that are designed to keep your cluster safe. Only use this if: * you are testing unsigned release images in short-lived test clusters or * you are working around a known bug in the cluster-version\n operator and you have verified the authenticity of the provided\n image yourself.\nThe provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.", + "acceptRisks": "acceptRisks is an optional set of names of conditional update risks that are considered acceptable. A conditional update is performed only if all of its risks are acceptable. This list may contain entries that apply to current, previous or future updates. The entries therefore may not map directly to a risk in .status.conditionalUpdateRisks. acceptRisks must not contain more than 1000 entries. Entries in this list must be unique.", +} + +func (Update) SwaggerDoc() map[string]string { + return map_Update +} + +var map_UpdateHistory = map[string]string{ + "": "UpdateHistory is a single attempted update to the cluster.", + "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).", + "startedTime": "startedTime is the time at which the update was started.", + "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).", + "version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", + "image": "image is a container image location that contains the update. This value is always populated.", + "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", + "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", +} + +func (UpdateHistory) SwaggerDoc() map[string]string { + return map_UpdateHistory +} + +var map_Console = map[string]string{ + "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleAuthentication = map[string]string{ + "": "ConsoleAuthentication defines optional configuration for console authentication.", + "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.", +} + +func (ConsoleAuthentication) SwaggerDoc() map[string]string { + return map_ConsoleAuthentication +} + +var map_ConsoleList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleList) SwaggerDoc() map[string]string { + return map_ConsoleList +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", + "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_AWSDNSSpec = map[string]string{ + "": "AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.", + "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.", +} + +func (AWSDNSSpec) SwaggerDoc() map[string]string { + return map_AWSDNSSpec +} + +var map_DNS = map[string]string{ + "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSPlatformSpec = map[string]string{ + "": "DNSPlatformSpec holds cloud-provider-specific configuration for DNS administration.", + "type": "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\".\n\nIndividual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults.", + "aws": "aws contains DNS configuration specific to the Amazon Web Services cloud provider.", +} + +func (DNSPlatformSpec) SwaggerDoc() map[string]string { + return map_DNSPlatformSpec +} + +var map_DNSSpec = map[string]string{ + "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.", + "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.", + "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.", + "platform": "platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSZone = map[string]string{ + "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.", + "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS zone can be fetched using `ID` as id in [1], on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get", + "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options", +} + +func (DNSZone) SwaggerDoc() map[string]string { + return map_DNSZone +} + +var map_CustomFeatureGates = map[string]string{ + "enabled": "enabled is a list of all feature gates that you want to force on", + "disabled": "disabled is a list of all feature gates that you want to force off", +} + +func (CustomFeatureGates) SwaggerDoc() map[string]string { + return map_CustomFeatureGates +} + +var map_FeatureGate = map[string]string{ + "": "Feature holds cluster-wide information about feature gates.
The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (FeatureGate) SwaggerDoc() map[string]string { + return map_FeatureGate +} + +var map_FeatureGateAttributes = map[string]string{ + "name": "name is the name of the FeatureGate.", +} + +func (FeatureGateAttributes) SwaggerDoc() map[string]string { + return map_FeatureGateAttributes +} + +var map_FeatureGateDetails = map[string]string{ + "version": "version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.", + "enabled": "enabled is a list of all feature gates that are enabled in the cluster for the named version.", + "disabled": "disabled is a list of all feature gates that are disabled in the cluster for the named version.", +} + +func (FeatureGateDetails) SwaggerDoc() map[string]string { + return map_FeatureGateDetails +} + +var map_FeatureGateList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (FeatureGateList) SwaggerDoc() map[string]string { + return map_FeatureGateList +} + +var map_FeatureGateSelection = map[string]string{ + "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.", + "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must be set to \"CustomNoUpgrade\" to use this field.", +} + +func (FeatureGateSelection) SwaggerDoc() map[string]string { + return map_FeatureGateSelection +} + +var map_FeatureGateStatus = map[string]string{ + "conditions": "conditions represent the observations of the current state. Known .status.conditions.type are: \"DeterminationDegraded\"", + "featureGates": "featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list.", +} + +func (FeatureGateStatus) SwaggerDoc() map[string]string { + return map_FeatureGateStatus +}
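+
+// Illustrative sketch (not part of the generated documentation): the featureSet
+// and customNoUpgrade fields described above live on the singleton FeatureGate
+// resource named "cluster". A hypothetical manifest, with gate names chosen
+// purely as placeholders, could look like:
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: FeatureGate
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  featureSet: CustomNoUpgrade
+//	  customNoUpgrade:
+//	    enabled:
+//	    - ExampleGateA
+//	    disabled:
+//	    - ExampleGateB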
+ +var map_Image = map[string]string{ + "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImageList) SwaggerDoc() map[string]string { + return map_ImageList +} + +var map_ImageSpec = map[string]string{ + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.", + "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", + "imageStreamImportMode": "imageStreamImportMode controls the import mode behaviour of imagestreams. It can be set to `Legacy` or `PreserveOriginal` or the empty string. If this value is specified, this setting is applied to all newly created imagestreams which do not have the value set. `Legacy` indicates that the legacy behaviour should be used. For manifest lists, the legacy behaviour will discard the manifest list and import a single sub-manifest. In this case, the platform is chosen in the following order of priority: 1.
tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, the manifest list and all its sub-manifests will be imported. When empty, the behaviour will be decided based on the payload type advertised by the ClusterVersion status, i.e. a single-arch payload implies the import mode is Legacy and a multi-arch payload implies PreserveOriginal.", +} + +func (ImageSpec) SwaggerDoc() map[string]string { + return map_ImageSpec +} + +var map_ImageStatus = map[string]string{ + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "imageStreamImportMode": "imageStreamImportMode controls the import mode behaviour of imagestreams. It can be `Legacy` or `PreserveOriginal`. `Legacy` indicates that the legacy behaviour should be used. For manifest lists, the legacy behaviour will discard the manifest list and import a single sub-manifest. In this case, the platform is chosen in the following order of priority: 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, the manifest list and all its sub-manifests will be imported. This value will be reconciled from the spec value or, if no spec value is specified, the image registry operator will look at the ClusterVersion status to determine the payload type and set the import mode accordingly, i.e. a single-arch payload implies the import mode is Legacy and a multi-arch payload implies PreserveOriginal.", +} + +func (ImageStatus) SwaggerDoc() map[string]string { + return map_ImageStatus +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", + "domainName": "domainName specifies a domain name for the registry. In case the registry uses a non-standard port (other than 80 or 443), the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RegistrySources = map[string]string{ + "": "RegistrySources holds cluster-wide information about how to handle the registries config.", + "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections.", + "blockedRegistries": "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "allowedRegistries": "allowedRegistries are the only registries permitted for image pull and push actions.
All other registries are denied.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "containerRuntimeSearchRegistries": "containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e. CRI-O. Will NOT work with builds or imagestream imports.", +} + +func (RegistrySources) SwaggerDoc() map[string]string { + return map_RegistrySources +} + +var map_ImageContentPolicy = map[string]string{ + "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", +} + +func (ImageContentPolicy) SwaggerDoc() map[string]string { + return map_ImageContentPolicy +} + +var map_ImageContentPolicyList = map[string]string{ + "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImageContentPolicyList) SwaggerDoc() map[string]string { + return map_ImageContentPolicyList +} + +var map_ImageContentPolicySpec = map[string]string{ + "": "ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.", + "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull images from mirrors by tags, set \"allowMirrorByTags\".\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.", +} + +func (ImageContentPolicySpec) SwaggerDoc() map[string]string { + return map_ImageContentPolicySpec +} + +var map_RepositoryDigestMirrors = map[string]string{ + "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source is the repository that users refer to, e.g.
in image pull specifications.", + "allowMirrorByTags": "allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false; the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue.", + "mirrors": "mirrors is zero or more repositories that may also contain the same images. If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.", +} + +func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { + return map_RepositoryDigestMirrors +} + +var map_ImageDigestMirrorSet = map[string]string{ + "": "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImageDigestMirrorSet) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSet +} + +var map_ImageDigestMirrorSetList = map[string]string{ + "": "ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImageDigestMirrorSetList) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSetList +} + +var map_ImageDigestMirrorSetSpec = map[string]string{ + "": "ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD.", + "imageDigestMirrors": "imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using \"ImageTagMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used.
For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a specific order of mirrors should configure them into one list of mirrors using the expected order.", +} + +func (ImageDigestMirrorSetSpec) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSetSpec +} + +var map_ImageDigestMirrors = map[string]string{ + "": "ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails. If unset, the image will continue to be pulled from the repository in the pull spec. mirrorSourcePolicy is valid configuration only when one or more mirrors are in the mirror list.", +} + +func (ImageDigestMirrors) SwaggerDoc() map[string]string { + return map_ImageDigestMirrors +}
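+
+// Illustrative sketch (not part of the generated documentation): an
+// ImageDigestMirrorSet using the same example registry names as the field
+// documentation above; the object name and mirrorSourcePolicy value are
+// placeholder choices.
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: ImageDigestMirrorSet
+//	metadata:
+//	  name: example-idms
+//	spec:
+//	  imageDigestMirrors:
+//	  - source: registry.redhat.io/product/repo
+//	    mirrors:
+//	    - mirror.local/redhat/product/repo
+//	    mirrorSourcePolicy: AllowContactingSource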
+ +var map_ImagePolicy = map[string]string{ + "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImagePolicy) SwaggerDoc() map[string]string { + return map_ImagePolicy +} + +var map_ImagePolicyFulcioCAWithRekorRootOfTrust = map[string]string{ + "": "ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key.", + "fulcioCAData": "fulcioCAData is a required field that contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is a required field that contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", + "fulcioSubject": "fulcioSubject is a required field that specifies the OIDC issuer and the email of the Fulcio authentication configuration.", +} + +func (ImagePolicyFulcioCAWithRekorRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyFulcioCAWithRekorRootOfTrust +} + +var map_ImagePolicyList = map[string]string{ + "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ImagePolicies", +} + +func (ImagePolicyList) SwaggerDoc() map[string]string { + return map_ImagePolicyList +} + +var map_ImagePolicyPKIRootOfTrust = map[string]string{ + "": "ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", + "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", + "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +} + +func (ImagePolicyPKIRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPKIRootOfTrust +} + +var map_ImagePolicyPublicKeyRootOfTrust = map[string]string{ + "": "ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key.", + "keyData": "keyData is a required field that contains inline base64-encoded data for the PEM format public key. keyData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is an optional field that contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", +} + +func (ImagePolicyPublicKeyRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPublicKeyRootOfTrust +} + +var map_ImagePolicySpec = map[string]string{ + "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ImagePolicySpec) SwaggerDoc() map[string]string { + return map_ImagePolicySpec +} + +var map_ImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource. condition type 'Pending' indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.", +} + +func (ImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ImagePolicyStatus +} + +var map_ImageSigstoreVerificationPolicy = map[string]string{ + "": "ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list.", + "rootOfTrust": "rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval.
This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.", + "signedIdentity": "signedIdentity is an optional field that specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when a mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", +} + +func (ImageSigstoreVerificationPolicy) SwaggerDoc() map[string]string { + return map_ImageSigstoreVerificationPolicy +} + +var map_PKICertificateSubject = map[string]string{ + "": "PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", + "email": "email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. The email must be a valid email address and at most 320 characters in length.", + "hostname": "hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.", +} + +func (PKICertificateSubject) SwaggerDoc() map[string]string { + return map_PKICertificateSubject +} + +var map_PolicyFulcioSubject = map[string]string{ + "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.", + "oidcIssuer": "oidcIssuer is a required field that contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on a URL inside the client-provided ID token. Example: \"https://expected.OIDC.issuer/\"", + "signedEmail": "signedEmail is a required field that holds the email address that the Fulcio certificate is issued for. The signedEmail must be a valid email address and at most 320 characters in length. Example: \"expected-signing-user@example.com\"", +} + +func (PolicyFulcioSubject) SwaggerDoc() map[string]string { + return map_PolicyFulcioSubject +} + +var map_PolicyIdentity = map[string]string{ + "": "PolicyIdentity defines the image identity the signature claims about the image. When omitted, the default matchPolicy is \"MatchRepoDigestOrExact\".", + "matchPolicy": "matchPolicy is a required field that specifies the matching strategy to verify the image identity in the signature against the image scope. Allowed values are \"MatchRepoDigestOrExact\", \"MatchRepository\", \"ExactRepository\", \"RemapIdentity\". When omitted, the default value is \"MatchRepoDigestOrExact\".
When set to \"MatchRepoDigestOrExact\", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. When set to \"MatchRepository\", the identity in the signature must be in the same repository as the image identity. When set to \"ExactRepository\", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by \"repository\". When set to \"RemapIdentity\", the remapIdentity must be specified. The identity in the signature must be the same as the remapped image identity. Remapped image identity is obtained by replacing the \"prefix\" with the specified “signedPrefix” if the image identity matches the specified remapPrefix.", + "exactRepository": "exactRepository specifies the repository that must be exactly matched by the identity in the signature. exactRepository is required if matchPolicy is set to \"ExactRepository\". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.", + "remapIdentity": "remapIdentity specifies the prefix remapping rule for verifying image identity. remapIdentity is required if matchPolicy is set to \"RemapIdentity\". It is used to verify that the signature claims a different registry/repository prefix than the original image.", +} + +func (PolicyIdentity) SwaggerDoc() map[string]string { + return map_PolicyIdentity +} + +var map_PolicyMatchExactRepository = map[string]string{ + "repository": "repository is the reference of the image identity to be matched. repository is required if matchPolicy is set to \"ExactRepository\". The value should be a repository name (by omitting the tag or digest) in a registry implementing the \"Docker Registry HTTP API V2\". For example, docker.io/library/busybox", +} + +func (PolicyMatchExactRepository) SwaggerDoc() map[string]string { + return map_PolicyMatchExactRepository +} + +var map_PolicyMatchRemapIdentity = map[string]string{ + "prefix": "prefix is required if matchPolicy is set to \"RemapIdentity\". prefix is the prefix of the image identity to be matched. If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", + "signedPrefix": "signedPrefix is required if matchPolicy is set to \"RemapIdentity\". signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as \"prefix\". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", +} + +func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { + return map_PolicyMatchRemapIdentity +}
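+
+// Illustrative sketch (not part of the generated documentation): an ImagePolicy
+// combining the scopes, rootOfTrust, and signedIdentity fields described above.
+// The scope, namespace, and key data are placeholders, and the apiVersion is an
+// assumption (the policy API originated as config.openshift.io/v1alpha1).
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: ImagePolicy
+//	metadata:
+//	  name: example-policy
+//	  namespace: example-namespace
+//	spec:
+//	  scopes:
+//	  - quay.io/example/repo
+//	  policy:
+//	    rootOfTrust:
+//	      policyType: PublicKey
+//	      publicKey:
+//	        keyData: <base64-encoded PEM public key>
+//	    signedIdentity:
+//	      matchPolicy: MatchRepoDigestOrExact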
+ +var map_PolicyRootOfTrust = map[string]string{ + "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", + "policyType": "policyType is a required field that specifies the type of the policy for verification. This field must correspond to how the policy was generated. Allowed values are \"PublicKey\", \"FulcioCAWithRekor\", and \"PKI\". When set to \"PublicKey\", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. When set to \"FulcioCAWithRekor\", the policy is based on the Fulcio certificate and incorporates a Rekor verification. When set to \"PKI\", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI).", + "publicKey": "publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. publicKey is required when policyType is PublicKey, and forbidden otherwise.", + "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise. For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", + "pki": "pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. pki is required when policyType is PKI, and forbidden otherwise.", +} + +func (PolicyRootOfTrust) SwaggerDoc() map[string]string { + return map_PolicyRootOfTrust +} + +var map_ImageTagMirrorSet = map[string]string{ + "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImageTagMirrorSet) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSet +} + +var map_ImageTagMirrorSetList = map[string]string{ + "": "ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImageTagMirrorSetList) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSetList +} + +var map_ImageTagMirrorSetSpec = map[string]string{ + "": "ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.", + "imageTagMirrors": "imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using digest specification only, users should configure a list of mirrors using \"ImageDigestMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a deterministic order of mirrors should configure them into one list of mirrors using the expected order.", +} + +func (ImageTagMirrorSetSpec) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSetSpec +} + +var map_ImageTagMirrors = map[string]string{ + "": "ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using \"ImageDigestMirrorSet\" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails. If unset, the image will continue to be pulled from the repository in the pull spec. mirrorSourcePolicy is valid configuration only when one or more mirrors are in the mirror list.", +} + +func (ImageTagMirrors) SwaggerDoc() map[string]string { + return map_ImageTagMirrors +}
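+
+// Illustrative sketch (not part of the generated documentation): the tag-based
+// counterpart to ImageDigestMirrorSet, again with placeholder names taken from
+// the field documentation above.
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: ImageTagMirrorSet
+//	metadata:
+//	  name: example-itms
+//	spec:
+//	  imageTagMirrors:
+//	  - source: registry.redhat.io/product/repo
+//	    mirrors:
+//	    - mirror.local/redhat/product/repo
+//	    mirrorSourcePolicy: NeverContactSource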
+ +var map_AWSPlatformSpec = map[string]string{ + "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override the default service endpoints of AWS services. There must be only one ServiceEndpoint for a service.", +} + +func (AWSPlatformSpec) SwaggerDoc() map[string]string { + return map_AWSPlatformSpec +} + +var map_AWSPlatformStatus = map[string]string{ + "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", + "region": "region holds the default AWS region for new AWS resources created by the cluster.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override the default service endpoints of AWS services. There must be only one ServiceEndpoint for a service.", + "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for AWS network resources.
This controls whether AWS resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", +} + +func (AWSPlatformStatus) SwaggerDoc() map[string]string { + return map_AWSPlatformStatus +} + +var map_AWSResourceTag = map[string]string{ + "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", + "key": "key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. Key should consist of between 1 and 128 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.", + "value": "value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag. Value should consist of between 1 and 256 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", +} + +func (AWSResourceTag) SwaggerDoc() map[string]string { + return map_AWSResourceTag +} + +var map_AWSServiceEndpoint = map[string]string{ + "": "AWSServiceEndpoint stores the configuration of a custom url to override existing defaults of AWS Services.", + "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty.", + "url": "url is a fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (AWSServiceEndpoint) SwaggerDoc() map[string]string { + return map_AWSServiceEndpoint +} + +var map_AlibabaCloudPlatformSpec = map[string]string{ + "": "AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AlibabaCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformSpec +} + +var map_AlibabaCloudPlatformStatus = map[string]string{ + "": "AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.", + "region": "region specifies the region for Alibaba Cloud resources created for the cluster.", + "resourceGroupID": "resourceGroupID is the ID of the resource group for the cluster.", + "resourceTags": "resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.", +} + +func (AlibabaCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformStatus +} + +var map_AlibabaCloudResourceTag = map[string]string{ + "": "AlibabaCloudResourceTag is the set of tags to apply to resources.", + "key": "key is the key of the tag.", + "value": "value is the value of the tag.", +} + +func (AlibabaCloudResourceTag) SwaggerDoc() map[string]string { + return map_AlibabaCloudResourceTag +} + +var map_AzurePlatformSpec = map[string]string{ + "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AzurePlatformSpec) SwaggerDoc() map[string]string { + return map_AzurePlatformSpec +}
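+
+// Illustrative sketch (not part of the generated documentation): overriding an
+// AWS service endpoint through the serviceEndpoints list described above. The
+// service name and URL are placeholders; only the https-scheme requirement
+// comes from the field documentation.
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: Infrastructure
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  platformSpec:
+//	    type: AWS
+//	    aws:
+//	      serviceEndpoints:
+//	      - name: ec2
+//	        url: https://ec2.example.vpce.amazonaws.com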
+ +var map_AzurePlatformStatus = map[string]string{ + "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", + "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", + "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is the same as ResourceGroupName.", + "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", + "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", + "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for Azure network resources. This controls whether Azure resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", +} + +func (AzurePlatformStatus) SwaggerDoc() map[string]string { + return map_AzurePlatformStatus +} + +var map_AzureResourceTag = map[string]string{ + "": "AzureResourceTag is a tag to apply to Azure resources created for the cluster.", + "key": "key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`.", + "value": "value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.", +} + +func (AzureResourceTag) SwaggerDoc() map[string]string { + return map_AzureResourceTag +} + +var map_BareMetalPlatformLoadBalancer = map[string]string{ + "": "BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform.", + "type": "type defines the type of load balancer used by the cluster on BareMetal platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed.
When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (BareMetalPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_BareMetalPlatformLoadBalancer +} + +var map_BareMetalPlatformSpec = map[string]string{ + "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".", +} + +func (BareMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_BareMetalPlatformSpec +} + +var map_BareMetalPlatformStatus = map[string]string{ + "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. 
The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", +} + +func (BareMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_BareMetalPlatformStatus +} + +var map_CloudControllerManagerStatus = map[string]string{ + "": "CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings", + "state": "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager\n\nValid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", +} + +func (CloudControllerManagerStatus) SwaggerDoc() map[string]string { + return map_CloudControllerManagerStatus +} +
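For illustration, the BareMetalPlatformStatus loadBalancer and dnsRecordsType fields documented above combine roughly as in this sketch of an Infrastructure status fragment; the values are illustrative, and per the field description dnsRecordsType: External is only valid together with loadBalancer.type: UserManaged:

    platformStatus:
      type: BareMetal
      baremetal:
        loadBalancer:
          type: UserManaged
        dnsRecordsType: External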
+var map_CloudLoadBalancerConfig = map[string]string{ + "": "CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's Load Balancer configuration needs to be provided so that the DNS solution hosted within the cluster can be configured with those values.", + "dnsType": "dnsType indicates the type of DNS solution in use within the cluster. Its default value of `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time. Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution.", + "clusterHosted": "clusterHosted holds the IP addresses of API, API-Int and Ingress Load Balancers on Cloud Platforms. The DNS solution hosted within the cluster uses these IP addresses to provide resolution for API, API-Int and Ingress services.", +} + +func (CloudLoadBalancerConfig) SwaggerDoc() map[string]string { + return map_CloudLoadBalancerConfig +} + +var map_CloudLoadBalancerIPs = map[string]string{ + "": "CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API, API-Int and Ingress Load balancers. They will be populated as soon as the respective Load Balancers have been configured. These values are utilized to configure the DNS solution hosted within the cluster.", + "apiIntLoadBalancerIPs": "apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the apiIntLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", + "apiLoadBalancerIPs": "apiLoadBalancerIPs holds Load Balancer IPs for the API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Could be empty for private clusters. Entries in the apiLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", + "ingressLoadBalancerIPs": "ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the ingressLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", +} + +func (CloudLoadBalancerIPs) SwaggerDoc() map[string]string { + return map_CloudLoadBalancerIPs +} + +var map_EquinixMetalPlatformSpec = map[string]string{ + "": "EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (EquinixMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformSpec +} + +var map_EquinixMetalPlatformStatus = map[string]string{ + "": "EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformStatus +} +
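For illustration, a ClusterHosted DNS configuration using the CloudLoadBalancerConfig and CloudLoadBalancerIPs fields documented above might look like this sketch; the addresses are placeholder values from the 192.0.2.0/24 documentation range:

    cloudLoadBalancerConfig:
      dnsType: ClusterHosted
      clusterHosted:
        apiLoadBalancerIPs:
        - 192.0.2.10
        apiIntLoadBalancerIPs:
        - 192.0.2.11
        ingressLoadBalancerIPs:
        - 192.0.2.12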
+var map_ExternalPlatformSpec = map[string]string{ + "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", + "platformName": "platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", +} + +func (ExternalPlatformSpec) SwaggerDoc() map[string]string { + return map_ExternalPlatformSpec +} + +var map_ExternalPlatformStatus = map[string]string{ + "": "ExternalPlatformStatus holds the current status of the generic External infrastructure provider.", + "cloudControllerManager": "cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", +} + +func (ExternalPlatformStatus) SwaggerDoc() map[string]string { + return map_ExternalPlatformStatus +} + +var map_GCPPlatformSpec = map[string]string{ + "": "GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (GCPPlatformSpec) SwaggerDoc() map[string]string { + return map_GCPPlatformSpec +} + +var map_GCPPlatformStatus = map[string]string{ + "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", + "projectID": "projectID is the Project ID for new GCP resources created for the cluster.", + "region": "region holds the region for new GCP resources created for the cluster.", + "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", + "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", +} + +func (GCPPlatformStatus) SwaggerDoc() map[string]string { + return map_GCPPlatformStatus +} + +var map_GCPResourceLabel = map[string]string{ + "": "GCPResourceLabel is a label to apply to GCP resources created for the cluster.", + "key": "key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`.", + "value": "value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty.
Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.", +} + +func (GCPResourceLabel) SwaggerDoc() map[string]string { + return map_GCPResourceLabel +} + +var map_GCPResourceTag = map[string]string{ + "": "GCPResourceTag is a tag to apply to GCP resources created for the cluster.", + "parentID": "parentID is the ID of the hierarchical resource where the tags are defined, e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.", + "key": "key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`.", + "value": "value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.", +} + +func (GCPResourceTag) SwaggerDoc() map[string]string { + return map_GCPResourceTag +} + +var map_IBMCloudPlatformSpec = map[string]string{ + "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.", +} + +func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformSpec +} + +var map_IBMCloudPlatformStatus = map[string]string{ + "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", + "location": "location is where the cluster has been deployed", + "resourceGroupName": "resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "providerType indicates the type of cluster that was created", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. 
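For illustration, the GCP resourceLabels and resourceTags fields documented above serialize roughly as in this sketch; the parentID, keys, and values are hypothetical and must satisfy the character and length rules in the field descriptions:

    gcp:
      resourceLabels:
      - key: team
        value: cluster-infra
      resourceTags:
      - parentID: "1234567890"
        key: environment
        value: production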
The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.", +} + +func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformStatus +} + +var map_IBMCloudServiceEndpoint = map[string]string{ + "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.", + "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`", + "url": "url is a fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. The path must follow the pattern /v[0,9]+ or /api/v[0,9]+", +} + +func (IBMCloudServiceEndpoint) SwaggerDoc() map[string]string { + return map_IBMCloudServiceEndpoint +} +
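For illustration, the IAM example called out in the IBMCloudServiceEndpoint description above serializes roughly as this sketch:

    serviceEndpoints:
    - name: IAM
      url: https://private.iam.cloud.ibm.com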
+var map_Infrastructure = map[string]string{ + "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Infrastructure) SwaggerDoc() map[string]string { + return map_Infrastructure +} + +var map_InfrastructureList = map[string]string{ + "": "InfrastructureList is a list of Infrastructure resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (InfrastructureList) SwaggerDoc() map[string]string { + return map_InfrastructureList +} + +var map_InfrastructureSpec = map[string]string{ + "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.", + "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.\n\ncloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed` namespace, with the kube cloud config stored in the `cloud.conf` key. All the clients are expected to use the generated ConfigMap only.", + "platformSpec": "platformSpec holds desired information specific to the underlying infrastructure provider.", +} + +func (InfrastructureSpec) SwaggerDoc() map[string]string { + return map_InfrastructureSpec +} + +var map_InfrastructureStatus = map[string]string{ + "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", + "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.", + "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.", + "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.", + "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", + "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", + "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", + "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.", + "cpuPartitioning": "cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are \"None\" and \"AllNodes\". When omitted, the default value is \"None\". The default value of \"None\" indicates that no nodes will be set up with CPU partitioning.
The \"AllNodes\" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API.", +} + +func (InfrastructureStatus) SwaggerDoc() map[string]string { + return map_InfrastructureStatus +} + +var map_KubevirtPlatformSpec = map[string]string{ + "": "KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (KubevirtPlatformSpec) SwaggerDoc() map[string]string { + return map_KubevirtPlatformSpec +} + +var map_KubevirtPlatformStatus = map[string]string{ + "": "KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { + return map_KubevirtPlatformStatus +} + +var map_NutanixFailureDomain = map[string]string{ + "": "NutanixFailureDomain configures failure domain information for the Nutanix platform.", + "name": "name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.", + "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", +} + +func (NutanixFailureDomain) SwaggerDoc() map[string]string { + return map_NutanixFailureDomain +} + +var map_NutanixPlatformLoadBalancer = map[string]string{ + "": "NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.", + "type": "type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. 
The default value is OpenShiftManagedDefault.", +} + +func (NutanixPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_NutanixPlatformLoadBalancer +} + +var map_NutanixPlatformSpec = map[string]string{ + "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", + "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", + "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.", + "failureDomains": "failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster.", +} + +func (NutanixPlatformSpec) SwaggerDoc() map[string]string { + return map_NutanixPlatformSpec +} + +var map_NutanixPlatformStatus = map[string]string{ + "": "NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. 
When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", +} + +func (NutanixPlatformStatus) SwaggerDoc() map[string]string { + return map_NutanixPlatformStatus +} + +var map_NutanixPrismElementEndpoint = map[string]string{ + "": "NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)", + "name": "name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc).", + "endpoint": "endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", +} + +func (NutanixPrismElementEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismElementEndpoint +} + +var map_NutanixPrismEndpoint = map[string]string{ + "": "NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)", + "address": "address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)", + "port": "port is the port number to access the Nutanix Prism Central or Element (cluster)", +} + +func (NutanixPrismEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismEndpoint +} + +var map_NutanixResourceIdentifier = map[string]string{ + "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", + "type": "type is the identifier type to use for this resource.", + "uuid": "uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.", + "name": "name is the resource name in the PC. It cannot be empty if the type is Name.", +} + +func (NutanixResourceIdentifier) SwaggerDoc() map[string]string { + return map_NutanixResourceIdentifier +} + +var map_OpenStackPlatformLoadBalancer = map[string]string{ + "": "OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.", + "type": "type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (OpenStackPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_OpenStackPlatformLoadBalancer +} + +var map_OpenStackPlatformSpec = map[string]string{ + "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. 
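For illustration, the Nutanix prismCentral, prismElements, and failureDomains fields documented above combine roughly as in this sketch, with cluster and subnets expressed as NutanixResourceIdentifier values; all names, addresses, and the port are hypothetical placeholders:

    nutanix:
      prismCentral:
        address: prismcentral.example.com
        port: 9440
      prismElements:
      - name: pe-cluster-1
        endpoint:
          address: prismelement.example.com
          port: 9440
      failureDomains:
      - name: fd-1
        cluster:
          type: Name
          name: pe-cluster-1
        subnets:
        - type: Name
          name: workload-subnet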
This only includes fields that can be modified in the cluster.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".", +} + +func (OpenStackPlatformSpec) SwaggerDoc() map[string]string { + return map_OpenStackPlatformSpec +} + +var map_OpenStackPlatformStatus = map[string]string{ + "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", +} + +func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { + return map_OpenStackPlatformStatus +} + +var map_OvirtPlatformLoadBalancer = map[string]string{ + "": "OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.", + "type": "type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (OvirtPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_OvirtPlatformLoadBalancer +} + +var map_OvirtPlatformSpec = map[string]string{ + "": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OvirtPlatformSpec) SwaggerDoc() map[string]string { + return map_OvirtPlatformSpec +} + +var map_OvirtPlatformStatus = map[string]string{ + "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. 
In dual stack clusters this list contains two IPs otherwise only one.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", +} + +func (OvirtPlatformStatus) SwaggerDoc() map[string]string { + return map_OvirtPlatformStatus +} + +var map_PlatformSpec = map[string]string{ + "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"IBMCloud\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\", \"External\", and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", + "external": "external contains settings specific to the generic External infrastructure provider. Platform-specific components should be supplemented separately.", +} + +func (PlatformSpec) SwaggerDoc() map[string]string { + return map_PlatformSpec +} + +var map_PlatformStatus = map[string]string{ + "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced to the `status.platform` and `status.platformStatus.type`.
Currently this value cannot be changed once set.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", + "external": "external contains settings specific to the generic External infrastructure provider.", +} + +func (PlatformStatus) SwaggerDoc() map[string]string { + return map_PlatformStatus +} +
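For illustration, a PlatformStatus as documented above carries the type discriminator plus one provider-specific struct, roughly as in this sketch; the PowerVS region and zone values are hypothetical, and the powervs status fields are documented just below:

    platformStatus:
      type: PowerVS
      powervs:
        region: us-south
        zone: us-south-1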
+var map_PowerVSPlatformSpec = map[string]string{ + "": "PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", +} + +func (PowerVSPlatformSpec) SwaggerDoc() map[string]string { + return map_PowerVSPlatformSpec +} + +var map_PowerVSPlatformStatus = map[string]string{ + "": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.", + "region": "region holds the default Power VS region for new Power VS resources created by the cluster.", + "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", + "resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", +} + +func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { + return map_PowerVSPlatformStatus +} + +var map_PowerVSServiceEndpoint = map[string]string{ + "": "PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services.", + "name": "name is the name of the Power VS service. Some of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud", + "url": "url is a fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string { + return map_PowerVSServiceEndpoint +} +
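For illustration, a PowerVSServiceEndpoint override as documented above serializes roughly as this sketch; the URL is a hypothetical private endpoint:

    powervs:
      serviceEndpoints:
      - name: IAM
        url: https://private.iam.cloud.ibm.com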
+var map_VSphereFailureDomainHostGroup = map[string]string{ + "": "VSphereFailureDomainHostGroup holds the vmGroup and hostGroup names in vCenter, which correspond to vm-host groups of type Virtual Machine and Host respectively. It also contains the vmHostRule, which is an affinity vm-host rule in vCenter.", + "vmGroup": "vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. vmGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "hostGroup": "hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. hostGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "vmHostRule": "vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. vmHostRule is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", +} + +func (VSphereFailureDomainHostGroup) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainHostGroup +} + +var map_VSphereFailureDomainRegionAffinity = map[string]string{ + "": "VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.", + "type": "type determines the vSphere object type for a region within this failure domain. Available types are Datacenter and ComputeCluster. When set to Datacenter, this means the vCenter Datacenter defined is the region. When set to ComputeCluster, this means the vCenter cluster defined is the region.", +} + +func (VSphereFailureDomainRegionAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainRegionAffinity +} + +var map_VSphereFailureDomainZoneAffinity = map[string]string{ + "": "VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) and the vm-host affinity rule that together create an affinity configuration for vm-host based zonal. This configuration within vCenter creates the required association between a failure domain, virtual machines and ESXi hosts to create a vm-host based zone.", + "type": "type determines the vSphere object type for a zone within this failure domain. Available types are ComputeCluster and HostGroup. When set to ComputeCluster, this means the vCenter cluster defined is the zone. When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and this means the zone is defined by the grouping of those fields.", + "hostGroup": "hostGroup holds the vmGroup and hostGroup names in vCenter, which correspond to vm-host groups of type Virtual Machine and Host respectively. It also contains the vmHostRule, which is an affinity vm-host rule in vCenter.", +} + +func (VSphereFailureDomainZoneAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainZoneAffinity +} + +var map_VSpherePlatformFailureDomainSpec = map[string]string{ + "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.", + "name": "name defines the arbitrary but unique name of a failure domain.", + "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.", + "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.", + "regionAffinity": "regionAffinity holds the type of region, Datacenter or ComputeCluster. When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.", + "zoneAffinity": "zoneAffinity holds the type of the zone and the hostGroup, whose vmGroup and hostGroup names in vCenter correspond to vm-host groups of type Virtual Machine and Host respectively. It also contains the vmHostRule, which is an affinity vm-host rule in vCenter.", + "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", + "topology": "topology describes a given failure domain using vSphere constructs", +} + +func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformFailureDomainSpec +} + +var map_VSpherePlatformLoadBalancer = map[string]string{ + "": "VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.", + "type": "type defines the type of load balancer used by the cluster on VSphere platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default.
The default value is OpenShiftManagedDefault.", +} + +func (VSpherePlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_VSpherePlatformLoadBalancer +} + +var map_VSpherePlatformNodeNetworking = map[string]string{ + "": "VSpherePlatformNodeNetworking holds the external and internal node networking spec.", + "external": "external represents the network configuration of the node that is externally routable.", + "internal": "internal represents the network configuration of the node that is routable only within the cluster.", +} + +func (VSpherePlatformNodeNetworking) SwaggerDoc() map[string]string { + return map_VSpherePlatformNodeNetworking +} + +var map_VSpherePlatformNodeNetworkingSpec = map[string]string{ + "": "VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for including and excluding IP ranges in the cloud provider. This would be used for example when multiple network adapters are attached to a guest to help determine which IP address the cloud config manager should use for the external and internal node networking.", + "networkSubnetCidr": "networkSubnetCidr holds the CIDRs; IP addresses on the VirtualMachine's network interfaces that are included in these CIDRs will be used in the respective status.addresses fields.", + "network": "network holds the VirtualMachine's VM Network names that will be used when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'`", + "excludeNetworkSubnetCidr": "excludeNetworkSubnetCidr holds subnet ranges; IP addresses within these ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields.", +} + +func (VSpherePlatformNodeNetworkingSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformNodeNetworkingSpec +} + +var map_VSpherePlatformSpec = map[string]string{ + "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. In the future the cloud provider operator, storage operator and machine operator will use these fields for configuration.", + "vcenters": "vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported, but in tech preview 3 vCenters are supported. Once the cluster has been installed, you are unable to change the current number of defined vCenters except in the case where the cluster has been upgraded from a version of OpenShift where the vsphere platform spec was not present. You may make modifications to the existing vCenters that are defined in the vcenters list in order to match with any added or modified failure domains.", + "failureDomains": "failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used.", + "nodeNetworking": "nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking.
+var map_VSpherePlatformSpec = map[string]string{
+ "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. In the future the cloud provider operator, storage operator and machine operator will use these fields for configuration.",
+ "vcenters": "vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported, but in tech preview, 3 vCenters are supported. Once the cluster has been installed, you are unable to change the current number of defined vCenters except in the case where the cluster has been upgraded from a version of OpenShift where the vsphere platform spec was not present. You may make modifications to the existing vCenters that are defined in the vcenters list in order to match with any added or modified failure domains.",
+ "failureDomains": "failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used.",
+ "nodeNetworking": "nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".",
+}
+
+func (VSpherePlatformSpec) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformSpec
+}
+
+var map_VSpherePlatformStatus = map[string]string{
+ "": "VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+ "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
+ "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. 
When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.",
+}
+
+func (VSpherePlatformStatus) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformStatus
+}
+
+var map_VSpherePlatformTopology = map[string]string{
+ "": "VSpherePlatformTopology holds the required and optional vCenter objects - datacenter, computeCluster, networks, datastore and resourcePool - to provision virtual machines.",
+ "datacenter": "datacenter is the name of vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters.",
+ "computeCluster": "computeCluster the absolute path of the vCenter cluster in which virtual machine will be located. The absolute path is of the form /<datacenter>/host/<cluster>. The maximum length of the path is 2048 characters.",
+ "networks": "networks is the list of port group network names within this failure domain. If feature gate VSphereMultiNetworks is enabled, up to 10 network adapters may be defined. 10 is the maximum number of virtual network devices which may be attached to a VM as defined by: https://configmax.esp.vmware.com/guest?vmwareproduct=vSphere&release=vSphere%208.0&categories=1-0 The available networks (port groups) can be listed using `govc ls 'network/*'` Networks should be in the form of an absolute path: /<datacenter>/network/<portgroup>.",
+ "datastore": "datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form /<datacenter>/datastore/<datastore>. The maximum length of the path is 2048 characters.",
+ "resourcePool": "resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. The maximum length of the path is 2048 characters.",
+ "folder": "folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form /<datacenter>/vm/<folder>. The maximum length of the path is 2048 characters.",
+ "template": "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters.\n\nWhen omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea.",
+}
+
+func (VSpherePlatformTopology) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformTopology
+}
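As a rough illustration of the default template path calculation described above (hypothetical helper; the control plane machineset operator's real logic is not shown here):

package main

import "fmt"

// defaultTemplatePath composes the documented default template path,
// e.g. infra "test", region "region1", zone "zonea" under datacenter "dc1"
// yields "/dc1/vm/test-rhcos-region1-zonea".
func defaultTemplatePath(datacenter, infraName, region, zone string) string {
	return fmt.Sprintf("/%s/vm/%s-rhcos-%s-%s", datacenter, infraName, region, zone)
}

func main() {
	fmt.Println(defaultTemplatePath("dc1", "test", "region1", "zonea"))
}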
+var map_VSpherePlatformVCenterSpec = map[string]string{
+ "": "VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM.",
+ "server": "server is the fully-qualified domain name or the IP address of the vCenter server.",
+ "port": "port is the TCP port that will be used to communicate to the vCenter endpoint. When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time.",
+ "datacenters": "The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. Each datacenter listed here should be used within a topology.",
+}
+
+func (VSpherePlatformVCenterSpec) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformVCenterSpec
+}
+
+var map_AWSIngressSpec = map[string]string{
+ "": "AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.",
+ "type": "type allows user to set a load balancer type. When this field is set the default ingresscontroller will get created using the specified LBType. If this field is not set then the default ingress controller of LBType Classic will be created. Valid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb",
+}
+
+func (AWSIngressSpec) SwaggerDoc() map[string]string {
+ return map_AWSIngressSpec
+}
+
+var map_ComponentRouteSpec = map[string]string{
+ "": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.",
+ "namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.",
+ "name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.",
+ "hostname": "hostname is the hostname that should be used by the route.",
+ "servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.",
+}
+
+func (ComponentRouteSpec) SwaggerDoc() map[string]string {
+ return map_ComponentRouteSpec
+}
+
+var map_ComponentRouteStatus = map[string]string{
+ "": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.",
+ "namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.",
+ "name": "name is the logical name of the route to customize. 
It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.",
+ "defaultHostname": "defaultHostname is the hostname of this route prior to customization.",
+ "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.",
+ "currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.",
+ "conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.",
+ "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.",
+}
+
+func (ComponentRouteStatus) SwaggerDoc() map[string]string {
+ return map_ComponentRouteStatus
+}
+
+var map_Ingress = map[string]string{
+ "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Ingress) SwaggerDoc() map[string]string {
+ return map_Ingress
+}
+
+var map_IngressList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (IngressList) SwaggerDoc() map[string]string {
+ return map_IngressList
+}
+
+var map_IngressPlatformSpec = map[string]string{
+ "": "IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.",
+ "type": "type is the underlying infrastructure provider for the cluster. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
+ "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.",
+}
+
+func (IngressPlatformSpec) SwaggerDoc() map[string]string {
+ return map_IngressPlatformSpec
+}
+
+var map_IngressSpec = map[string]string{
+ "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.",
+ "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been setup with a wildcard certificate.",
+ "componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.",
+ "requiredHSTSPolicies": "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission.\n\nA candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains\n\n- For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation.\n\nThe HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.\n\nNote that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.",
+ "loadBalancer": "loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure provider of the current cluster and are required for Ingress Controller to work on OpenShift.",
+}
+
+func (IngressSpec) SwaggerDoc() map[string]string {
+ return map_IngressSpec
+}
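A minimal sketch of parsing the HSTS route annotation format mentioned above (hypothetical parser; the router's actual validation is more involved):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type hstsPolicy struct {
	MaxAge            int64
	Preload           bool
	IncludeSubDomains bool
}

// parseHSTS reads a value such as "max-age=31536000;preload;includeSubDomains".
func parseHSTS(v string) (hstsPolicy, error) {
	var p hstsPolicy
	for _, tok := range strings.Split(v, ";") {
		tok = strings.TrimSpace(tok)
		switch {
		case strings.HasPrefix(strings.ToLower(tok), "max-age="):
			age, err := strconv.ParseInt(tok[len("max-age="):], 10, 64)
			if err != nil {
				return p, fmt.Errorf("invalid max-age: %w", err)
			}
			p.MaxAge = age
		case strings.EqualFold(tok, "preload"):
			p.Preload = true
		case strings.EqualFold(tok, "includeSubDomains"):
			p.IncludeSubDomains = true
		}
	}
	return p, nil
}

func main() {
	p, _ := parseHSTS("max-age=31536000;preload;includeSubDomains")
	fmt.Printf("%+v\n", p)
}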
+var map_IngressStatus = map[string]string{
+ "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.",
+ "defaultPlacement": "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes.\n\nThis field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments.\n\nSee the documentation for the IngressController replicas and nodePlacement fields for more information.\n\nWhen omitted, the default value is Workers",
+}
+
+func (IngressStatus) SwaggerDoc() map[string]string {
+ return map_IngressStatus
+}
+
+var map_LoadBalancer = map[string]string{
+ "platform": "platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.",
+}
+
+func (LoadBalancer) SwaggerDoc() map[string]string {
+ return map_LoadBalancer
+}
+
+var map_Custom = map[string]string{
+ "": "Custom provides the custom configuration of gatherers",
+ "configs": "configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. It may not exceed 100 items and each gatherer can be present only once. It is possible to disable an entire set of gatherers while allowing a specific function within that set. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"",
+}
+
+func (Custom) SwaggerDoc() map[string]string {
+ return map_Custom
+}
+
+var map_GatherConfig = map[string]string{
+ "": "GatherConfig provides data gathering configuration options.",
+ "dataPolicy": "dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. It may not exceed 2 items and must not contain duplicates. Valid values are ObfuscateNetworking and WorkloadNames. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. When omitted no obfuscation is applied.",
+ "gatherers": "gatherers is a required field that specifies the configuration of the gatherers.",
+ "storage": "storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. 
If omitted, the gathering job will use ephemeral storage.",
+}
+
+func (GatherConfig) SwaggerDoc() map[string]string {
+ return map_GatherConfig
+}
+
+var map_GathererConfig = map[string]string{
+ "": "GathererConfig allows to configure specific gatherers",
+ "name": "name is the required name of a specific gatherer. It may not exceed 256 characters. The format for a gatherer name is: {gatherer}/{function} where the function is optional. Gatherer consists of lowercase letters only and may include underscores (_). Function consists of lowercase letters only, may include underscores (_), and is separated from the gatherer by a forward slash (/). The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"",
+ "state": "state is a required field that allows you to configure a specific gatherer. Valid values are \"Enabled\" and \"Disabled\". When set to Enabled the gatherer will run. When set to Disabled the gatherer will not run.",
+}
+
+func (GathererConfig) SwaggerDoc() map[string]string {
+ return map_GathererConfig
+}
+
+var map_Gatherers = map[string]string{
+ "": "Gatherers specifies the configuration of the gatherers",
+ "mode": "mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. When set to All, all gatherers will run and gather data. When set to None, all gatherers will be disabled and no data will be gathered. When set to Custom, the custom configuration from the custom field will be applied.",
+ "custom": "custom provides gathering configuration. It is required when mode is Custom, and forbidden otherwise. Custom configuration allows user to disable only a subset of gatherers. Gatherers that are not explicitly disabled in custom configuration will run.",
+}
+
+func (Gatherers) SwaggerDoc() map[string]string {
+ return map_Gatherers
+}
+
+var map_InsightsDataGather = map[string]string{
+ "": "InsightsDataGather provides data gather configuration options for the Insights Operator.\n\n\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+}
+
+func (InsightsDataGather) SwaggerDoc() map[string]string {
+ return map_InsightsDataGather
+}
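A hypothetical validation sketch for the {gatherer}/{function} name format documented above (the API's real validation may differ):

package main

import (
	"fmt"
	"regexp"
)

// gathererNameRE: lowercase letters and underscores, optionally followed by
// "/" and a function name of the same shape.
var gathererNameRE = regexp.MustCompile(`^[a-z_]+(/[a-z_]+)?$`)

func validGathererName(name string) bool {
	return len(name) <= 256 && gathererNameRE.MatchString(name)
}

func main() {
	fmt.Println(validGathererName("clusterconfig/authentication")) // true
	fmt.Println(validGathererName("Bad/Name"))                     // false
}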
+var map_InsightsDataGatherList = map[string]string{
+ "": "InsightsDataGatherList is a collection of items Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the required standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is the required list of InsightsDataGather objects; it may not exceed 100 items",
+}
+
+func (InsightsDataGatherList) SwaggerDoc() map[string]string {
+ return map_InsightsDataGatherList
+}
+
+var map_InsightsDataGatherSpec = map[string]string{
+ "": "InsightsDataGatherSpec contains the configuration for the data gathering.",
+ "gatherConfig": "gatherConfig is a required spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.",
+}
+
+func (InsightsDataGatherSpec) SwaggerDoc() map[string]string {
+ return map_InsightsDataGatherSpec
+}
+
+var map_PersistentVolumeClaimReference = map[string]string{
+ "": "PersistentVolumeClaimReference is a reference to a PersistentVolumeClaim.",
+ "name": "name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. It is a string that follows the DNS1123 subdomain format. It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.",
+}
+
+func (PersistentVolumeClaimReference) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeClaimReference
+}
+
+var map_PersistentVolumeConfig = map[string]string{
+ "": "PersistentVolumeConfig provides configuration options for PersistentVolume storage.",
+ "claim": "claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. The PersistentVolumeClaim must be created in the openshift-insights namespace.",
+ "mountPath": "mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default mount path is /var/lib/insights-operator. The path may not exceed 1024 characters and must not contain a colon.",
+}
+
+func (PersistentVolumeConfig) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeConfig
+}
+
+var map_Storage = map[string]string{
+ "": "Storage provides persistent storage configuration options for gathering jobs. If the type is set to PersistentVolume, then the PersistentVolume must be defined. If the type is set to Ephemeral, then the PersistentVolume must not be defined.",
+ "type": "type is a required field that specifies the type of storage that will be used to store the Insights data archive. Valid values are \"PersistentVolume\" and \"Ephemeral\". When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field.",
+ "persistentVolume": "persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. The PersistentVolume must be created in the openshift-insights namespace.",
+}
+
+func (Storage) SwaggerDoc() map[string]string {
+ return map_Storage
+}
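A sketch of the DNS-1123 subdomain check described above for the claim name, using the conventional Kubernetes pattern (illustrative only; the schema presumably enforces this server-side):

package main

import (
	"fmt"
	"regexp"
)

// dns1123SubdomainRE is the conventional Kubernetes DNS-1123 subdomain pattern.
var dns1123SubdomainRE = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)

func validPVCName(name string) bool {
	return len(name) <= 253 && dns1123SubdomainRE.MatchString(name)
}

func main() {
	fmt.Println(validPVCName("insights-archive")) // true
	fmt.Println(validPVCName("Insights_Archive")) // false
}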
+var map_AWSKMSConfig = map[string]string{
+ "": "AWSKMSConfig defines the KMS config specific to AWS KMS provider",
+ "keyARN": "keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. The value must adhere to the format `arn:aws:kms:<region>:<account_id>:key/<key_id>`, where: - `<region>` is the AWS region consisting of lowercase letters and hyphens followed by a number. - `<account_id>` is a 12-digit numeric identifier for the AWS account. - `<key_id>` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens.",
+ "region": "region specifies the AWS region where the KMS instance exists, and follows the format `<region-prefix>-<region-name>-<number>`, e.g.: `us-east-1`. Only lowercase letters and hyphens followed by numbers are allowed.",
+}
+
+func (AWSKMSConfig) SwaggerDoc() map[string]string {
+ return map_AWSKMSConfig
+}
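A hypothetical sketch matching the keyARN format documented above (the exact pattern the API uses may differ):

package main

import (
	"fmt"
	"regexp"
)

// keyARNRE approximates `arn:aws:kms:<region>:<account_id>:key/<key_id>`.
var keyARNRE = regexp.MustCompile(`^arn:aws:kms:[a-z-]+[0-9]+:[0-9]{12}:key/[0-9a-f-]+$`)

func main() {
	arn := "arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"
	fmt.Println(keyARNRE.MatchString(arn)) // true
}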
+var map_KMSConfig = map[string]string{
+ "": "KMSConfig defines the configuration for the KMS instance that will be used with KMSEncryptionProvider encryption",
+ "type": "type defines the kind of platform for the KMS provider. Available provider types are AWS only.",
+ "aws": "aws defines the key config for using an AWS KMS instance for the encryption. The AWS KMS instance is managed by the user outside the purview of the control plane.",
+}
+
+func (KMSConfig) SwaggerDoc() map[string]string {
+ return map_KMSConfig
+}
+
+var map_ClusterNetworkEntry = map[string]string{
+ "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.",
+ "cidr": "The complete block for pod IPs.",
+ "hostPrefix": "The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset.",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkEntry
+}
+
+var map_ExternalIPConfig = map[string]string{
+ "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
+ "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.",
+ "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
+}
+
+func (ExternalIPConfig) SwaggerDoc() map[string]string {
+ return map_ExternalIPConfig
+}
+
+var map_ExternalIPPolicy = map[string]string{
+ "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.",
+ "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.",
+ "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.",
+}
+
+func (ExternalIPPolicy) SwaggerDoc() map[string]string {
+ return map_ExternalIPPolicy
+}
+
+var map_MTUMigration = map[string]string{
+ "": "MTUMigration contains information about MTU migration.",
+ "network": "network contains MTU migration configuration for the default network.",
+ "machine": "machine contains MTU migration configuration for the machine's uplink.",
+}
+
+func (MTUMigration) SwaggerDoc() map[string]string {
+ return map_MTUMigration
+}
+
+var map_MTUMigrationValues = map[string]string{
+ "": "MTUMigrationValues contains the values for a MTU migration.",
+ "to": "to is the MTU to migrate to.",
+ "from": "from is the MTU to migrate from.",
+}
+
+func (MTUMigrationValues) SwaggerDoc() map[string]string {
+ return map_MTUMigrationValues
+}
+
+var map_Network = map[string]string{
+ "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Network) SwaggerDoc() map[string]string {
+ return map_Network
+}
+
+var map_NetworkDiagnostics = map[string]string{
+ "mode": "mode controls the network diagnostics mode\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is All.",
+ "sourcePlacement": "sourcePlacement controls the scheduling of network diagnostics source deployment\n\nSee NetworkDiagnosticsSourcePlacement for more details about default values.",
+ "targetPlacement": "targetPlacement controls the scheduling of network diagnostics target daemonset\n\nSee NetworkDiagnosticsTargetPlacement for more details about default values.",
+}
+
+func (NetworkDiagnostics) SwaggerDoc() map[string]string {
+ return map_NetworkDiagnostics
+}
+
+var map_NetworkDiagnosticsSourcePlacement = map[string]string{
+ "": "NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components",
+ "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
+ "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. 
The current default is an empty list.",
+}
+
+func (NetworkDiagnosticsSourcePlacement) SwaggerDoc() map[string]string {
+ return map_NetworkDiagnosticsSourcePlacement
+}
+
+var map_NetworkDiagnosticsTargetPlacement = map[string]string{
+ "": "NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components",
+ "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
+ "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `- operator: \"Exists\"` which means that all taints are tolerated.",
+}
+
+func (NetworkDiagnosticsTargetPlacement) SwaggerDoc() map[string]string {
+ return map_NetworkDiagnosticsTargetPlacement
+}
+
+var map_NetworkList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (NetworkList) SwaggerDoc() map[string]string {
+ return map_NetworkList
+}
+
+var map_NetworkMigration = map[string]string{
+ "": "NetworkMigration represents the network migration status.",
+ "networkType": "networkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.",
+ "mtu": "mtu is the MTU configuration that is being deployed.",
+}
+
+func (NetworkMigration) SwaggerDoc() map[string]string {
+ return map_NetworkMigration
+}
+
+var map_NetworkSpec = map[string]string{
+ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
+ "networkType": "networkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.",
+ "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
+ "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. 
This parameter can be updated after the cluster is installed.",
+ "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.",
+}
+
+func (NetworkSpec) SwaggerDoc() map[string]string {
+ return map_NetworkSpec
+}
+
+var map_NetworkStatus = map[string]string{
+ "": "NetworkStatus is the current network configuration.",
+ "clusterNetwork": "IP address pool to use for pod IPs.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
+ "networkType": "networkType is the plugin that is deployed (e.g. OVNKubernetes).",
+ "clusterNetworkMTU": "clusterNetworkMTU is the MTU for inter-pod networking.",
+ "migration": "migration contains the cluster network migration configuration.",
+ "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkDiagnosticsAvailable\"",
+}
+
+func (NetworkStatus) SwaggerDoc() map[string]string {
+ return map_NetworkStatus
+}
+
+var map_Node = map[string]string{
+ "": "Node holds cluster-wide information about node specific features.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values.",
+}
+
+func (Node) SwaggerDoc() map[string]string {
+ return map_Node
+}
+
+var map_NodeList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (NodeList) SwaggerDoc() map[string]string {
+ return map_NodeList
+}
+
+var map_NodeSpec = map[string]string{
+ "cgroupMode": "cgroupMode determines the cgroups version on the node",
+ "workerLatencyProfile": "workerLatencyProfile determines how fast the kubelet updates its status and the corresponding reaction of the cluster",
+ "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the kubernetes release, not Openshift, this field references the underlying kubernetes version this version of Openshift is based off of. In other words: if an admin wishes to ensure no nodes run an older version than Openshift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.",
+}
+
+func (NodeSpec) SwaggerDoc() map[string]string {
+ return map_NodeSpec
+}
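A minimal sketch of the version comparison rule above (hypothetical helper; the apiserver's actual check is not shown): the kubelet's reported version is reduced to major.minor.patch before comparing.

package main

import (
	"fmt"
	"strings"
)

// stripToPatch drops pre-release and build metadata, e.g.
// "v1.30.0-ec.0+abc" -> "1.30.0".
func stripToPatch(v string) string {
	v = strings.TrimPrefix(v, "v")
	if i := strings.IndexAny(v, "-+"); i >= 0 {
		v = v[:i]
	}
	return v
}

func main() {
	fmt.Println(stripToPatch("v1.30.0-ec.0")) // 1.30.0
}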
+var map_NodeStatus = map[string]string{
+ "conditions": "conditions contain the details and the current state of the nodes.config object",
+}
+
+func (NodeStatus) SwaggerDoc() map[string]string {
+ return map_NodeStatus
+}
+
+var map_BasicAuthIdentityProvider = map[string]string{
+ "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials",
+}
+
+func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string {
+ return map_BasicAuthIdentityProvider
+}
+
+var map_GitHubIdentityProvider = map[string]string{
+ "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "organizations": "organizations optionally restricts which organizations are allowed to log in",
+ "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.",
+ "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.",
+}
+
+func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitHubIdentityProvider
+}
+
+var map_GitLabIdentityProvider = map[string]string{
+ "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "url": "url is the oauth server base URL",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. 
The namespace for this config map is openshift-config.",
+}
+
+func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitLabIdentityProvider
+}
+
+var map_GoogleIdentityProvider = map[string]string{
+ "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
+}
+
+func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GoogleIdentityProvider
+}
+
+var map_HTPasswdIdentityProvider = map[string]string{
+ "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials",
+ "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+}
+
+func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string {
+ return map_HTPasswdIdentityProvider
+}
+
+var map_IdentityProvider = map[string]string{
+ "": "IdentityProvider provides identities for users authenticating using credentials",
+ "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName",
+ "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users Defaults to \"claim\"",
+}
+
+func (IdentityProvider) SwaggerDoc() map[string]string {
+ return map_IdentityProvider
+}
+
+var map_IdentityProviderConfig = map[string]string{
+ "": "IdentityProviderConfig contains configuration for using a specific identity provider",
+ "type": "type identifies the identity provider type for this entry.",
+ "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP",
+ "github": "github enables user authentication using GitHub credentials",
+ "gitlab": "gitlab enables user authentication using GitLab credentials",
+ "google": "google enables user authentication using Google credentials",
+ "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials",
+ "keystone": "keystone enables user authentication using keystone password credentials",
+ "ldap": "ldap enables user authentication using LDAP credentials",
+ "openID": "openID enables user authentication using OpenID credentials",
+ "requestHeader": "requestHeader enables user authentication using request header credentials",
+}
+
+func (IdentityProviderConfig) SwaggerDoc() map[string]string {
+ return map_IdentityProviderConfig
+}
+
+var map_KeystoneIdentityProvider = map[string]string{
+ "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials",
+ "domainName": "domainName is required for keystone v3",
+}
+
+func (KeystoneIdentityProvider) SwaggerDoc() map[string]string {
+ return map_KeystoneIdentityProvider
+}
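A sketch of the identity provider name constraints listed above (hypothetical helper mirroring the documented rules):

package main

import (
	"fmt"
	"strings"
)

// validIdPName enforces: a valid path segment, i.e. not "." or "..",
// and containing no "/", "%" or ":".
func validIdPName(name string) bool {
	if name == "" || name == "." || name == ".." {
		return false
	}
	return !strings.ContainsAny(name, "/%:")
}

func main() {
	fmt.Println(validIdPName("my_ldap"))  // true
	fmt.Println(validIdPName("bad:name")) // false
}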
"domainName is required for keystone v3", +} + +func (KeystoneIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystoneIdentityProvider +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "insecure": "insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLS connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "attributes": "attributes maps LDAP attributes to identities", +} + +func (LDAPIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPIdentityProvider +} + +var map_OAuth = map[string]string{ + "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (OAuth) SwaggerDoc() map[string]string {
+ return map_OAuth
+}
+
+var map_OAuthList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (OAuthList) SwaggerDoc() map[string]string {
+ return map_OAuthList
+}
+
+var map_OAuthRemoteConnectionInfo = map[string]string{
+ "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection",
+ "url": "url is the remote URL to connect to",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+}
+
+func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string {
+ return map_OAuthRemoteConnectionInfo
+}
+
+var map_OAuthSpec = map[string]string{
+ "": "OAuthSpec contains desired cluster auth configuration",
+ "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.",
+ "tokenConfig": "tokenConfig contains options for authorization and access tokens",
+ "templates": "templates allow you to customize pages like the login page.",
+}
+
+func (OAuthSpec) SwaggerDoc() map[string]string {
+ return map_OAuthSpec
+}
+
+var map_OAuthStatus = map[string]string{
+ "": "OAuthStatus shows current known state of OAuth server in the cluster",
+}
+
+func (OAuthStatus) SwaggerDoc() map[string]string {
+ return map_OAuthStatus
+}
+
+var map_OAuthTemplates = map[string]string{
+ "": "OAuthTemplates allow for customization of pages like the login page",
+ "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. 
If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.",
+ "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.",
+ "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.",
+}
+
+func (OAuthTemplates) SwaggerDoc() map[string]string {
+ return map_OAuthTemplates
+}
+
+var map_OpenIDClaims = map[string]string{
+ "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider",
+ "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim",
+ "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity",
+ "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+ "groups": "groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used.",
+}
+
+func (OpenIDClaims) SwaggerDoc() map[string]string {
+ return map_OpenIDClaims
+}
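A sketch of the claim-selection rule above, where the first listed claim with a non-empty value wins (claims simplified to plain strings for illustration):

package main

import "fmt"

func firstNonEmptyClaim(claimNames []string, claims map[string]string) (string, bool) {
	for _, name := range claimNames {
		if v := claims[name]; v != "" {
			return v, true
		}
	}
	return "", false
}

func main() {
	v, ok := firstNonEmptyClaim(
		[]string{"preferred_username", "email"},
		map[string]string{"email": "jane@example.com"},
	)
	fmt.Println(v, ok) // jane@example.com true
}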
The namespace for this config map is openshift-config.", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.", + "claims": "claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.", + "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", + "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. 
Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +} + +var map_HubSource = map[string]string{ + "": "HubSource is used to specify the hub source and its configuration", + "name": "name is the name of one of the default hub sources", + "disabled": "disabled is used to disable a default hub source on cluster", +} + +func (HubSource) SwaggerDoc() map[string]string { + return map_HubSource +} + +var map_HubSourceStatus = map[string]string{ + "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source", + "status": "status indicates success or failure in applying the configuration", + "message": "message provides more information regarding failures", +} + +func (HubSourceStatus) SwaggerDoc() map[string]string { + return map_HubSourceStatus +} + +var map_OperatorHub = map[string]string{ + "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OperatorHub) SwaggerDoc() map[string]string { + return map_OperatorHub +} + +var map_OperatorHubList = map[string]string{ + "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OperatorHubList) SwaggerDoc() map[string]string { + return map_OperatorHubList +} + +var map_OperatorHubSpec = map[string]string{ + "": "OperatorHubSpec defines the desired state of OperatorHub", + "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", + "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. 
The list of default hub sources and their current state will always be reflected in the status block.", +} + +func (OperatorHubSpec) SwaggerDoc() map[string]string { + return map_OperatorHubSpec +} + +var map_OperatorHubStatus = map[string]string{ + "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.", + "sources": "sources encapsulates the result of applying the configuration for each hub source", +} + +func (OperatorHubStatus) SwaggerDoc() map[string]string { + return map_OperatorHubStatus +} + +var map_Project = map[string]string{ + "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec holds the project creation configuration.", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_TemplateReference = map[string]string{ + "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced project request template", +} + +func (TemplateReference) SwaggerDoc() map[string]string { + return map_TemplateReference +} + +var map_Proxy = map[string]string{ + "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user-settable values for the proxy configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (Proxy) SwaggerDoc() map[string]string { + return map_Proxy +} + +var map_ProxyList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ProxyList) SwaggerDoc() map[string]string { + return map_ProxyList +} + +var map_ProxySpec = map[string]string{ + "": "ProxySpec contains cluster proxy creation configuration.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var.", + "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", +} + +func (ProxySpec) SwaggerDoc() map[string]string { + return map_ProxySpec +} + +var map_ProxyStatus = map[string]string{ + "": "ProxyStatus shows current known state of the cluster proxy.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.", +} + +func (ProxyStatus) SwaggerDoc() map[string]string { + return map_ProxyStatus +} + +var map_ProfileCustomizations = map[string]string{ + "": "ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles", + "dynamicResourceAllocation": "dynamicResourceAllocation allows to enable or disable dynamic resource allocation within the scheduler. Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod. Third-party resource drivers are responsible for tracking and allocating resources. Different kinds of resources support arbitrary parameters for defining requirements and initialization. Valid values are Enabled, Disabled and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
The current default is Disabled.", +} + +func (ProfileCustomizations) SwaggerDoc() map[string]string { + return map_ProfileCustomizations +} + +var map_Scheduler = map[string]string{ + "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Scheduler) SwaggerDoc() map[string]string { + return map_Scheduler +} + +var map_SchedulerList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (SchedulerList) SwaggerDoc() map[string]string { + return map_SchedulerList +} + +var map_SchedulerSpec = map[string]string{ + "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", + "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", + "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. Deprecated: no longer needed, since DRA is GA starting with 4.21, and is enabled by' default in the cluster, this field will be removed in 4.24.", + "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", + "mastersSchedulable": "mastersSchedulable allows masters nodes to be schedulable. 
When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", +} + +func (SchedulerSpec) SwaggerDoc() map[string]string { + return map_SchedulerSpec +} + +var map_FeatureGateTests = map[string]string{ + "featureGate": "featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", + "tests": "tests contains an item for every TestName", +} + +func (FeatureGateTests) SwaggerDoc() map[string]string { + return map_FeatureGateTests +} + +var map_TestDetails = map[string]string{ + "testName": "testName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", +} + +func (TestDetails) SwaggerDoc() map[string]string { + return map_TestDetails +} + +var map_TestReporting = map[string]string{ + "": "TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into the payload for later analysis on a per-payload basis. This doesn't need any CRD because it's never stored in the cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (TestReporting) SwaggerDoc() map[string]string { + return map_TestReporting +} + +var map_TestReportingSpec = map[string]string{ + "testsForFeatureGates": "testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", +} + +func (TestReportingSpec) SwaggerDoc() map[string]string { + return map_TestReportingSpec +} + +var map_CustomTLSProfile = map[string]string{ + "": "CustomTLSProfile is a user-defined TLS security profile. 
Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", +} + +func (CustomTLSProfile) SwaggerDoc() map[string]string { + return map_CustomTLSProfile +} + +var map_IntermediateTLSProfile = map[string]string{ + "": "IntermediateTLSProfile is a TLS security profile based on the \"intermediate\" configuration of the Mozilla Server Side TLS configuration guidelines.", +} + +func (IntermediateTLSProfile) SwaggerDoc() map[string]string { + return map_IntermediateTLSProfile +} + +var map_ModernTLSProfile = map[string]string{ + "": "ModernTLSProfile is a TLS security profile based on the \"modern\" configuration of the Mozilla Server Side TLS configuration guidelines.", +} + +func (ModernTLSProfile) SwaggerDoc() map[string]string { + return map_ModernTLSProfile +} + +var map_OldTLSProfile = map[string]string{ + "": "OldTLSProfile is a TLS security profile based on the \"old\" configuration of the Mozilla Server Side TLS configuration guidelines.", +} + +func (OldTLSProfile) SwaggerDoc() map[string]string { + return map_OldTLSProfile +} + +var map_TLSProfileSpec = map[string]string{ + "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11", +} + +func (TLSProfileSpec) SwaggerDoc() map[string]string { + return map_TLSProfileSpec +} + +var map_TLSSecurityProfile = map[string]string{ + "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters.\n\nThe profiles are currently based on version 5.0 of the Mozilla Server Side TLS configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.", + "old": "old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"old\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS10\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA", + "intermediate": "intermediate is a TLS profile for use when you do not need compatibility with legacy clients and want to remain highly secure while being compatible with most clients currently in use.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"intermediate\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS12\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384", + "modern": "modern is a TLS security profile for use with clients that support TLS 1.3 and do not need backward compatibility for older clients.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS13\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this:\n\n minTLSVersion: VersionTLS11\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256", +} + +func (TLSSecurityProfile) SwaggerDoc() map[string]string { + return map_TLSSecurityProfile +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/LICENSE b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/LICENSE similarity index 100% rename from openshift/tools/vendor/github.com/prometheus/client_golang/LICENSE rename to openshift/tools/vendor/github.com/openshift/cluster-capi-operator/LICENSE diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/customizations.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/customizations.go index 7f017c3bf8..7968642878 100644 --- a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/customizations.go +++ b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/customizations.go @@ -1,36 +1,24 @@ package main import ( - "bytes" + "fmt" "strings" certmangerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "github.com/google/go-containerregistry/pkg/name" admissionregistration "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "sigs.k8s.io/kustomize/api/krusty" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -type resourceKey string - -const ( - crdKey resourceKey = "crds" - otherKey resourceKey = "other" - managedByAnnotationValueClusterCAPIOperatorInfraClusterController = "cluster-capi-operator-infracluster-controller" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" ) var ( - openshiftAnnotations = map[string]string{ - "exclude.release.openshift.io/internal-openshift-hosted": "true", - "include.release.openshift.io/self-managed-high-availability": "true", - "include.release.openshift.io/single-node-developer": "true", - } - // Workload annotations are used by the workload admission webhook to modify pod // resources and correctly schedule them while also pinning them to specific CPUSets. // See for more info: @@ -39,105 +27,78 @@ var ( "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, } - // featureSetAnnotationValue is a multiple-feature-sets annotation value - // adhering to the: %s,%s,... notation defined in the openshift/library-go/pkg/manifest parser. - featureSetAnnotationValue = "CustomNoUpgrade,TechPreviewNoUpgrade" - featureSetAnnotationKey = "release.openshift.io/feature-set" + // The expected registry for images used by the cluster-capi-operator. 
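+	// customizeDeployment below checks every container image against this
+	// registry and fails manifest generation on a mismatch rather than
+	// rewriting the reference: registry.ci.openshift.org/openshift:kube-rbac-proxy
+	// passes, while a hypothetical external image such as
+	// quay.io/example/manager:v1 is rejected.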
+ expectedRegistry = "registry.ci.openshift.org" ) -func processObjects(objs []unstructured.Unstructured, providerName string) map[resourceKey][]unstructured.Unstructured { - resourceMap := map[resourceKey][]unstructured.Unstructured{} - providerConfigMapObjs := []unstructured.Unstructured{} - crdObjs := []unstructured.Unstructured{} - - objs = addInfraClusterProtectionPolicy(objs, providerName) +func processObjects(objs []client.Object, opts cmdlineOptions) ([]client.Object, error) { + providerConfigMapObjs := make([]client.Object, 0, len(objs)) serviceSecretNames := findWebhookServiceSecretName(objs) + var extraObjects []client.Object + for _, obj := range objs { - providerCustomizations(&obj, providerName) - switch obj.GetKind() { - case "ClusterRole", "Role", "ClusterRoleBinding", "RoleBinding", "ServiceAccount": - setOpenShiftAnnotations(obj, false) - setNoUpgradeAnnotations(obj) - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "MutatingWebhookConfiguration": - replaceCertManagerAnnotations(&obj) - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "ValidatingWebhookConfiguration": - replaceCertManagerAnnotations(&obj) - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "CustomResourceDefinition": - replaceCertManagerAnnotations(&obj) - setOpenShiftAnnotations(obj, true) - // Apply NoUpgrade annotations unless IPAM CRDs, - // as those are in General Availability. - if !isCRDGroup(&obj, "ipam.cluster.x-k8s.io") { - setNoUpgradeAnnotations(obj) - } - // Store Core CAPI CRDs in their own manifest to get them applied by CVO directly. - // We want these to be installed independently from whether the cluster-capi-operator is enabled, - // as other Openshift components rely on them. - if providerName == coreCAPIProvider { - crdObjs = append(crdObjs, obj) - } else { - providerConfigMapObjs = append(providerConfigMapObjs, obj) + switch getGroup(obj) { + case "admissionregistration.k8s.io": + switch getKind(obj) { + case "MutatingWebhookConfiguration", "ValidatingWebhookConfiguration": + replaceCertManagerAnnotations(obj) } - case "Service": - replaceCertMangerServiceSecret(&obj, serviceSecretNames) - setOpenShiftAnnotations(obj, true) - setNoUpgradeAnnotations(obj) - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "Deployment": - customizeDeployments(&obj) - if providerName == "operator" { - setOpenShiftAnnotations(obj, false) - setNoUpgradeAnnotations(obj) + + case "apiextensions.k8s.io": + switch getKind(obj) { + case "CustomResourceDefinition": + replaceCertManagerAnnotations(obj) + + // Generate a protection policy for an InfraCluster + // If the user provided a specific InfraCluster resource name, match it exactly. + // Otherwise, match any CRD in the 'infrastructure.cluster.x-k8s.io' group that ends in 'clusters'. + crd := &apiextensionsv1.CustomResourceDefinition{} + mustConvert(obj, crd) + if (opts.protectClusterResource != "" && crd.Spec.Names.Singular == opts.protectClusterResource) || + (opts.protectClusterResource == "" && crd.Spec.Group == "infrastructure.cluster.x-k8s.io" && strings.HasSuffix(crd.Spec.Names.Plural, "clusters")) { + protectionPolicy := generateInfraClusterProtectionPolicy(crd) + extraObjects = append(extraObjects, protectionPolicy...) 
+ } } - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "ValidatingAdmissionPolicy": - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "ValidatingAdmissionPolicyBinding": - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "ConfigMap": - providerConfigMapObjs = append(providerConfigMapObjs, obj) - case "Certificate", "Issuer", "Namespace", "Secret": // skip - } - } - resourceMap[crdKey] = crdObjs - resourceMap[otherKey] = providerConfigMapObjs + case "": // core API group + switch getKind(obj) { + case "Service": + replaceCertMangerServiceSecret(obj, serviceSecretNames) - return resourceMap -} + case "Namespace", "Secret": + // Don't emit these resources + continue + } -func setOpenShiftAnnotations(obj unstructured.Unstructured, merge bool) { - if !merge || len(obj.GetAnnotations()) == 0 { - obj.SetAnnotations(openshiftAnnotations) - } + case "apps": + switch getKind(obj) { + case "Deployment": + deploy, err := customizeDeployment(obj) + if err != nil { + return nil, fmt.Errorf("failed to customize deployment %q: %w", obj.GetName(), err) + } + obj = deploy + } - anno := obj.GetAnnotations() - if anno == nil { - anno = map[string]string{} - } + case "cert-manager.io": + // Upstream CAPI uses cert-manager.io for cert management by + // default, and most providers will use it too. Don't emit anything + // related to cert-manager. + continue + } - for k, v := range openshiftAnnotations { - anno[k] = v + providerConfigMapObjs = append(providerConfigMapObjs, obj) } - obj.SetAnnotations(anno) -} -func setNoUpgradeAnnotations(obj unstructured.Unstructured) { - anno := obj.GetAnnotations() - if anno == nil { - anno = map[string]string{} - } + providerConfigMapObjs = append(providerConfigMapObjs, extraObjects...) 
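+	// extraObjects currently holds only the generated InfraCluster protection
+	// policies; appending them last keeps the provider's own objects first in
+	// the emitted manifest.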
- anno[featureSetAnnotationKey] = featureSetAnnotationValue - obj.SetAnnotations(anno) + return providerConfigMapObjs, nil } -func findWebhookServiceSecretName(objs []unstructured.Unstructured) map[string]string { +func findWebhookServiceSecretName(objs []client.Object) map[string]string { serviceSecretNames := map[string]string{} certSecretNames := map[string]string{} @@ -152,25 +113,25 @@ func findWebhookServiceSecretName(objs []unstructured.Unstructured) map[string]s } return secretName, true } + // find service, then cert, then secret // return map[certName] = secretName - for i, obj := range objs { - switch obj.GetKind() { + for _, obj := range objs { + switch getKind(obj) { case "Certificate": cert := &certmangerv1.Certificate{} - if err := scheme.Convert(&objs[i], cert, nil); err != nil { - panic(err) - } + mustConvert(obj, cert) + certSecretNames[cert.Name] = cert.Spec.SecretName } } + for _, obj := range objs { - switch obj.GetKind() { + switch getKind(obj) { case "CustomResourceDefinition": crd := &apiextensionsv1.CustomResourceDefinition{} - if err := scheme.Convert(&obj, crd, nil); err != nil { - panic(err) - } + mustConvert(obj, crd) + if certNN, ok := crd.Annotations["cert-manager.io/inject-ca-from"]; ok { secretName, ok := secretFromCertNN(certNN) if !ok { @@ -183,9 +144,8 @@ func findWebhookServiceSecretName(objs []unstructured.Unstructured) map[string]s case "MutatingWebhookConfiguration": mwc := &admissionregistration.MutatingWebhookConfiguration{} - if err := scheme.Convert(&obj, mwc, nil); err != nil { - panic(err) - } + mustConvert(obj, mwc) + if certNN, ok := mwc.Annotations["cert-manager.io/inject-ca-from"]; ok { secretName, ok := secretFromCertNN(certNN) if !ok { @@ -196,9 +156,8 @@ func findWebhookServiceSecretName(objs []unstructured.Unstructured) map[string]s case "ValidatingWebhookConfiguration": vwc := &admissionregistration.ValidatingWebhookConfiguration{} - if err := scheme.Convert(&obj, vwc, nil); err != nil { - panic(err) - } + mustConvert(obj, vwc) + if certNN, ok := vwc.Annotations["cert-manager.io/inject-ca-from"]; ok { secretName, ok := secretFromCertNN(certNN) if !ok { @@ -211,11 +170,10 @@ func findWebhookServiceSecretName(objs []unstructured.Unstructured) map[string]s return serviceSecretNames } -func customizeDeployments(obj *unstructured.Unstructured) { +func customizeDeployment(obj client.Object) (client.Object, error) { deployment := &appsv1.Deployment{} - if err := scheme.Convert(obj, deployment, nil); err != nil { - panic(err) - } + mustConvert(obj, deployment) + deployment.Spec.Template.Spec.PriorityClassName = "system-cluster-critical" deployment.Spec.Template.Annotations = mergeMaps(deployment.Spec.Template.Annotations, openshiftWorkloadAnnotation) @@ -229,24 +187,24 @@ func customizeDeployments(obj *unstructured.Unstructured) { } // Remove any existing resource limits. See: https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#resources-and-limits container.Resources.Limits = corev1.ResourceList{} - // Remove all image references if they are external, they will be substituted operator later - if !strings.HasPrefix(container.Image, "registry.ci.openshift.org") { - container.Image = "to.be/replaced:v99" - } - if container.Name == "kube-rbac-proxy" { - container.Image = "registry.ci.openshift.org/openshift:kube-rbac-proxy" - } // This helps with debugging and is enforced in OCP, see https://issues.redhat.com/browse/OCPBUGS-33170. 
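+		// With FallbackToLogsOnError the kubelet uses the tail of the container
+		// log as the termination message when the container exits with an error
+		// and no termination message file was written.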
container.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError - } - if err := scheme.Convert(deployment, obj, nil); err != nil { - panic(err) + // We expect all images to use registry.ci.openshift.org. Other images won't be replaced, which would be an error. + ref, err := name.ParseReference(container.Image) + if err != nil { + return nil, fmt.Errorf("failed to parse image reference %q: %w", container.Image, err) + } + if ref.Context().RegistryStr() != expectedRegistry { + return nil, fmt.Errorf("image %q has registry %q, expected %q", container.Image, ref.Context().RegistryStr(), expectedRegistry) + } } + + return deployment, nil } -func replaceCertManagerAnnotations(obj *unstructured.Unstructured) { +func replaceCertManagerAnnotations(obj client.Object) { anns := obj.GetAnnotations() if anns == nil { anns = map[string]string{} @@ -258,7 +216,7 @@ func replaceCertManagerAnnotations(obj *unstructured.Unstructured) { } } -func replaceCertMangerServiceSecret(obj *unstructured.Unstructured, serviceSecretNames map[string]string) { +func replaceCertMangerServiceSecret(obj client.Object, serviceSecretNames map[string]string) { anns := obj.GetAnnotations() if anns == nil { anns = map[string]string{} @@ -269,43 +227,6 @@ func replaceCertMangerServiceSecret(obj *unstructured.Unstructured, serviceSecre } } -// isCRDGroup checks whether the object provided is a CRD for the specified API group. -func isCRDGroup(obj *unstructured.Unstructured, group string) bool { - switch obj.GetKind() { - case "CustomResourceDefinition": - crd := &apiextensions.CustomResourceDefinition{} - if err := scheme.Convert(obj, crd, nil); err != nil { - panic(err) - } - - if crd.Spec.Group == group { - return true - } - - return false - default: - return false - } -} - -// ensureNewLine makes sure that there is one new line at the end of the file for git -func ensureNewLine(b []byte) []byte { - return append(bytes.TrimRight(b, "\n"), []byte("\n")...) -} - -func fetchAndCompileComponents(url string) ([]byte, error) { - k := krusty.MakeKustomizer(krusty.MakeDefaultOptions()) - - fSys := filesys.MakeFsOnDisk() - - m, err := k.Run(fSys, url) - if err != nil { - return nil, err - } - - return m.AsYaml() -} - // Variadic function to merge maps of like kind. // Note: keys of next map will override keys in previous map if previous map contains same key. func mergeMaps[K comparable, V any](maps ...map[K]V) map[K]V { @@ -318,66 +239,96 @@ func mergeMaps[K comparable, V any](maps ...map[K]V) map[K]V { return result } -// addInfraClusterProtectionPolicy adds a Validating Admission Policy and Binding for protecting +// generateInfraClusterProtectionPolicy generates a Validating Admission Policy and Binding for protecting // InfraClusters created by the cluster-capi-operator from deletion and editing. 
-func addInfraClusterProtectionPolicy(objs []unstructured.Unstructured, providerName string) []unstructured.Unstructured { - policy := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "admissionregistration.k8s.io/v1", - "kind": "ValidatingAdmissionPolicy", - "metadata": map[string]interface{}{ - "name": "openshift-cluster-api-protect-" + providerName + "cluster", +func generateInfraClusterProtectionPolicy(crd *apiextensionsv1.CustomResourceDefinition) []client.Object { + var policy client.Object = &admissionregistration.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "openshift-cluster-api-protect-" + crd.Spec.Names.Singular, + }, + Spec: admissionregistration.ValidatingAdmissionPolicySpec{ + FailurePolicy: ptr.To(admissionregistration.Fail), + ParamKind: &admissionregistration.ParamKind{ + APIVersion: "config.openshift.io/v1", + Kind: "Infrastructure", }, - "spec": map[string]interface{}{ - "failurePolicy": "Fail", - "paramKind": map[string]interface{}{ - "apiVersion": "config.openshift.io/v1", - "kind": "Infrastructure", - }, - "matchConstraints": map[string]interface{}{ - "resourceRules": []interface{}{ - map[string]interface{}{ - "apiGroups": []interface{}{"infrastructure.cluster.x-k8s.io"}, - "apiVersions": []interface{}{"*"}, - "operations": []interface{}{"DELETE"}, - "resources": []interface{}{providerName + "clusters"}, + MatchConstraints: &admissionregistration.MatchResources{ + ResourceRules: []admissionregistration.NamedRuleWithOperations{ + { + RuleWithOperations: admissionregistration.RuleWithOperations{ + Operations: []admissionregistration.OperationType{admissionregistration.Delete}, + Rule: admissionregistration.Rule{ + APIGroups: []string{crd.Spec.Group}, + APIVersions: []string{"*"}, + Resources: []string{crd.Spec.Names.Plural}, + }, }, }, }, - "validations": []interface{}{ - map[string]interface{}{ - "expression": "!(oldObject.metadata.name == params.status.infrastructureName)", - "message": "InfraCluster resources with metadata.name corresponding to the cluster infrastructureName cannot be deleted.", - }, + }, + Validations: []admissionregistration.Validation{ + { + Expression: "!(oldObject.metadata.name == params.status.infrastructureName)", + Message: "InfraCluster resources with metadata.name corresponding to the cluster infrastructureName cannot be deleted.", }, }, }, } - binding := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "admissionregistration.k8s.io/v1", - "kind": "ValidatingAdmissionPolicyBinding", - "metadata": map[string]interface{}{ - "name": "openshift-cluster-api-protect-" + providerName + "cluster", + binding := &admissionregistration.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: policy.GetName(), + }, + Spec: admissionregistration.ValidatingAdmissionPolicyBindingSpec{ + ParamRef: &admissionregistration.ParamRef{ + Name: "cluster", + ParameterNotFoundAction: ptr.To(admissionregistration.DenyAction), }, - "spec": map[string]interface{}{ - "paramRef": map[string]interface{}{ - "name": "cluster", - "parameterNotFoundAction": "Deny", - }, - "policyName": "openshift-cluster-api-protect-" + providerName + "cluster", - "validationActions": []interface{}{"Deny"}, - "matchResources": map[string]interface{}{ - "namespaceSelector": map[string]interface{}{ - "matchLabels": map[string]interface{}{ - "kubernetes.io/metadata.name": "openshift-cluster-api", - }, + PolicyName: policy.GetName(), + ValidationActions: 
[]admissionregistration.ValidationAction{admissionregistration.Deny}, + MatchResources: &admissionregistration.MatchResources{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": capiNamespace, }, }, }, }, } - return append(objs, *policy, *binding) + // Set type metadata explicitly so it is present in the serialisation + for _, obj := range []client.Object{policy, binding} { + setTypeMetadataFromScheme(obj, "v1") + } + + // ValidatingAdmissionPolicy serialises with a redundant `status` field + policy = stripStatus(policy) + + return []client.Object{policy, binding} +} + +// stripStatus removes the status field from the serialisation of an object. +func stripStatus(obj client.Object) client.Object { + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + panic(err) + } + delete(unstructuredObj, "status") + return &unstructured.Unstructured{Object: unstructuredObj} +} + +func setTypeMetadataFromScheme(obj client.Object, version string) { + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + panic(err) + } + + // Get the GVK for the given version + for _, gvk := range gvks { + if gvk.Version == version { + obj.GetObjectKind().SetGroupVersionKind(gvk) + return + } + } + panic("no " + version + " GVK found") } diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/generate.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/generate.go new file mode 100644 index 0000000000..d885bcd55f --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/generate.go @@ -0,0 +1,161 @@ +package main + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "os" + "path" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/cluster-capi-operator/pkg/providerimages" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/kustomize/api/krusty" + "sigs.k8s.io/kustomize/kyaml/filesys" + "sigs.k8s.io/yaml" +) + +const ( + // manifestsFilename is the name of the file containing the generated manifests. + manifestsFilename = "manifests.yaml" + + // metadataFilename is the name of the file containing provider metadata. + metadataFilename = "metadata.yaml" + + // capiNamespace is the namespace where capi components are created. 
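+	// The kustomize Component bundled with manifests-gen (kustomization.yaml)
+	// sets the same namespace on the resources it generates.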
+ capiNamespace = "openshift-cluster-api" +) + +func generateManifests(opts cmdlineOptions) error { + fmt.Printf("Processing provider %s\n", opts.name) + + resources, err := generateKustomizeResources(opts.kustomizeDir) + if err != nil { + return fmt.Errorf("failed to generate kustomize resources: %w", err) + } + + // Create output directory for profile + profileDir := path.Join(opts.manifestsPath, opts.profileName) + if err := os.MkdirAll(profileDir, os.ModeDir|0755); err != nil { + return fmt.Errorf("error creating profile directory %s: %w", profileDir, err) + } + + // Perform all manifest transformations + resources, err = processObjects(resources, opts) + if err != nil { + return fmt.Errorf("error processing objects: %w", err) + } + + // Write the manifest file + if err := writeManifests(opts, resources); err != nil { + return fmt.Errorf("error writing manifests: %w", err) + } + + // Write the metadata file + if err := writeMetadata(opts); err != nil { + return fmt.Errorf("error writing metadata: %w", err) + } + + return nil +} + +// generateKustomizeResources generates resources from a kustomize directory. +func generateKustomizeResources(kustomizeDir string) ([]client.Object, error) { + // Compile assets using kustomize. + fmt.Printf("> Generating OpenShift manifests based on kustomize.yaml from %q\n", kustomizeDir) + + k := krusty.MakeKustomizer(krusty.MakeDefaultOptions()) + fSys := filesys.MakeFsOnDisk() + + res, err := k.Run(fSys, kustomizeDir) + if err != nil { + return nil, fmt.Errorf("error fetching and compiling assets using kustomize: %w", err) + } + + resources := make([]client.Object, 0, len(res.Resources())) + for _, resource := range res.Resources() { + if resource == nil { + continue + } + + data, err := resource.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("error marshalling resource to JSON: %w", err) + } + + var unstructured unstructured.Unstructured + if err := json.Unmarshal(data, &unstructured); err != nil { + return nil, fmt.Errorf("error unmarshalling resource to unstructured: %w", err) + } + + resources = append(resources, &unstructured) + } + + return resources, nil +} + +func writeManifests(opts cmdlineOptions, resources []client.Object) (err error) { + manifestsPathname := path.Join(opts.manifestsPath, opts.profileName, manifestsFilename) + + manifestsFile, err := os.OpenFile(manifestsPathname, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return fmt.Errorf("error opening manifests file %s: %w", manifestsPathname, err) + } + + writer := bufio.NewWriter(manifestsFile) + defer func() { + err = errors.Join(err, + writer.Flush(), + manifestsFile.Close()) + }() + + for i, resource := range resources { + data, err := yaml.Marshal(resource) + if err != nil { + return fmt.Errorf("error marshalling object to YAML: %w", err) + } + + if i > 0 { + if _, err := writer.Write([]byte("---\n")); err != nil { + return fmt.Errorf("error writing separator to manifests file: %w", err) + } + } + + if _, err := writer.Write(data); err != nil { + return fmt.Errorf("error writing object to manifests file: %w", err) + } + } + + return nil +} + +func writeMetadata(opts cmdlineOptions) (err error) { + metadataPathname := path.Join(opts.manifestsPath, opts.profileName, metadataFilename) + + metadataFile, err := os.OpenFile(metadataPathname, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return fmt.Errorf("error opening metadata file %s: %w", metadataPathname, err) + } + defer func() { + err = errors.Join(err, 
metadataFile.Close()) + }() + + metadata := providerimages.ProviderMetadata{ + Name: opts.name, + SelfImageRef: opts.selfImageRef, + OCPPlatform: configv1.PlatformType(opts.platform), + InstallOrder: opts.installOrder, + Attributes: opts.attributes, + } + + data, err := yaml.Marshal(metadata) + if err != nil { + return fmt.Errorf("error marshalling metadata to YAML: %w", err) + } + if _, err := metadataFile.Write(data); err != nil { + return fmt.Errorf("error writing metadata to file: %w", err) + } + return nil +} diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/kustomization.yaml b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/kustomization.yaml new file mode 100644 index 0000000000..fd64162903 --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/kustomization.yaml @@ -0,0 +1,17 @@ +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +namespace: openshift-cluster-api + +patches: + +# Retain Secrets internally for reference tracking, but don't emit them +- target: + kind: Secret + patch: | + apiVersion: v1 + kind: Secret + metadata: + name: __ignored__ + annotations: + config.kubernetes.io/local-config: "true" diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/main.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/main.go index df2f2f497a..20838640c1 100644 --- a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/main.go +++ b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/main.go @@ -1,73 +1,144 @@ package main import ( + "errors" "flag" "fmt" "os" - "path" + "slices" + "strings" certmangerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + configv1 "github.com/openshift/api/config/v1" admissionregistration "k8s.io/api/admissionregistration/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/version" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" ) var ( - basePath = flag.String("base-path", "", "path to the root of the provider's repository") - manifestsPath = flag.String("manifests-path", "", "path to the desired directory where to output the generated manifests") - kustomizeDir = flag.String("kustomize-dir", defaultKustomizeComponentsPath, "directory to search for kustomization.yaml file, relative to the base-path") - providerName = flag.String("provider-name", "", "name of the provider") - providerType = flag.String("provider-type", "", "type of the provider") - providerVersion = flag.String("provider-version", "", "version of the provider") - projDir string - - scheme = runtime.NewScheme() - manifestPrefix = "0000_30_cluster-api_" - targetNamespace = "openshift-cluster-api" + allowedPlatformTypes = []string{ + string(configv1.AWSPlatformType), + string(configv1.AzurePlatformType), + string(configv1.BareMetalPlatformType), + string(configv1.EquinixMetalPlatformType), + string(configv1.ExternalPlatformType), + string(configv1.GCPPlatformType), + string(configv1.IBMCloudPlatformType), + string(configv1.KubevirtPlatformType), + string(configv1.LibvirtPlatformType), + string(configv1.NonePlatformType), + string(configv1.NutanixPlatformType), + string(configv1.OpenStackPlatformType), + 
string(configv1.PowerVSPlatformType), + string(configv1.VSpherePlatformType), + } + + scheme = runtime.NewScheme() ) func init() { + // Required by findWebhookServiceSecretName utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(admissionregistration.AddToScheme(scheme)) utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) utilruntime.Must(certmangerv1.AddToScheme(scheme)) } +type cmdlineOptions struct { + manifestsPath string + profileName string + kustomizeDir string + name string + platform string + protectClusterResource string + selfImageRef string + installOrder int + attributes map[string]string +} + +// attributeFlags allows collecting multiple --attribute flags. +type attributeFlags map[string]string + +func (a attributeFlags) String() string { + return fmt.Sprintf("%v", map[string]string(a)) +} + +func (a attributeFlags) Set(value string) error { + parts := strings.SplitN(value, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("attribute must be in key=value format: %s", value) + } + + a[parts[0]] = parts[1] + + return nil +} + func main() { + var ( + manifestsPath = flag.String("manifests-path", "", "Path to the directory where the generated manifests will be written. Required.") + profileName = flag.String("profile-name", "default", "Name of the profile, e.g. 'featuregate-foo' (default: 'default').") + kustomizeDir = flag.String("kustomize-dir", "", "Directory containing the kustomization.yaml file used to generate the base resources, relative to the current working directory. Required.") + + name = flag.String("name", "", "Name of the provider, e.g. 'cluster-api-provider-aws'. Required.") + + platform = flag.String("platform", "", "OpenShift platform type (i.e. the same value found in the Infrastructure object). Optional.") + protectClusterResource = flag.String("protect-cluster-resource", "", "Singular name of a cluster resource, e.g. 'awscluster'. Generates a ValidatingAdmissionPolicy which prevents modification of cluster resources created by the CAPI Operator. If provided, matches any CRD in the manifests with this name. If not provided, matches any CRD in the manifests in the 'infrastructure.cluster.x-k8s.io' group whose plural name ends in 'clusters'. Optional.") + selfImageRef = flag.String("self-image-ref", "", "Image reference of the provider in generated manifests, e.g. registry.ci.openshift.org/openshift:aws-cluster-api-controllers. If specified, this string will be substituted with the provider's release image when the manifests are installed. Optional.") + installOrder = flag.Int("install-order", 0, "Order in which providers are installed. Lower values are installed first. Optional.") + ) + + attributes := make(attributeFlags) + flag.Var(attributes, "attribute", "Provider attribute in key=value format. Can be specified multiple times. 
Optional.") + flag.Parse() - if err := validateFlags(); err != nil { - fmt.Println(err) - os.Exit(1) + opts := cmdlineOptions{ + manifestsPath: *manifestsPath, + profileName: *profileName, + kustomizeDir: *kustomizeDir, + name: *name, + platform: *platform, + protectClusterResource: *protectClusterResource, + selfImageRef: *selfImageRef, + installOrder: *installOrder, + attributes: attributes, } - projDir = path.Join(*basePath) - - p := provider{ - Name: *providerName, - // TODO: improve validation - Type: clusterctlv1.ProviderType(*providerType), - Version: *providerVersion, + if err := validateFlags(opts); err != nil { + fmt.Println(err) + os.Exit(1) } - if err := importProvider(p); err != nil { + if err := generateManifests(opts); err != nil { fmt.Println(err) os.Exit(1) } } -func validateFlags() error { - if *providerName == "" || *providerType == "" || *providerVersion == "" || *basePath == "" || *manifestsPath == "" { - return fmt.Errorf("error mandatory flags must be specified") - } +func validateFlags(opts cmdlineOptions) error { + return errors.Join( + hasValue("kustomize directory", opts.kustomizeDir), + hasValue("manifests path", opts.manifestsPath), + hasValue("name", opts.name), - if _, err := version.ParseSemantic(*providerVersion); err != nil { - return fmt.Errorf("invalid version %s for provider %s", *providerVersion, *providerName) - } + func() error { + // If set, platform must be an allowed platform type + if opts.platform != "" { + if !slices.Contains(allowedPlatformTypes, opts.platform) { + return fmt.Errorf("invalid platform %s for provider %s. Allowed platforms are: %s", opts.platform, opts.name, strings.Join(allowedPlatformTypes, ", ")) + } + } + return nil + }(), + ) +} +func hasValue(description string, value string) error { + if value == "" { + return fmt.Errorf("%s is required", description) + } return nil } diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/providercustomizations.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/providercustomizations.go deleted file mode 100644 index 577c94c3c0..0000000000 --- a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/providercustomizations.go +++ /dev/null @@ -1,104 +0,0 @@ -package main - -import ( - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -func providerCustomizations(obj *unstructured.Unstructured, providerName string) { - switch providerName { - case "azure": - azureCustomizations(obj) - case "gcp": - gcpCustomizations(obj) - case powerVSProvider: - powerVSCustomizations(obj) - } -} - -func azureCustomizations(obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Deployment": - deployment := &appsv1.Deployment{} - if err := scheme.Convert(obj, deployment, nil); err != nil { - panic(err) - } - - // Modify bootstrap secret keys as they don't match with what is created by CCO. 
- for i := range deployment.Spec.Template.Spec.Containers { - container := &deployment.Spec.Template.Spec.Containers[i] - if container.Name == "manager" { - for j := range container.Env { - env := &container.Env[j] - switch env.Name { - case "AZURE_SUBSCRIPTION_ID": - env.ValueFrom.SecretKeyRef.Key = "azure_subscription_id" - case "AZURE_TENANT_ID": - env.ValueFrom.SecretKeyRef.Key = "azure_tenant_id" - case "AZURE_CLIENT_ID": - env.ValueFrom.SecretKeyRef.Key = "azure_client_id" - case "AZURE_CLIENT_SECRET": - env.ValueFrom.SecretKeyRef.Key = "azure_client_secret" - } - } - } - } - - if err := scheme.Convert(deployment, obj, nil); err != nil { - panic(err) - } - } -} - -func gcpCustomizations(obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Deployment": - deployment := &appsv1.Deployment{} - if err := scheme.Convert(obj, deployment, nil); err != nil { - panic(err) - } - - // Modify bootstrap secret keys as they don't match with what is created by CCO. - for i := range deployment.Spec.Template.Spec.Containers { - container := &deployment.Spec.Template.Spec.Containers[i] - if container.Name == "manager" { - for j := range container.Env { - env := &container.Env[j] - switch env.Name { - case "GOOGLE_APPLICATION_CREDENTIALS": - env.Value = "/home/.gcp/service_account.json" - } - } - } - } - - if err := scheme.Convert(deployment, obj, nil); err != nil { - panic(err) - } - } -} - -func powerVSCustomizations(obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Deployment": - deployment := &appsv1.Deployment{} - if err := scheme.Convert(obj, deployment, nil); err != nil { - panic(err) - } - - for i := range deployment.Spec.Template.Spec.Containers { - container := &deployment.Spec.Template.Spec.Containers[i] - if container.Name == "manager" { - for j := range container.Args { - arg := &container.Args[j] - if *arg == "--provider-id-fmt=${PROVIDER_ID_FORMAT:=v1}" { - container.Args[j] = "--provider-id-fmt=v2" - } - } - } - } - if err := scheme.Convert(deployment, obj, nil); err != nil { - panic(err) - } - } -} diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/providers.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/providers.go deleted file mode 100644 index c290d4be45..0000000000 --- a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/providers.go +++ /dev/null @@ -1,270 +0,0 @@ -package main - -import ( - "bytes" - "context" - "fmt" - "os" - "path" - "strings" - - "github.com/klauspost/compress/zstd" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" - configclient "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" - "sigs.k8s.io/cluster-api/cmd/clusterctl/client/repository" - "sigs.k8s.io/cluster-api/cmd/clusterctl/client/yamlprocessor" - utilyaml "sigs.k8s.io/cluster-api/util/yaml" - "sigs.k8s.io/yaml" -) - -const ( - powerVSProvider = "powervs" - ibmCloudProvider = "ibmcloud" - coreCAPIProvider = "cluster-api" - metadataFilePath = "./metadata.yaml" - defaultKustomizeComponentsPath = "./config/default" - // customizedComponentsFilename is a name for file containing customized infrastructure components. - // This file helps with code review as it is always uncompressed unlike the components configMap. 
- customizedComponentsFilename = "infrastructure-components-openshift.yaml" -) - -type provider struct { - Name string `json:"name"` - Type clusterctlv1.ProviderType `json:"type"` - Version string `json:"version"` - components repository.Components - metadata []byte -} - -// loadComponents loads components from the given provider. -func (p *provider) loadComponents() error { - // Create new clusterctl config client - configClient, err := configclient.New(context.Background(), "") - if err != nil { - return fmt.Errorf("error creating clusterctl config client: %w", err) - } - - // Create new clusterctl provider client - providerConfig, err := configClient.Providers().Get(p.Name, p.Type) - if err != nil { - return fmt.Errorf("error creating clusterctl provider client: %w", err) - } - - // Set options for yaml processor - options := repository.ComponentsOptions{ - TargetNamespace: targetNamespace, - SkipTemplateProcess: true, - } - - // Compile assets using kustomize. - kustomizeComponentsPath := path.Join(projDir, *kustomizeDir) - fmt.Printf("> Generating OpenShift manifests based on kustomize.yaml from %q\n", kustomizeComponentsPath) - rawComponents, err := fetchAndCompileComponents(kustomizeComponentsPath) - if err != nil { - return fmt.Errorf("error fetching and compiling assets using kustomize: %w", err) - } - - // Ininitialize new clusterctl repository components, this should some yaml processing - p.components, err = repository.NewComponents(repository.ComponentsInput{ - Provider: providerConfig, - ConfigClient: configClient, - Processor: yamlprocessor.NewSimpleProcessor(), - RawYaml: rawComponents, - Options: options, - }) - if err != nil { - return fmt.Errorf("error initializing new clusterctl components repository: %w", err) - } - - content, err := os.ReadFile(path.Join(projDir, metadataFilePath)) - if err != nil { - return fmt.Errorf("error while reading metadata file: %w", err) - } - - p.metadata = content - - return nil -} - -func (p *provider) providerTypeName() string { - return strings.ReplaceAll(strings.ToLower(string(p.Type)), "provider", "") -} - -// writeProviderComponentsToManifest allows to write provider components directly to a manifest. -// This differs from writeProviderComponentsConfigmap as it won't store the components in a ConfigMap -// but directly on the YAML manifests as YAML representation of an unstructured objects. 
-func (p *provider) writeProviderComponentsToManifest(fileName string, objs []unstructured.Unstructured) error { - if len(objs) == 0 { - return nil - } - - combined, err := utilyaml.FromUnstructured(objs) - if err != nil { - return err - } - - return os.WriteFile(path.Join(*manifestsPath, fileName), ensureNewLine(combined), 0600) -} - -// writeProviderCustomizedComponents writes the customized infrastructure components to allow for code review -func (p *provider) writeProviderCustomizedComponents(resourceMap map[resourceKey][]unstructured.Unstructured) error { - crds, err := utilyaml.FromUnstructured(resourceMap[crdKey]) - if err != nil { - return fmt.Errorf("error converting unstructured object to YAML: %w", err) - } - - other, err := utilyaml.FromUnstructured(resourceMap[otherKey]) - if err != nil { - return fmt.Errorf("error converting unstructured object to YAML: %w", err) - } - - combined := utilyaml.JoinYaml(crds, other) - - return os.WriteFile(path.Join(*basePath, "openshift", customizedComponentsFilename), ensureNewLine(combined), 0600) -} - -// writeProviderComponentsConfigmap allows to write provider components to the provider (transport) ConfigMap. -func (p *provider) writeProviderComponentsConfigmap(fileName string, objs []unstructured.Unstructured) error { - annotations := openshiftAnnotations - annotations[featureSetAnnotationKey] = featureSetAnnotationValue - - cm := corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: p.Name, - Namespace: targetNamespace, - Labels: map[string]string{ - "provider.cluster.x-k8s.io/name": p.Name, - "provider.cluster.x-k8s.io/type": p.providerTypeName(), - "provider.cluster.x-k8s.io/version": p.Version, - }, - Annotations: annotations, - }, - Data: map[string]string{ - "metadata": string(p.metadata), - }, - } - - cm, err := addComponentsToConfigMap(cm, objs) - if err != nil { - return fmt.Errorf("failed to inject components into configmap: %w", err) - } - - cmYaml, err := yaml.Marshal(cm) - if err != nil { - return fmt.Errorf("error marshaling provider ConfigMap to YAML: %w", err) - } - - return os.WriteFile(path.Join(*manifestsPath, fileName), ensureNewLine(cmYaml), 0600) -} - -// addComponentsToConfigMap adds the components to configMap. The components are compressed if their size would be over 950KB. 
-func addComponentsToConfigMap(cm corev1.ConfigMap, objs []unstructured.Unstructured) (corev1.ConfigMap, error) { - combined, err := utilyaml.FromUnstructured(objs) - if err != nil { - return corev1.ConfigMap{}, fmt.Errorf("error converting unstructured object to YAML: %w", err) - } - - compressionRequired := len(combined) > 950_000 // 98KB under the configMap 1MiB limit to leave space for rest of the configMap - if compressionRequired { - compressed, err := compressToZstd(combined) - if err != nil { - return corev1.ConfigMap{}, fmt.Errorf("failed to compress components: %w", err) - } - - cm.BinaryData = map[string][]byte{ - "components-zstd": compressed.Bytes(), - } - } else { - cm.Data["components"] = string(combined) - } - - return cm, nil -} - -// compressToZstd compressses bytes using zstd -func compressToZstd(data []byte) (bytes.Buffer, error) { - var compressed bytes.Buffer - - writer, err := zstd.NewWriter(&compressed, zstd.WithEncoderLevel(zstd.SpeedBestCompression)) - if err != nil { - return bytes.Buffer{}, fmt.Errorf("failed to initialize zstd writer: %w", err) - } - defer writer.Close() - - _, err = writer.Write(data) - if err != nil { - return bytes.Buffer{}, fmt.Errorf("failed to encode using zstd: %w", err) - } - - err = writer.Close() - if err != nil { - return bytes.Buffer{}, fmt.Errorf("failed to close zstd writer: %w", err) - } - - return compressed, nil -} - -func importProvider(p provider) error { - fmt.Printf("Processing provider %s\n", p.Name) - - // Load manifests from github for specific provider - - // for Power VS the upstream cluster api provider name is ibmcloud - // https://github.com/kubernetes-sigs/cluster-api/blob/main/cmd/clusterctl/client/config/providers_client.go#L210-L214 - var initialProviderName string - if p.Name == powerVSProvider { - initialProviderName = powerVSProvider - p.Name = ibmCloudProvider - } - - if err := p.loadComponents(); err != nil { - return fmt.Errorf("error loading provider components: %w", err) - } - if p.Name == powerVSProvider { - p.Name = ibmCloudProvider - } - - // Perform all manifest transformations - - // We need to perform Power VS specific customization which may not needed for ibmcloud - if initialProviderName == powerVSProvider { - p.Name = powerVSProvider - } - resourceMap := processObjects(p.components.Objs(), p.Name) - - // Write RBAC components to manifests, they will be managed by CVO - if p.Name == powerVSProvider { - p.Name = ibmCloudProvider - } - - // Write modified infrastructure components to allow for code review. - if err := p.writeProviderCustomizedComponents(resourceMap); err != nil { - return fmt.Errorf("error writing %v file: %w", customizedComponentsFilename, err) - } - - // Store provider components in the provider ConfigMap. - cmFileName := fmt.Sprintf("%s04_cm.%s-%s.yaml", manifestPrefix, strings.ToLower(p.providerTypeName()), p.Name) - if err := p.writeProviderComponentsConfigmap(cmFileName, resourceMap[otherKey]); err != nil { - return fmt.Errorf("error writing provider ConfigMap: %w", err) - } - - // Optionally write a separate CRD manifest file, - // to apply CRDs directly via CVO rather than through the cluster-capi-operator, - // useful in cases where the platform is not supported but some CRDs are needed - // by other OCP operators other than the cluster-capi-operator. 
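An aside before the final hunk of the removed `importProvider`: the deleted `addComponentsToConfigMap`/`compressToZstd` pair stored the components YAML in the transport ConfigMap, switching to zstd-compressed `binaryData` once the payload crossed 950,000 bytes. A self-contained sketch of that compress/decompress round trip using the same `github.com/klauspost/compress/zstd` API (the payload here is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

// compressToZstd mirrors the removed helper: best-compression zstd over a
// byte slice, returning the compressed buffer.
func compressToZstd(data []byte) (bytes.Buffer, error) {
	var compressed bytes.Buffer
	w, err := zstd.NewWriter(&compressed, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		return bytes.Buffer{}, err
	}
	if _, err := w.Write(data); err != nil {
		return bytes.Buffer{}, err
	}
	if err := w.Close(); err != nil {
		return bytes.Buffer{}, err
	}
	return compressed, nil
}

func main() {
	payload := bytes.Repeat([]byte("apiVersion: v1\n"), 100_000) // illustrative oversized YAML

	if len(payload) > 950_000 { // same threshold as the removed code
		compressed, err := compressToZstd(payload)
		if err != nil {
			panic(err)
		}
		// Consumer side: decode the components-zstd payload back into YAML.
		r, err := zstd.NewReader(&compressed)
		if err != nil {
			panic(err)
		}
		defer r.Close()
		out, _ := io.ReadAll(r)
		fmt.Printf("compressed %d -> %d bytes, round-trips: %v\n",
			len(payload), compressed.Len(), bytes.Equal(out, payload))
	}
}
```

Keeping the compressed copy in `binaryData` under a `components-zstd` key stayed under the ConfigMap's 1MiB object size limit, while the uncompressed companion file remained available for code review.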
- if len(resourceMap[crdKey]) > 0 { - cmFileName := fmt.Sprintf("%s04_crd.%s-%s.yaml", manifestPrefix, strings.ToLower(p.providerTypeName()), p.Name) - if err := p.writeProviderComponentsToManifest(cmFileName, resourceMap[crdKey]); err != nil { - return fmt.Errorf("error writing provider CRDs: %w", err) - } - } - - return nil -} diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/util.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/util.go new file mode 100644 index 0000000000..8facedbc8a --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/manifests-gen/util.go @@ -0,0 +1,25 @@ +package main + +import "sigs.k8s.io/controller-runtime/pkg/client" + +func getKind(obj client.Object) string { + return obj.GetObjectKind().GroupVersionKind().Kind +} + +func getGroup(obj client.Object) string { + return obj.GetObjectKind().GroupVersionKind().Group +} + +func convert[T client.Object](from client.Object, to T) error { + if err := scheme.Convert(from, to, nil); err != nil { + return err + } + to.GetObjectKind().SetGroupVersionKind(from.GetObjectKind().GroupVersionKind()) + return nil +} + +func mustConvert[T client.Object](from client.Object, to T) { + if err := convert(from, to); err != nil { + panic(err) + } +} diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/pkg/providerimages/providerimages.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/pkg/providerimages/providerimages.go new file mode 100644 index 0000000000..d8f3eca59c --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/pkg/providerimages/providerimages.go @@ -0,0 +1,513 @@ +/* +Copyright 2024 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package providerimages + +import ( + "archive/tar" + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strings" + + "github.com/go-logr/logr" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + configv1 "github.com/openshift/api/config/v1" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +const ( + capiManifestsDirName = "capi-operator-manifests" + capiManifestsDir = "/" + capiManifestsDirName + metadataFile = "metadata.yaml" + manifestsFile = "manifests.yaml" + + pullSecretName = "pull-secret" + pullSecretNamespace = "openshift-config" //nolint:gosec // Not a credential, just a namespace name + pullSecretKey = ".dockerconfigjson" //nolint:gosec // Not a credential, just a key name + + // AttributeKeyType is the key for the provider type attribute. + AttributeKeyType = "type" + // AttributeKeyVersion is the key for the provider version attribute. 
+ AttributeKeyVersion = "version" +) + +// ProviderImageManifests represents metadata and manifests read from a provider image. +type ProviderImageManifests struct { + ProviderMetadata + + ImageRef string + Profile string + + ContentID string + ManifestsPath string +} + +// ProviderMetadata is metadata about a provider image provided in the metadata.yaml file. +type ProviderMetadata struct { + Name string `json:"name"` + Attributes map[string]string `json:"attributes,omitempty"` + OCPPlatform configv1.PlatformType `json:"ocpPlatform,omitempty"` + SelfImageRef string `json:"selfImageRef,omitempty"` + InstallOrder int `json:"installOrder,omitempty"` +} + +// MatchesPlatform reports whether this profile should be installed on the +// given cluster platform. Profiles with no OCPPlatform set (empty string) +// are platform-independent and match every cluster. +func (m ProviderMetadata) MatchesPlatform(clusterPlatform configv1.PlatformType) bool { + return m.OCPPlatform == "" || m.OCPPlatform == clusterPlatform +} + +// imageFetcher abstracts fetching container images for testability. +type imageFetcher interface { + Fetch(ctx context.Context, ref name.Reference) (v1.Image, error) +} + +// remoteImageFetcher fetches images from a remote registry. +type remoteImageFetcher struct { + keychain authn.Keychain +} + +// Fetch fetches an image from a remote registry. +func (r remoteImageFetcher) Fetch(ctx context.Context, ref name.Reference) (v1.Image, error) { + img, err := remote.Image(ref, remote.WithAuthFromKeychain(r.keychain), remote.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("failed to fetch remote image: %w", err) + } + + return img, nil +} + +// ReadProviderImages returns a list of ProviderImageManifests read directly +// from operand container images. +// +// containerImages is a map of provider names to provider image references +// +// A provider image may contain a /capi-operator-manifests directory containing the following 2 files: +// - metadata.yaml: a YAML file whose contents are a ProviderMetadata struct +// - manifests.yaml: a KRM containing the provider manifests +// +// If a provider image does not contain a /capi-operator-manifests directory, it is ignored. +// If a provider image contains /capi-operator-manifests but one of the required files is missing, an error is returned. +// +// ReadProviderImages fetches each provider image. If it contains valid CAPI Operator +// manifests, the contents are stored in a local cache directory specified by +// providerImageDir. Manifests are written to a subdirectory named after the +// image reference. +// +// When writing manifests to the cache, any occurrences of `manifestImageName` as +// specified in the provider's metadata.yaml are replaced with the image +// reference. +// +// The pull secret is fetched from the "pull-secret" Secret in the "openshift-config" +// namespace using the provided client.Reader. 
+func ReadProviderImages(ctx context.Context, k8sClient client.Reader, log logr.Logger, containerImages []string, providerImageDir string) ([]ProviderImageManifests, error) { + var secret corev1.Secret + if err := k8sClient.Get(ctx, types.NamespacedName{Name: pullSecretName, Namespace: pullSecretNamespace}, &secret); err != nil { + return nil, fmt.Errorf("failed to get pull secret: %w", err) + } + + pullSecret := secret.Data[pullSecretKey] + + keychain, err := parseDockerConfig(pullSecret) + if err != nil { + return nil, fmt.Errorf("failed to parse pull secret: %w", err) + } + + return readProviderImages(ctx, log, containerImages, providerImageDir, remoteImageFetcher{keychain: keychain}) +} + +type providerImageResult struct { + imageRef string + manifests []ProviderImageManifests + err error +} + +func readProviderImages(ctx context.Context, log logr.Logger, containerImages []string, providerImageDir string, fetcher imageFetcher) ([]ProviderImageManifests, error) { + log.Info("looking for provider manifests in container images") + + results := make(chan providerImageResult, len(containerImages)) + g, ctx := errgroup.WithContext(ctx) + + g.SetLimit(5) // Limit to 5 concurrent fetches + + for _, imageRef := range containerImages { + g.Go(func() error { + manifests, err := processProviderImage(ctx, imageRef, providerImageDir, fetcher) + results <- providerImageResult{ + imageRef: imageRef, + manifests: manifests, + err: err, + } + + return nil // errors are delivered via the results channel + }) + } + + _ = g.Wait() // Always nil: per-image errors are collected from the results channel below + + close(results) + + var providerImages []ProviderImageManifests + + var err error + + for result := range results { + switch { + case result.err != nil: + log.Error(result.err, "failed to process provider image", "image", result.imageRef) + err = errors.Join(err, fmt.Errorf("fetching provider from image %s: %w", result.imageRef, result.err)) + case len(result.manifests) == 0: + log.Info("no provider manifests found in container image", "image", result.imageRef) + default: + for _, manifest := range result.manifests { + log.Info("found provider manifests in container image", "image", result.imageRef, + "provider", manifest.Name, + "type", manifest.Attributes[AttributeKeyType], + "version", manifest.Attributes[AttributeKeyVersion], + "profile", manifest.Profile, + "ocpPlatform", manifest.OCPPlatform) + + providerImages = append(providerImages, manifest) + } + } + } + + log.Info("finished looking for provider manifests in container images") + + if err != nil { + return nil, err + } + + return providerImages, nil +} + +func processProviderImage(ctx context.Context, imageRef, providerImageDir string, fetcher imageFetcher) ([]ProviderImageManifests, error) { + ref, err := name.ParseReference(imageRef) + if err != nil { + return nil, fmt.Errorf("failed to parse image reference %s: %w", imageRef, err) + } + + img, err := fetcher.Fetch(ctx, ref) + if err != nil { + return nil, fmt.Errorf("failed to fetch image %s: %w", imageRef, err) + } + + // Create output directory for this provider image + sanitizedRef := sanitizeImageRef(imageRef) + outputDir := filepath.Join(providerImageDir, sanitizedRef) + + if err := os.MkdirAll(outputDir, 0750); err != nil { + return nil, fmt.Errorf("failed to create output directory: %w", err) + } + + // Extract files from the image to disk + if err := extractCapiManifestsToDir(img, outputDir); err != nil { + return nil, fmt.Errorf("failed to extract manifests from image to %s: %w", outputDir, err) + } + + // Discover profiles from
extracted files + profiles, err := discoverProfiles(outputDir) + if err != nil { + if errors.Is(err, errNoCapiManifests) { + // Image doesn't contain /capi-operator-manifests, skip it + return nil, nil + } + + return nil, fmt.Errorf("failed to discover profiles in %s: %w", outputDir, err) + } + + // Process each profile + results := make([]ProviderImageManifests, 0, len(profiles)) + + for _, profile := range profiles { + profileDir := filepath.Join(outputDir, profile.Profile) + manifestsPath := filepath.Join(profileDir, manifestsFile) + + contentID, err := writeManifestsWithHash(manifestsPath, profile.Manifests, profile.Metadata.SelfImageRef, imageRef) + if err != nil { + return nil, fmt.Errorf("failed to write manifests for profile %s: %w", profile.Profile, err) + } + + results = append(results, ProviderImageManifests{ + ProviderMetadata: *profile.Metadata, + ImageRef: imageRef, + Profile: profile.Profile, + ContentID: contentID, + ManifestsPath: manifestsPath, + }) + } + + return results, nil +} + +var ( + errNoCapiManifests = errors.New("no capi-operator-manifests directory found") + errMissingMetadata = errors.New("missing metadata.yaml in /capi-operator-manifests") + errMissingManifests = errors.New("missing manifests.yaml in /capi-operator-manifests") +) + +// profileManifests holds parsed metadata and manifest content for a single profile. +type profileManifests struct { + Profile string + Metadata *ProviderMetadata + Manifests string +} + +// discoverProfiles scans a directory for valid profiles. +// Each subdirectory must contain both metadata.yaml and manifests.yaml. +// Returns an error if no profiles are found or if any profile is incomplete. +func discoverProfiles(dir string) ([]profileManifests, error) { + entries, err := os.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return nil, errNoCapiManifests + } + + return nil, fmt.Errorf("failed to read directory %s: %w", dir, err) + } + + var profiles []profileManifests + + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + profileName := entry.Name() + profileDir := filepath.Join(dir, profileName) + + profile, isProfile, err := loadProfile(profileName, profileDir) + if err != nil { + return nil, err + } + + if isProfile { + profiles = append(profiles, *profile) + } + } + + if len(profiles) == 0 { + return nil, errNoCapiManifests + } + + return profiles, nil +} + +// loadProfile loads a single profile from a directory. +// Returns isProfile=false if the directory is not a profile (no metadata.yaml or manifests.yaml). +// Returns an error if the profile is incomplete (has one file but not the other).
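Before `loadProfile` itself, a quick sketch of the on-disk cache layout that `processProviderImage` and `discoverProfiles` agree on. `sanitizeImageRef` is the helper defined later in this file; the cache root path is hypothetical:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// sanitizeImageRef mirrors the helper in providerimages.go: characters that
// are invalid in directory names become underscores.
func sanitizeImageRef(imageRef string) string {
	return strings.NewReplacer("/", "_", ":", "_", "@", "_").Replace(imageRef)
}

func main() {
	providerImageDir := "/var/cache/capi-providers" // hypothetical cache root
	imageRef := "registry.ci.openshift.org/openshift:ibmcloud-cluster-api-controllers"
	profile := "default"

	// Each profile lives in <cache>/<sanitized image ref>/<profile>/.
	dir := filepath.Join(providerImageDir, sanitizeImageRef(imageRef), profile)
	fmt.Println(filepath.Join(dir, "metadata.yaml"))
	fmt.Println(filepath.Join(dir, "manifests.yaml"))
}
```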
+func loadProfile(profileName, profileDir string) (profile *profileManifests, isProfile bool, err error) { + metadataPath := filepath.Join(profileDir, metadataFile) + manifestsPath := filepath.Join(profileDir, manifestsFile) + + metadataInfo, metadataErr := os.Stat(metadataPath) + manifestsInfo, manifestsErr := os.Stat(manifestsPath) + + metadataExists := metadataErr == nil && !metadataInfo.IsDir() + manifestsExists := manifestsErr == nil && !manifestsInfo.IsDir() + + if !metadataExists && !manifestsExists { + return nil, false, nil + } + + if !metadataExists { + return nil, false, fmt.Errorf("profile %s: %w", profileName, errMissingMetadata) + } + + if !manifestsExists { + return nil, false, fmt.Errorf("profile %s: %w", profileName, errMissingManifests) + } + + metadataContent, err := os.ReadFile(metadataPath) //nolint:gosec // path constructed from trusted input + if err != nil { + return nil, false, fmt.Errorf("failed to read metadata for profile %s: %w", profileName, err) + } + + manifestsContent, err := os.ReadFile(manifestsPath) //nolint:gosec // path constructed from trusted input + if err != nil { + return nil, false, fmt.Errorf("failed to read manifests for profile %s: %w", profileName, err) + } + + var metadata ProviderMetadata + if err := yaml.Unmarshal(metadataContent, &metadata); err != nil { + return nil, false, fmt.Errorf("failed to parse metadata.yaml for profile %s: %w", profileName, err) + } + + return &profileManifests{ + Profile: profileName, + Metadata: &metadata, + Manifests: string(manifestsContent), + }, true, nil +} + +// extractCapiManifestsToDir extracts all files under /capi-operator-manifests from the +// image to destDir. Iterates layers top to bottom so higher layers take precedence. +func extractCapiManifestsToDir(img v1.Image, destDir string) error { + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("failed to get image layers: %w", err) + } + + // Iterate layers in reverse order (top to bottom) since higher layers + // take precedence. extractFilesToDir skips files that already exist. + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + + rc, err := layer.Uncompressed() + if err != nil { + return fmt.Errorf("failed to uncompress layer: %w", err) + } + + err = extractFilesToDir(rc, capiManifestsDir, destDir) + + err = errors.Join(err, rc.Close()) + if err != nil { + return err + } + } + + return nil +} + +// extractFilesToDir extracts all files under prefix from a tar stream to destDir. +// Files are written preserving their relative path under the prefix. +// e.g., prefix="/capi-operator-manifests", file="/capi-operator-manifests/default/foo.yaml" +// → written to destDir/default/foo.yaml +// Files that already exist are skipped (caller iterates layers top to bottom). +func extractFilesToDir(r io.Reader, prefix, destDir string) error { + tr := tar.NewReader(r) + // Ensure prefix has leading slash and no trailing slash for consistent matching + prefix = path.Clean("/" + prefix) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + + if err != nil { + return fmt.Errorf("failed to read tar: %w", err) + } + + // Directory entries in tar are just markers with no content. + // We create directories on-demand when writing files. 
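+		// Note on normalization: path.Clean("/"+header.Name) maps entry names
+		// "./capi-operator-manifests/x.yaml", "capi-operator-manifests/x.yaml",
+		// and "/capi-operator-manifests/x.yaml" to the same absolute path, so
+		// the prefix match below does not depend on how the layer tarball
+		// spelled its entries.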
+ if header.Typeflag == tar.TypeDir { + continue + } + + // Normalize the path (remove leading ./ or /) + // Use path (not filepath) since tar always uses forward slashes + normalized := path.Clean("/" + header.Name) + + // Check if file is under our prefix + if !strings.HasPrefix(normalized, prefix+"/") { + continue + } + + // Get relative path under prefix + relPath := strings.TrimPrefix(normalized, prefix+"/") + + // Write file to destination + destPath := filepath.Join(destDir, relPath) + + // Skip if file already exists (higher layer already wrote it) + if _, err := os.Stat(destPath); err == nil { + continue + } + + if err := os.MkdirAll(filepath.Dir(destPath), 0750); err != nil { + return fmt.Errorf("failed to create directory for %s: %w", destPath, err) + } + + if err := writeFileFromTar(tr, destPath); err != nil { + return fmt.Errorf("failed to write %s: %w", destPath, err) + } + } + + return nil +} + +func writeFileFromTar(tr *tar.Reader, destPath string) (err error) { + f, err := os.Create(destPath) //nolint:gosec // path is constructed internally + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + + defer func() { + err = errors.Join(err, f.Close()) + }() + + if _, err := io.Copy(f, tr); err != nil { + return fmt.Errorf("failed to write file: %w", err) + } + + return nil +} + +func sanitizeImageRef(imageRef string) string { + // Replace characters that aren't valid in directory names + replacer := strings.NewReplacer( + "/", "_", + ":", "_", + "@", "_", + ) + + return replacer.Replace(imageRef) +} + +// writeManifestsWithHash writes manifest content to a file while calculating its hash. +// If manifestImageName is non-empty, it replaces occurrences with imageRef during streaming. +// Returns the sha256 hex-encoded hash of the final content. +func writeManifestsWithHash(path, content, manifestImageName, imageRef string) (_ string, err error) { + f, err := os.Create(path) //nolint:gosec + if err != nil { + return "", fmt.Errorf("failed to create manifests file: %w", err) + } + + defer func() { + err = errors.Join(err, f.Close()) + }() + + hash := sha256.New() + mw := io.MultiWriter(f, hash) + + if manifestImageName != "" { + replacer := strings.NewReplacer(manifestImageName, imageRef) + if _, err := replacer.WriteString(mw, content); err != nil { + return "", fmt.Errorf("failed to write manifests: %w", err) + } + } else { + if _, err := io.WriteString(mw, content); err != nil { + return "", fmt.Errorf("failed to write manifests: %w", err) + } + } + + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/pkg/providerimages/pullsecret.go b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/pkg/providerimages/pullsecret.go new file mode 100644 index 0000000000..af99238eae --- /dev/null +++ b/openshift/tools/vendor/github.com/openshift/cluster-capi-operator/pkg/providerimages/pullsecret.go @@ -0,0 +1,75 @@ +/* +Copyright 2024 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package providerimages + +import ( + "bytes" + "fmt" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/types" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" +) + +func parseDockerConfig(pullSecret []byte) (authn.Keychain, error) { + if len(pullSecret) == 0 { + return authn.DefaultKeychain, nil + } + + cf, err := config.LoadFromReader(bytes.NewReader(pullSecret)) + if err != nil { + return nil, fmt.Errorf("failed to parse docker config: %w", err) + } + + return &configFileKeychain{cf: cf}, nil +} + +// configFileKeychain implements authn.Keychain using a docker config file. +type configFileKeychain struct { + cf *configfile.ConfigFile +} + +// Resolve resolves the authentication credentials for a given resource. +func (k *configFileKeychain) Resolve(resource authn.Resource) (authn.Authenticator, error) { + // Try to get auth config for the registry + key := resource.RegistryStr() + if key == name.DefaultRegistry { + key = authn.DefaultAuthKey + } + + cfg, err := k.cf.GetAuthConfig(key) + if err != nil { + return nil, fmt.Errorf("failed to get auth config for %s: %w", key, err) + } + + // Check if we got an empty config + empty := types.AuthConfig{} + cfg.ServerAddress = "" // Clear for comparison + + if cfg == empty { + return authn.Anonymous, nil + } + + return authn.FromConfig(authn.AuthConfig{ + Username: cfg.Username, + Password: cfg.Password, + Auth: cfg.Auth, + IdentityToken: cfg.IdentityToken, + RegistryToken: cfg.RegistryToken, + }), nil +} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.dockerignore b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.dockerignore deleted file mode 100644 index 7b5883475d..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.gitattributes deleted file mode 100644 index 34a0a21a36..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.gitattributes +++ /dev/null @@ -1,4 +0,0 @@ -* text=auto - -benchmark/benchmark.toml text eol=lf -testdata/** text eol=lf diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.gitignore b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.gitignore deleted file mode 100644 index 4b7c4eda3a..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen -dist -tests/ diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.golangci.toml deleted file mode 100644 index 067db55174..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.golangci.toml +++ /dev/null @@ -1,84 +0,0 @@ -[service] -golangci-lint-version = "1.39.0" - -[linters-settings.wsl] -allow-assign-and-anything = true - -[linters-settings.exhaustive] -default-signifies-exhaustive = true - -[linters] -disable-all = true -enable = [ - "asciicheck", - "bodyclose", - "cyclop", - "deadcode", - "depguard", - "dogsled", - "dupl", - "durationcheck", - "errcheck", - "errorlint", - "exhaustive", - # "exhaustivestruct", - "exportloopref", - "forbidigo", - # "forcetypeassert", - 
"funlen", - "gci", - # "gochecknoglobals", - "gochecknoinits", - "gocognit", - "goconst", - "gocritic", - "gocyclo", - "godot", - "godox", - # "goerr113", - "gofmt", - "gofumpt", - "goheader", - "goimports", - "golint", - "gomnd", - # "gomoddirectives", - "gomodguard", - "goprintffuncname", - "gosec", - "gosimple", - "govet", - # "ifshort", - "importas", - "ineffassign", - "lll", - "makezero", - "misspell", - "nakedret", - "nestif", - "nilerr", - # "nlreturn", - "noctx", - "nolintlint", - #"paralleltest", - "prealloc", - "predeclared", - "revive", - "rowserrcheck", - "sqlclosecheck", - "staticcheck", - "structcheck", - "stylecheck", - # "testpackage", - "thelper", - "tparallel", - "typecheck", - "unconvert", - "unparam", - "unused", - "varcheck", - "wastedassign", - "whitespace", - # "wrapcheck", - # "wsl" -] diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml deleted file mode 100644 index ec52857a3e..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ /dev/null @@ -1,127 +0,0 @@ -version: 2 -before: - hooks: - - go mod tidy - - go fmt ./... - - go test ./... -builds: - - id: tomll - main: ./cmd/tomll - binary: tomll - env: - - CGO_ENABLED=0 - flags: - - -trimpath - ldflags: - - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - linux_amd64 - - linux_arm64 - - linux_arm - - linux_riscv64 - - windows_amd64 - - windows_arm64 - - windows_arm - - darwin_amd64 - - darwin_arm64 - - id: tomljson - main: ./cmd/tomljson - binary: tomljson - env: - - CGO_ENABLED=0 - flags: - - -trimpath - ldflags: - - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - linux_amd64 - - linux_arm64 - - linux_arm - - linux_riscv64 - - windows_amd64 - - windows_arm64 - - windows_arm - - darwin_amd64 - - darwin_arm64 - - id: jsontoml - main: ./cmd/jsontoml - binary: jsontoml - env: - - CGO_ENABLED=0 - flags: - - -trimpath - ldflags: - - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - linux_amd64 - - linux_arm64 - - linux_riscv64 - - linux_arm - - windows_amd64 - - windows_arm64 - - windows_arm - - darwin_amd64 - - darwin_arm64 -universal_binaries: - - id: tomll - replace: true - name_template: tomll - - id: tomljson - replace: true - name_template: tomljson - - id: jsontoml - replace: true - name_template: jsontoml -archives: -- id: jsontoml - format: tar.xz - builds: - - jsontoml - files: - - none* - name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" -- id: tomljson - format: tar.xz - builds: - - tomljson - files: - - none* - name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" -- id: tomll - format: tar.xz - builds: - - tomll - files: - - none* - name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" -dockers: - - id: tools - goos: linux - goarch: amd64 - ids: - - jsontoml - - tomljson - - tomll - image_templates: - - "ghcr.io/pelletier/go-toml:latest" - - "ghcr.io/pelletier/go-toml:{{ .Tag }}" - - "ghcr.io/pelletier/go-toml:v{{ .Major }}" - skip_push: false -checksum: - name_template: 'sha256sums.txt' -snapshot: - name_template: "{{ incpatch .Version }}-next" -release: - github: - owner: pelletier - name: go-toml - draft: true - prerelease: auto - mode: replace 
-changelog: - use: github-native -announce: - skip: true diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md deleted file mode 100644 index 96ecf9e2b3..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md +++ /dev/null @@ -1,193 +0,0 @@ -# Contributing - -Thank you for your interest in go-toml! We appreciate you considering -contributing to go-toml! - -The main goal is the project is to provide an easy-to-use and efficient TOML -implementation for Go that gets the job done and gets out of your way – dealing -with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or small, is -more than welcomed! - -## Ask questions - -Any question you may have, somebody else might have it too. Always feel free to -ask them on the [discussion board][discussions]. We will try to answer them as -clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and ask -away! - -[discussions]: https://github.com/pelletier/go-toml/discussions - -## Improve the documentation - -The best way to share your knowledge and experience with go-toml is to improve -the documentation. Fix a typo, clarify an interface, add an example, anything -goes! - -The documentation is present in the [README][readme] and thorough the source -code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change -to the documentation, create a pull request with your proposed changes. For -simple changes like that, the easiest way to go is probably the "Fork this -project and edit the file" button on Github, displayed at the top right of the -file. Unless it's a trivial change (for example a typo), provide a little bit of -context in your pull request description or commit message. - -## Report a bug - -Found a bug! Sorry to hear that :(. Help us and other track them down and fix by -reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on what to -include. When in doubt: add more details! By reducing ambiguity and providing -more information, it decreases back and forth and saves everyone time. - -## Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -- A short proposal with some POC code is better than a lengthy piece of text - with no code. Code speaks louder than words. That being said, bigger changes - should probably start with a [discussion][discussions]. -- No backward-incompatible patch will be accepted unless discussed. Sometimes - it's hard, but we try not to break people's programs unless we absolutely have - to. -- If you are writing a new feature or extending an existing one, make sure to - write some documentation. -- Bug fixes need to be accompanied with regression tests. -- New code needs to be tested. -- Your commit messages need to explain why the change is needed, even if already - included in the PR description. - -It does sound like a lot, but those best practices are here to save time overall -and continuously improve the quality of the project, which is something everyone -benefits from. 
- -### Get started - -The fairly standard code contribution process looks like that: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request] -4. Review, potential ask for changes. -5. Merge. - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -### Run the tests - -You can run tests for go-toml using Go's test tool: `go test -race ./...`. - -During the pull request process, all tests will be ran on Linux, Windows, and -MacOS on the last two versions of Go. - -However, given GitHub's new policy to _not_ run Actions on pull requests until a -maintainer clicks on button, it is highly recommended that you run them locally -as you make changes. - -### Check coverage - -We use `go tool cover` to compute test coverage. Most code editors have a way to -run and display code coverage, but at the end of the day, we do this: - -``` -go test -covermode=atomic -coverprofile=coverage.out -go tool cover -func=coverage.out -``` - -and verify that the overall percentage of tested code does not go down. This is -a requirement. As a rule of thumb, all lines of code touched by your changes -should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your -code lowers the coverage. - -### Verify performance - -Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's -builtin benchmark systems. Because of their noisy nature, containers provided by -Github Actions cannot be reliably used for benchmarking. As a result, you are -responsible for checking that your changes do not incur a performance penalty. -You can run their following to execute benchmarks: - -``` -go test ./... -bench=. -count=10 -``` - -Benchmark results should be compared against each other with -[benchstat][benchstat]. Typical flow looks like this: - -1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to - a file (for example `old.txt`). -2. Make some code changes. -3. Run `go test ....` again, and save the output to an other file (for example - `new.txt`). -4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any - test. - -On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts -performance. - -It is highly encouraged to add the benchstat results to your pull request -description. Pull requests that lower performance will receive more scrutiny. - -[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat - -### Style - -Try to look around and follow the same format and structure as the rest of the -code. We enforce using `go fmt` on the whole code base. - ---- - -## Maintainers-only - -### Merge pull request - -Checklist: - -- Passing CI. -- Does not introduce backward-incompatible changes (unless discussed). -- Has relevant doc changes. -- Benchstat does not show performance regression. -- Pull request is [labeled appropriately][pr-labels]. -- Title will be understandable in the changelog. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -### New release - -1. Decide on the next version number. Use semver. Review commits since last - version to assess. -2. Tag release. For example: -``` -git checkout v2 -git pull -git tag v2.2.0 -git push --tags -``` -3. CI automatically builds a draft Github release. Review it and edit as - necessary. 
Look for "Other changes". That would indicate a pull request not - labeled properly. Tweak labels and pull request titles until changelog looks - good for users. -4. Check "create discussion" box, in the "Releases" category. -5. If new version is an alpha or beta only, check pre-release box. - - -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[new-release]: https://github.com/pelletier/go-toml/releases/new -[gh]: https://github.com/cli/cli -[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/Dockerfile deleted file mode 100644 index b9e9332379..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM scratch -ENV PATH "$PATH:/bin" -COPY tomll /bin/tomll -COPY tomljson /bin/tomljson -COPY jsontoml /bin/jsontoml diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/LICENSE b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/LICENSE deleted file mode 100644 index 991e2ae966..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -go-toml v2 -Copyright (c) 2021 - 2023 Thomas Pelletier - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/README.md b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/README.md deleted file mode 100644 index 0755e55642..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/README.md +++ /dev/null @@ -1,576 +0,0 @@ -# go-toml v2 - -Go library for the [TOML](https://toml.io/en/) format. - -This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0). - -[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues) - -[💬 Anything else](https://github.com/pelletier/go-toml/discussions) - -## Documentation - -Full API, examples, and implementation notes are available in the Go -documentation. 
- -[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2) - -## Import - -```go -import "github.com/pelletier/go-toml/v2" -``` - -See [Modules](#Modules). - -## Features - -### Stdlib behavior - -As much as possible, this library is designed to behave similarly as the -standard library's `encoding/json`. - -### Performance - -While go-toml favors usability, it is written with performance in mind. Most -operations should not be shockingly slow. See [benchmarks](#benchmarks). - -### Strict mode - -`Decoder` can be set to "strict mode", which makes it error when some parts of -the TOML document was not present in the target structure. This is a great way -to check for typos. [See example in the documentation][strict]. - -[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields - -### Contextualized errors - -When most decoding errors occur, go-toml returns [`DecodeError`][decode-err], -which contains a human readable contextualized version of the error. For -example: - -``` -1| [server] -2| path = 100 - | ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string -3| port = 50 -``` - -[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError - -### Local date and time support - -TOML supports native [local date/times][ldt]. It allows to represent a given -date, time, or date-time without relation to a timezone or offset. To support -this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and -[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`, -making them convenient yet unambiguous structures for their respective TOML -representation. - -[ldt]: https://toml.io/en/v1.0.0#local-date-time -[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate -[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime -[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime - -### Commented config - -Since TOML is often used for configuration files, go-toml can emit documents -annotated with [comments and commented-out values][comments-example]. For -example, it can generate the following file: - -```toml -# Host IP to connect to. -host = '127.0.0.1' -# Port of the remote server. -port = 4242 - -# Encryption parameters (optional) -# [TLS] -# cipher = 'AEAD-AES128-GCM-SHA256' -# version = 'TLS 1.3' -``` - -[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented - -## Getting started - -Given the following struct, let's see how to read it and write it as TOML: - -```go -type MyConfig struct { - Version int - Name string - Tags []string -} -``` - -### Unmarshaling - -[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its -content. 
For example: - -```go -doc := ` -version = 2 -name = "go-toml" -tags = ["go", "toml"] -` - -var cfg MyConfig -err := toml.Unmarshal([]byte(doc), &cfg) -if err != nil { - panic(err) -} -fmt.Println("version:", cfg.Version) -fmt.Println("name:", cfg.Name) -fmt.Println("tags:", cfg.Tags) - -// Output: -// version: 2 -// name: go-toml -// tags: [go toml] -``` - -[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal - -### Marshaling - -[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure -as a TOML document: - -```go -cfg := MyConfig{ - Version: 2, - Name: "go-toml", - Tags: []string{"go", "toml"}, -} - -b, err := toml.Marshal(cfg) -if err != nil { - panic(err) -} -fmt.Println(string(b)) - -// Output: -// Version = 2 -// Name = 'go-toml' -// Tags = ['go', 'toml'] -``` - -[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal - -## Unstable API - -This API does not yet follow the backward compatibility guarantees of this -library. They provide early access to features that may have rough edges or an -API subject to change. - -### Parser - -Parser is the unstable API that allows iterative parsing of a TOML document at -the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable. - -## Benchmarks - -Execution time speedup compared to other Go TOML libraries: - - - - - - - - - - - - - -
-| Benchmark                         | go-toml v1 | BurntSushi/toml |
-|-----------------------------------|------------|-----------------|
-| Marshal/HugoFrontMatter-2         | 1.9x       | 2.2x            |
-| Marshal/ReferenceFile/map-2       | 1.7x       | 2.1x            |
-| Marshal/ReferenceFile/struct-2    | 2.2x       | 3.0x            |
-| Unmarshal/HugoFrontMatter-2       | 2.9x       | 2.7x            |
-| Unmarshal/ReferenceFile/map-2     | 2.6x       | 2.7x            |
-| Unmarshal/ReferenceFile/struct-2  | 4.6x       | 5.1x            |
-
-<details><summary>See more</summary>
-
-The table above has the results of the most common use-cases. The table below
-contains the results of all benchmarks, including unrealistic ones. It is
-provided for completeness.
-
-| Benchmark                         | go-toml v1 | BurntSushi/toml |
-|-----------------------------------|------------|-----------------|
-| Marshal/SimpleDocument/map-2      | 1.8x       | 2.7x            |
-| Marshal/SimpleDocument/struct-2   | 2.7x       | 3.8x            |
-| Unmarshal/SimpleDocument/map-2    | 3.8x       | 3.0x            |
-| Unmarshal/SimpleDocument/struct-2 | 5.6x       | 4.1x            |
-| UnmarshalDataset/example-2        | 3.0x       | 3.2x            |
-| UnmarshalDataset/code-2           | 2.3x       | 2.9x            |
-| UnmarshalDataset/twitter-2        | 2.6x       | 2.7x            |
-| UnmarshalDataset/citm_catalog-2   | 2.2x       | 2.3x            |
-| UnmarshalDataset/canada-2         | 1.8x       | 1.5x            |
-| UnmarshalDataset/config-2         | 4.1x       | 2.9x            |
-| geomean                           | 2.7x       | 2.8x            |
-
-This table can be generated with `./ci.sh benchmark -a -html`.
-
-</details>
- -## Modules - -go-toml uses Go's standard modules system. - -Installation instructions: - -- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals - with it automatically. -- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`. - -In case of trouble: [Go Modules FAQ][mod-faq]. - -[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module - -## Tools - -Go-toml provides three handy command line tools: - - * `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest - $ tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest - $ jsontoml --help - ``` - - * `tomll`: Lints and reformats a TOML file. - - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest - $ tomll --help - ``` - -### Docker image - -Those tools are also available as a [Docker image][docker]. For example, to use -`tomljson`: - -``` -docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml -``` - -Multiple versions are available on [ghcr.io][docker]. - -[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml - -## Migrating from v1 - -This section describes the differences between v1 and v2, with some pointers on -how to get the original behavior when possible. - -### Decoding / Unmarshal - -#### Automatic field name guessing - -When unmarshaling to a struct, if a key in the TOML document does not exactly -match the name of a struct field or any of the `toml`-tagged field, v1 tries -multiple variations of the key ([code][v1-keys]). - -V2 instead does a case-insensitive matching, like `encoding/json`. - -This could impact you if you are relying on casing to differentiate two fields, -and one of them is a not using the `toml` struct tag. The recommended solution -is to be specific about tag names for those fields using the `toml` struct tag. - -[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781 - -#### Ignore preexisting value in interface - -When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the -element in the interface to decode the object. For example: - -```go -type inner struct { - B interface{} -} -type doc struct { - A interface{} -} - -d := doc{ - A: inner{ - B: "Before", - }, -} - -data := ` -[A] -B = "After" -` - -toml.Unmarshal([]byte(data), &d) -fmt.Printf("toml v1: %#v\n", d) - -// toml v1: main.doc{A:main.inner{B:"After"}} -``` - -In this case, field `A` is of type `interface{}`, containing a `inner` struct. -V1 sees that type and uses it when decoding the object. - -When decoding an object into an `interface{}`, V2 instead disregards whatever -value the `interface{}` may contain and replaces it with a -`map[string]interface{}`. With the same data structure as above, here is what -the result looks like: - -```go -toml.Unmarshal([]byte(data), &d) -fmt.Printf("toml v2: %#v\n", d) - -// toml v2: main.doc{A:map[string]interface {}{"B":"After"}} -``` - -This is to match `encoding/json`'s behavior. There is no way to make the v2 -decoder behave like v1. - -#### Values out of array bounds ignored - -When decoding into an array, v1 returns an error when the number of elements -contained in the doc is superior to the capacity of the array. 
For example: - -```go -type doc struct { - A [2]string -} -d := doc{} -err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) -fmt.Println(err) - -// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2) -``` - -In the same situation, v2 ignores the last value: - -```go -err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) -fmt.Println("err:", err, "d:", d) -// err: d: {[one two]} -``` - -This is to match `encoding/json`'s behavior. There is no way to make the v2 -decoder behave like v1. - -#### Support for `toml.Unmarshaler` has been dropped - -This method was not widely used, poorly defined, and added a lot of complexity. -A similar effect can be achieved by implementing the `encoding.TextUnmarshaler` -interface and using strings. - -#### Support for `default` struct tag has been dropped - -This feature adds complexity and a poorly defined API for an effect that can be -accomplished outside of the library. - -It does not seem like other format parsers in Go support that feature (the -project referenced in the original ticket #202 has not been updated since 2017). -Given that go-toml v2 should not touch values not in the document, the same -effect can be achieved by pre-filling the struct with defaults (libraries like -[go-defaults][go-defaults] can help). Also, string representation is not well -defined for all types: it creates issues like #278. - -The recommended replacement is pre-filling the struct before unmarshaling. - -[go-defaults]: https://github.com/mcuadros/go-defaults - -#### `toml.Tree` replacement - -This structure was the initial attempt at providing a document model for -go-toml. It allows manipulating the structure of any document, encoding and -decoding it from its TOML representation. While a more robust feature was -initially planned in go-toml v2, this has ultimately been [removed from the -scope][nodoc] of this library, with no plan to add it back at the moment. The -closest equivalent at the moment would be to unmarshal into an `interface{}` and -use type assertions and/or reflection to manipulate the arbitrary -structure. However, this would fall short of providing all of the TOML features -such as adding comments and being specific about whitespace. - - -#### `toml.Position` is not retrievable anymore - -The API for retrieving the position (line, column) of a specific TOML element -no longer exists. This was done to minimize the number of concepts introduced -by the library (query path), and avoid the performance hit related to storing -positions in the absence of a document model, for a feature that seemed to have -little use. Errors, however, have gained more detailed position -information. Position retrieval seems better suited to a document model, which -has been [removed from the scope][nodoc] of go-toml v2 at the moment. - -### Encoding / Marshal - -#### Default struct fields order - -V1 emits struct fields ordered alphabetically by default. V2 emits struct -fields in the order they are defined. For example: - -```go -type S struct { - B string - A string -} - -data := S{ - B: "B", - A: "A", -} - -b, _ := tomlv1.Marshal(data) -fmt.Println("v1:\n" + string(b)) - -b, _ = tomlv2.Marshal(data) -fmt.Println("v2:\n" + string(b)) - -// Output: -// v1: -// A = "A" -// B = "B" - -// v2: -// B = 'B' -// A = 'A' -``` - -There is no way to make the v2 encoder behave like v1. A workaround could be to -manually sort the fields alphabetically in the struct definition, or to -generate struct types using `reflect.StructOf`, as sketched below.
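-A minimal sketch of the `reflect.StructOf` route, assuming a flat struct whose
-fields are all exported; `marshalSorted` is a hypothetical helper, not part of
-go-toml (imports of `reflect` and `sort` omitted, as in the other snippets):
-
-```go
-func marshalSorted(v interface{}) ([]byte, error) {
-	rv := reflect.ValueOf(v)
-	rt := rv.Type()
-
-	// Collect the fields and sort them by name.
-	fields := make([]reflect.StructField, rt.NumField())
-	for i := range fields {
-		fields[i] = rt.Field(i)
-	}
-	sort.Slice(fields, func(i, j int) bool {
-		return fields[i].Name < fields[j].Name
-	})
-
-	// Build a struct type with the sorted fields and copy the values
-	// over. Note that reflect.StructOf panics on unexported fields.
-	sorted := reflect.New(reflect.StructOf(fields)).Elem()
-	for _, f := range fields {
-		sorted.FieldByName(f.Name).Set(rv.FieldByName(f.Name))
-	}
-	return toml.Marshal(sorted.Interface())
-}
-```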
- -#### No indentation by default - -V1 automatically indents content of tables by default. V2 does not. However, the -same behavior can be obtained using [`Encoder.SetIndentTables`][sit]. For example: - -```go -data := map[string]interface{}{ - "table": map[string]string{ - "key": "value", - }, -} - -b, _ := tomlv1.Marshal(data) -fmt.Println("v1:\n" + string(b)) - -b, _ = tomlv2.Marshal(data) -fmt.Println("v2:\n" + string(b)) - -buf := bytes.Buffer{} -enc := tomlv2.NewEncoder(&buf) -enc.SetIndentTables(true) -enc.Encode(data) -fmt.Println("v2 Encoder:\n" + string(buf.Bytes())) - -// Output: -// v1: -// -// [table] -// key = "value" -// -// v2: -// [table] -// key = 'value' -// -// -// v2 Encoder: -// [table] -// key = 'value' -``` - -[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables - -#### Keys and strings are single quoted - -V1 always uses double quotes (`"`) around strings and keys that cannot be -represented bare (unquoted). V2 instead uses single quotes (`'`) by default, -unless a character cannot be represented that way, in which case it falls back -to double quotes. As a result of this change, `Encoder.QuoteMapKeys` has been -removed, as it is not useful anymore. - -There is no way to make the v2 encoder behave like v1. - -#### `TextMarshaler` emits as a string, not TOML - -Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in -v1. The encoder would append the result to the output directly. In v2 the result -is wrapped in a string. As a result, this interface cannot be implemented by the -root object. - -There is no way to make the v2 encoder behave like v1. - -[tm]: https://golang.org/pkg/encoding/#TextMarshaler - -#### `Encoder.CompactComments` has been removed - -Emitting compact comments is now the default behavior of go-toml. This option -is not necessary anymore. - -#### Struct tags have been merged - -V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`, -`toml`, and `omitempty`. To behave more like the standard library, v2 has merged -`toml`, `multiline`, `commented`, and `omitempty`. For example: - -```go -type doc struct { - // v1 - F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"` - // v2 - F string `toml:"field,multiline,omitempty,commented"` -} -``` - -As a result, the `Encoder.SetTag*` methods have been removed, as there is just -one tag now. - -#### `Encoder.ArraysWithOneElementPerLine` has been renamed - -The new name is `Encoder.SetArraysMultiline`. The behavior should be the same. - -#### `Encoder.Indentation` has been renamed - -The new name is `Encoder.SetIndentSymbol`. The behavior should be the same. - - -#### Embedded structs behave like stdlib - -V1 defaults to merging embedded struct fields into the embedding struct. This -behavior was unexpected because it does not follow the standard library. To -avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was -added to make the encoder behave correctly. Given that backward compatibility is -no longer a concern, v2 does the right thing by default: it follows the behavior -of `encoding/json`. `Encoder.PromoteAnonymous` has been removed. - -[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038 - -### `query` - -go-toml v1 provided the [`go-toml/query`][query] package. It allowed running -JSONPath-style queries on TOML files. This feature is not available in v2. For a -replacement, check out [dasel][dasel]; a lighter-weight option is to unmarshal -into an `interface{}` and walk the result with type assertions, as sketched -below.
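-A minimal sketch of that fallback, assuming the document shape is known in
-advance (the `servers`, `alpha`, and `ip` keys are hypothetical):
-
-```go
-var doc map[string]interface{}
-if err := toml.Unmarshal(data, &doc); err != nil {
-	panic(err)
-}
-
-// Rough equivalent of a v1 query like "$.servers.alpha.ip": walk the
-// decoded maps with checked type assertions.
-if servers, ok := doc["servers"].(map[string]interface{}); ok {
-	if alpha, ok := servers["alpha"].(map[string]interface{}); ok {
-		fmt.Println(alpha["ip"])
-	}
-}
-```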
- -This package has been removed because it was essentially unmaintained -(last commit May 2020), increased the complexity of the code base, and more -complete solutions exist out there. - -[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query -[dasel]: https://github.com/TomWright/dasel - -## Versioning - -Except for parts explicitly marked otherwise, go-toml follows [Semantic -Versioning](https://semver.org). The supported version of -[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this -document. The last two major versions of Go are supported (see [Go Release -Policy](https://golang.org/doc/devel/release.html#policy)). - -## License - -The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/SECURITY.md deleted file mode 100644 index d4d554fda9..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/SECURITY.md +++ /dev/null @@ -1,16 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | -| ---------- | ------------------ | -| Latest 2.x | :white_check_mark: | -| All 1.x | :x: | -| All 0.x | :x: | - -## Reporting a Vulnerability - -Email a vulnerability report to `security@pelletier.codes`. Make sure to include -as many details as possible to reproduce the vulnerability. This is a -side-project: I will try to get back to you as quickly as possible, time -permitting in my personal life. Providing a working patch helps very much! diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/ci.sh b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/ci.sh deleted file mode 100644 index 86217a9b09..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/ci.sh +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env bash - - -stderr() { - echo "$@" 1>&2 -} - -usage() { - b=$(basename "$0") - echo $b: ERROR: "$@" 1>&2 - - cat 1>&2 < coverage.out - go tool cover -func=coverage.out - echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2 - popd - - if [ "${branch}" != "HEAD" ]; then - git worktree remove --force "$dir" - fi -} - -coverage() { - case "$1" in - -d) - shift - target="${1?Need to provide a target branch argument}" - - output_dir="$(mktemp -d)" - target_out="${output_dir}/target.txt" - head_out="${output_dir}/head.txt" - - cover "${target}" > "${target_out}" - cover "HEAD" > "${head_out}" - - cat "${target_out}" - cat "${head_out}" - - echo "" - - target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')" - head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')" - echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%" - - delta_pct=$(echo "$head_pct - $target_pct" | bc -l) - echo "Delta: ${delta_pct}" - - if [[ $delta_pct = \-* ]]; then - echo "Regression!"; - - target_diff="${output_dir}/target.diff.txt" - head_diff="${output_dir}/head.diff.txt" - cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}" - cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}" - - diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}" - return 1 - fi - return 0 - ;; - esac - - cover "${1-HEAD}" -} - -bench() { - branch="${1}" - out="${2}" - replace="${3}" - dir="$(mktemp -d)" - - stderr "Executing benchmark for ${branch} at ${dir}" -
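- # For HEAD, benchmark the current working tree (including uncommitted changes); for any other ref, check out a throwaway git worktree and remove it once the run completes.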
- if [ "${branch}" = "HEAD" ]; then - cp -r . "${dir}/" - else - git worktree add "$dir" "$branch" - fi - - pushd "$dir" - - if [ "${replace}" != "" ]; then - find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; - go get "${replace}" - fi - - export GOMAXPROCS=2 - go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}" - popd - - if [ "${branch}" != "HEAD" ]; then - git worktree remove --force "$dir" - fi -} - -fmktemp() { - if mktemp --version &> /dev/null; then - # GNU - mktemp --suffix=-$1 - else - # BSD - mktemp -t $1 - fi -} - -benchstathtml() { -python3 - $1 <<'EOF' -import sys - -lines = [] -stop = False - -with open(sys.argv[1]) as f: - for line in f.readlines(): - line = line.strip() - if line == "": - stop = True - if not stop: - lines.append(line.split(',')) - -results = [] -for line in reversed(lines[2:]): - if len(line) < 8 or line[0] == "": - continue - v2 = float(line[1]) - results.append([ - line[0].replace("-32", ""), - "%.1fx" % (float(line[3])/v2), # v1 - "%.1fx" % (float(line[7])/v2), # bs - ]) -# move geomean to the end -results.append(results[0]) -del results[0] - - -def printtable(data): - print(""" - - - - - """) - - for r in data: - print(" ".format(*r)) - - print(""" -
Benchmarkgo-toml v1BurntSushi/toml
{}{}{}
""") - - -def match(x): - return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0] - -above = [x for x in results if match(x)] -below = [x for x in results if not match(x)] - -printtable(above) -print("
See more") -print("""

The table above has the results of the most common use-cases. The table below -contains the results of all benchmarks, including unrealistic ones. It is -provided for completeness.

""") -printtable(below) -print('

This table can be generated with ./ci.sh benchmark -a -html.

') -print("
") - -EOF -} - -benchmark() { - case "$1" in - -d) - shift - target="${1?Need to provide a target branch argument}" - - old=`fmktemp ${target}` - bench "${target}" "${old}" - - new=`fmktemp HEAD` - bench HEAD "${new}" - - benchstat "${old}" "${new}" - return 0 - ;; - -a) - shift - - v2stats=`fmktemp go-toml-v2` - bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2" - v1stats=`fmktemp go-toml-v1` - bench HEAD "${v1stats}" "github.com/pelletier/go-toml" - bsstats=`fmktemp bs-toml` - bench HEAD "${bsstats}" "github.com/BurntSushi/toml" - - cp "${v2stats}" go-toml-v2.txt - cp "${v1stats}" go-toml-v1.txt - cp "${bsstats}" bs-toml.txt - - if [ "$1" = "-html" ]; then - tmpcsv=`fmktemp csv` - benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv - benchstathtml $tmpcsv - else - benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt - fi - - rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt - return $? - esac - - bench "${1-HEAD}" `mktemp` -} - -case "$1" in - coverage) shift; coverage $@;; - benchmark) shift; benchmark $@;; - *) usage "bad argument $1";; -esac diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/decode.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/decode.go deleted file mode 100644 index f0ec3b1705..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/decode.go +++ /dev/null @@ -1,550 +0,0 @@ -package toml - -import ( - "fmt" - "math" - "strconv" - "time" - - "github.com/pelletier/go-toml/v2/unstable" -) - -func parseInteger(b []byte) (int64, error) { - if len(b) > 2 && b[0] == '0' { - switch b[1] { - case 'x': - return parseIntHex(b) - case 'b': - return parseIntBin(b) - case 'o': - return parseIntOct(b) - default: - panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) - } - } - - return parseIntDec(b) -} - -func parseLocalDate(b []byte) (LocalDate, error) { - // full-date = date-fullyear "-" date-month "-" date-mday - // date-fullyear = 4DIGIT - // date-month = 2DIGIT ; 01-12 - // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year - var date LocalDate - - if len(b) != 10 || b[4] != '-' || b[7] != '-' { - return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD") - } - - var err error - - date.Year, err = parseDecimalDigits(b[0:4]) - if err != nil { - return LocalDate{}, err - } - - date.Month, err = parseDecimalDigits(b[5:7]) - if err != nil { - return LocalDate{}, err - } - - date.Day, err = parseDecimalDigits(b[8:10]) - if err != nil { - return LocalDate{}, err - } - - if !isValidDate(date.Year, date.Month, date.Day) { - return LocalDate{}, unstable.NewParserError(b, "impossible date") - } - - return date, nil -} - -func parseDecimalDigits(b []byte) (int, error) { - v := 0 - - for i, c := range b { - if c < '0' || c > '9' { - return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)") - } - v *= 10 - v += int(c - '0') - } - - return v, nil -} - -func parseDateTime(b []byte) (time.Time, error) { - // offset-date-time = full-date time-delim full-time - // full-time = partial-time time-offset - // time-offset = "Z" / time-numoffset - // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute - - dt, b, err := parseLocalDateTime(b) - if err != nil { - return time.Time{}, err - } - - var zone *time.Location - - if len(b) == 0 { - // parser should have checked that when assigning the date time node - panic("date time should have a timezone") - } - - if b[0] == 'Z' || b[0] == 'z' { - b = b[1:] - zone = time.UTC - } else { - 
const dateTimeByteLen = 6 - if len(b) != dateTimeByteLen { - return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone") - } - var direction int - switch b[0] { - case '-': - direction = -1 - case '+': - direction = +1 - default: - return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character") - } - - if b[3] != ':' { - return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator") - } - - hours, err := parseDecimalDigits(b[1:3]) - if err != nil { - return time.Time{}, err - } - if hours > 23 { - return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours") - } - - minutes, err := parseDecimalDigits(b[4:6]) - if err != nil { - return time.Time{}, err - } - if minutes > 59 { - return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes") - } - - seconds := direction * (hours*3600 + minutes*60) - if seconds == 0 { - zone = time.UTC - } else { - zone = time.FixedZone("", seconds) - } - b = b[dateTimeByteLen:] - } - - if len(b) > 0 { - return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone") - } - - t := time.Date( - dt.Year, - time.Month(dt.Month), - dt.Day, - dt.Hour, - dt.Minute, - dt.Second, - dt.Nanosecond, - zone) - - return t, nil -} - -func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { - var dt LocalDateTime - - const localDateTimeByteMinLen = 11 - if len(b) < localDateTimeByteMinLen { - return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") - } - - date, err := parseLocalDate(b[:10]) - if err != nil { - return dt, nil, err - } - dt.LocalDate = date - - sep := b[10] - if sep != 'T' && sep != ' ' && sep != 't' { - return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space") - } - - t, rest, err := parseLocalTime(b[11:]) - if err != nil { - return dt, nil, err - } - dt.LocalTime = t - - return dt, rest, nil -} - -// parseLocalTime is a bit different because it also returns the remaining -// []byte that is didn't need. This is to allow parseDateTime to parse those -// remaining bytes as a timezone. -func parseLocalTime(b []byte) (LocalTime, []byte, error) { - var ( - nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} - t LocalTime - ) - - // check if b matches to have expected format HH:MM:SS[.NNNNNN] - const localTimeByteLen = 8 - if len(b) < localTimeByteLen { - return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") - } - - var err error - - t.Hour, err = parseDecimalDigits(b[0:2]) - if err != nil { - return t, nil, err - } - - if t.Hour > 23 { - return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23") - } - if b[2] != ':' { - return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes") - } - - t.Minute, err = parseDecimalDigits(b[3:5]) - if err != nil { - return t, nil, err - } - if t.Minute > 59 { - return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59") - } - if b[5] != ':' { - return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds") - } - - t.Second, err = parseDecimalDigits(b[6:8]) - if err != nil { - return t, nil, err - } - - if t.Second > 60 { - return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60") - } - - b = b[8:] - - if len(b) >= 1 && b[0] == '.' 
{ - frac := 0 - precision := 0 - digits := 0 - - for i, c := range b[1:] { - if !isDigit(c) { - if i == 0 { - return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point") - } - break - } - digits++ - - const maxFracPrecision = 9 - if i >= maxFracPrecision { - // go-toml allows decoding fractional seconds - // beyond the supported precision of 9 - // digits. It truncates the fractional component - // to the supported precision and ignores the - // remaining digits. - // - // https://github.com/pelletier/go-toml/discussions/707 - continue - } - - frac *= 10 - frac += int(c - '0') - precision++ - } - - if precision == 0 { - return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit") - } - - t.Nanosecond = frac * nspow[precision] - t.Precision = precision - - return t, b[1+digits:], nil - } - return t, b, nil -} - -//nolint:cyclop -func parseFloat(b []byte) (float64, error) { - if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { - return math.NaN(), nil - } - - cleaned, err := checkAndRemoveUnderscoresFloats(b) - if err != nil { - return 0, err - } - - if cleaned[0] == '.' { - return 0, unstable.NewParserError(b, "float cannot start with a dot") - } - - if cleaned[len(cleaned)-1] == '.' { - return 0, unstable.NewParserError(b, "float cannot end with a dot") - } - - dotAlreadySeen := false - for i, c := range cleaned { - if c == '.' { - if dotAlreadySeen { - return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point") - } - if !isDigit(cleaned[i-1]) { - return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit") - } - if !isDigit(cleaned[i+1]) { - return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit") - } - dotAlreadySeen = true - } - } - - start := 0 - if cleaned[0] == '+' || cleaned[0] == '-' { - start = 1 - } - if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) { - return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") - } - - f, err := strconv.ParseFloat(string(cleaned), 64) - if err != nil { - return 0, unstable.NewParserError(b, "unable to parse float: %w", err) - } - - return f, nil -} - -func parseIntHex(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) - if err != nil { - return 0, err - } - - i, err := strconv.ParseInt(string(cleaned), 16, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err) - } - - return i, nil -} - -func parseIntOct(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) - if err != nil { - return 0, err - } - - i, err := strconv.ParseInt(string(cleaned), 8, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err) - } - - return i, nil -} - -func parseIntBin(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) - if err != nil { - return 0, err - } - - i, err := strconv.ParseInt(string(cleaned), 2, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err) - } - - return i, nil -} - -func isSign(b byte) bool { - return b == '+' || b == '-' -} - -func parseIntDec(b []byte) (int64, error) { - cleaned, err := checkAndRemoveUnderscoresIntegers(b) - if err != nil { - return 0, err - } - - startIdx := 0 - - if isSign(cleaned[0]) { - startIdx++ - } - - if len(cleaned) > 
startIdx+1 && cleaned[startIdx] == '0' { - return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number") - } - - i, err := strconv.ParseInt(string(cleaned), 10, 64) - if err != nil { - return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err) - } - - return i, nil -} - -func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { - start := 0 - if b[start] == '+' || b[start] == '-' { - start++ - } - - if len(b) == start { - return b, nil - } - - if b[start] == '_' { - return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore") - } - - if b[len(b)-1] == '_' { - return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") - } - - // fast path - i := 0 - for ; i < len(b); i++ { - if b[i] == '_' { - break - } - } - if i == len(b) { - return b, nil - } - - before := false - cleaned := make([]byte, i, len(b)) - copy(cleaned, b) - - for i++; i < len(b); i++ { - c := b[i] - if c == '_' { - if !before { - return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") - } - before = false - } else { - before = true - cleaned = append(cleaned, c) - } - } - - return cleaned, nil -} - -func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { - if b[0] == '_' { - return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore") - } - - if b[len(b)-1] == '_' { - return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") - } - - // fast path - i := 0 - for ; i < len(b); i++ { - if b[i] == '_' { - break - } - } - if i == len(b) { - return b, nil - } - - before := false - cleaned := make([]byte, 0, len(b)) - - for i := 0; i < len(b); i++ { - c := b[i] - - switch c { - case '_': - if !before { - return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") - } - if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { - return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent") - } - before = false - case '+', '-': - // signed exponents - cleaned = append(cleaned, c) - before = false - case 'e', 'E': - if i < len(b)-1 && b[i+1] == '_' { - return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent") - } - cleaned = append(cleaned, c) - case '.': - if i < len(b)-1 && b[i+1] == '_' { - return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point") - } - if i > 0 && b[i-1] == '_' { - return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point") - } - cleaned = append(cleaned, c) - default: - before = true - cleaned = append(cleaned, c) - } - } - - return cleaned, nil -} - -// isValidDate checks if a provided date is a date that exists. -func isValidDate(year int, month int, day int) bool { - return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) -} - -// daysBefore[m] counts the number of days in a non-leap year -// before month m begins. There is an entry for m=12, counting -// the number of days before January of next year (365). 
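-// daysIn derives month lengths from this table as daysBefore[m] - daysBefore[m-1], special-casing February in leap years.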
-var daysBefore = [...]int32{ - 0, - 31, - 31 + 28, - 31 + 28 + 31, - 31 + 28 + 31 + 30, - 31 + 28 + 31 + 30 + 31, - 31 + 28 + 31 + 30 + 31 + 30, - 31 + 28 + 31 + 30 + 31 + 30 + 31, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, - 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, -} - -func daysIn(m int, year int) int { - if m == 2 && isLeap(year) { - return 29 - } - return int(daysBefore[m] - daysBefore[m-1]) -} - -func isLeap(year int) bool { - return year%4 == 0 && (year%100 != 0 || year%400 == 0) -} - -func isDigit(r byte) bool { - return r >= '0' && r <= '9' -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/doc.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/doc.go deleted file mode 100644 index b7bc599bde..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package toml is a library to read and write TOML documents. -package toml diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/errors.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/errors.go deleted file mode 100644 index 309733f1f9..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/errors.go +++ /dev/null @@ -1,252 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "strings" - - "github.com/pelletier/go-toml/v2/internal/danger" - "github.com/pelletier/go-toml/v2/unstable" -) - -// DecodeError represents an error encountered during the parsing or decoding -// of a TOML document. -// -// In addition to the error message, it contains the position in the document -// where it happened, as well as a human-readable representation that shows -// where the error occurred in the document. -type DecodeError struct { - message string - line int - column int - key Key - - human string -} - -// StrictMissingError occurs in a TOML document that does not have a -// corresponding field in the target value. It contains all the missing fields -// in Errors. -// -// Emitted by Decoder when DisallowUnknownFields() was called. -type StrictMissingError struct { - // One error per field that could not be found. - Errors []DecodeError -} - -// Error returns the canonical string for this error. -func (s *StrictMissingError) Error() string { - return "strict mode: fields in the document are missing in the target struct" -} - -// String returns a human readable description of all errors. -func (s *StrictMissingError) String() string { - var buf strings.Builder - - for i, e := range s.Errors { - if i > 0 { - buf.WriteString("\n---\n") - } - - buf.WriteString(e.String()) - } - - return buf.String() -} - -type Key []string - -// Error returns the error message contained in the DecodeError. -func (e *DecodeError) Error() string { - return "toml: " + e.message -} - -// String returns the human-readable contextualized error. This string is multi-line. -func (e *DecodeError) String() string { - return e.human -} - -// Position returns the (line, column) pair indicating where the error -// occurred in the document. Positions are 1-indexed. -func (e *DecodeError) Position() (row int, column int) { - return e.line, e.column -} - -// Key that was being processed when the error occurred. The key is present only -// if this DecodeError is part of a StrictMissingError. 
-func (e *DecodeError) Key() Key { - return e.key -} - -// decodeErrorFromHighlight creates a DecodeError referencing a highlighted -// range of bytes from document. -// -// highlight needs to be a sub-slice of document, or this function panics. -// -// The function copies all bytes used in DecodeError, so that document and -// highlight can be freely deallocated. -// -//nolint:funlen -func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { - offset := danger.SubsliceOffset(document, de.Highlight) - - errMessage := de.Error() - errLine, errColumn := positionAtEnd(document[:offset]) - before, after := linesOfContext(document, de.Highlight, offset, 3) - - var buf strings.Builder - - maxLine := errLine + len(after) - 1 - lineColumnWidth := len(strconv.Itoa(maxLine)) - - // Write the lines of context strictly before the error. - for i := len(before) - 1; i > 0; i-- { - line := errLine - i - buf.WriteString(formatLineNumber(line, lineColumnWidth)) - buf.WriteString("|") - - if len(before[i]) > 0 { - buf.WriteString(" ") - buf.Write(before[i]) - } - - buf.WriteRune('\n') - } - - // Write the document line that contains the error. - - buf.WriteString(formatLineNumber(errLine, lineColumnWidth)) - buf.WriteString("| ") - - if len(before) > 0 { - buf.Write(before[0]) - } - - buf.Write(de.Highlight) - - if len(after) > 0 { - buf.Write(after[0]) - } - - buf.WriteRune('\n') - - // Write the line with the error message itself (so it does not have a line - // number). - - buf.WriteString(strings.Repeat(" ", lineColumnWidth)) - buf.WriteString("| ") - - if len(before) > 0 { - buf.WriteString(strings.Repeat(" ", len(before[0]))) - } - - buf.WriteString(strings.Repeat("~", len(de.Highlight))) - - if len(errMessage) > 0 { - buf.WriteString(" ") - buf.WriteString(errMessage) - } - - // Write the lines of context strictly after the error. - - for i := 1; i < len(after); i++ { - buf.WriteRune('\n') - line := errLine + i - buf.WriteString(formatLineNumber(line, lineColumnWidth)) - buf.WriteString("|") - - if len(after[i]) > 0 { - buf.WriteString(" ") - buf.Write(after[i]) - } - } - - return &DecodeError{ - message: errMessage, - line: errLine, - column: errColumn, - key: de.Key, - human: buf.String(), - } -} - -func formatLineNumber(line int, width int) string { - format := "%" + strconv.Itoa(width) + "d" - - return fmt.Sprintf(format, line) -} - -func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) { - return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround) -} - -func beforeLines(document []byte, offset int, linesAround int) [][]byte { - var beforeLines [][]byte - - // Walk the document backward from the highlight to find previous lines - // of context. - rest := document[:offset] -backward: - for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; { - switch { - case rest[o] == '\n': - // handle individual lines - beforeLines = append(beforeLines, rest[o+1:]) - rest = rest[:o] - o = len(rest) - 1 - case o == 0: - // add the first line only if it's non-empty - beforeLines = append(beforeLines, rest) - - break backward - default: - o-- - } - } - - return beforeLines -} - -func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte { - var afterLines [][]byte - - // Walk the document forward from the highlight to find the following - // lines of context. 
- rest := document[offset+len(highlight):] -forward: - for o := 0; o < len(rest) && len(afterLines) <= linesAround; { - switch { - case rest[o] == '\n': - // handle individual lines - afterLines = append(afterLines, rest[:o]) - rest = rest[o+1:] - o = 0 - - case o == len(rest)-1: - // add last line only if it's non-empty - afterLines = append(afterLines, rest) - - break forward - default: - o++ - } - } - - return afterLines -} - -func positionAtEnd(b []byte) (row int, column int) { - row = 1 - column = 1 - - for _, c := range b { - if c == '\n' { - row++ - column = 1 - } else { - column++ - } - } - - return -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go deleted file mode 100644 index 80f698db4b..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go +++ /dev/null @@ -1,42 +0,0 @@ -package characters - -var invalidAsciiTable = [256]bool{ - 0x00: true, - 0x01: true, - 0x02: true, - 0x03: true, - 0x04: true, - 0x05: true, - 0x06: true, - 0x07: true, - 0x08: true, - // 0x09 TAB - // 0x0A LF - 0x0B: true, - 0x0C: true, - // 0x0D CR - 0x0E: true, - 0x0F: true, - 0x10: true, - 0x11: true, - 0x12: true, - 0x13: true, - 0x14: true, - 0x15: true, - 0x16: true, - 0x17: true, - 0x18: true, - 0x19: true, - 0x1A: true, - 0x1B: true, - 0x1C: true, - 0x1D: true, - 0x1E: true, - 0x1F: true, - // 0x20 - 0x7E Printable ASCII characters - 0x7F: true, -} - -func InvalidAscii(b byte) bool { - return invalidAsciiTable[b] -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go deleted file mode 100644 index db4f45acbf..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go +++ /dev/null @@ -1,199 +0,0 @@ -package characters - -import ( - "unicode/utf8" -) - -type utf8Err struct { - Index int - Size int -} - -func (u utf8Err) Zero() bool { - return u.Size == 0 -} - -// Verified that a given string is only made of valid UTF-8 characters allowed -// by the TOML spec: -// -// Any Unicode character may be used except those that must be escaped: -// quotation mark, backslash, and the control characters other than tab (U+0000 -// to U+0008, U+000A to U+001F, U+007F). -// -// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early -// when a character is not allowed. -// -// The returned utf8Err is Zero() if the string is valid, or contains the byte -// index and size of the invalid character. -// -// quotation mark => already checked -// backslash => already checked -// 0-0x8 => invalid -// 0x9 => tab, ok -// 0xA - 0x1F => invalid -// 0x7F => invalid -func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { - // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. - offset := 0 - for len(p) >= 8 { - // Combining two 32 bit loads allows the same code to be used - // for 32 and 64 bit platforms. - // The compiler can generate a 32bit load for first32 and second32 - // on many platforms. See test/codegen/memcombine.go. - first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 - second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 - if (first32|second32)&0x80808080 != 0 { - // Found a non ASCII byte (>= RuneSelf). 
- break - } - - for i, b := range p[:8] { - if InvalidAscii(b) { - err.Index = offset + i - err.Size = 1 - return - } - } - - p = p[8:] - offset += 8 - } - n := len(p) - for i := 0; i < n; { - pi := p[i] - if pi < utf8.RuneSelf { - if InvalidAscii(pi) { - err.Index = offset + i - err.Size = 1 - return - } - i++ - continue - } - x := first[pi] - if x == xx { - // Illegal starter byte. - err.Index = offset + i - err.Size = 1 - return - } - size := int(x & 7) - if i+size > n { - // Short or invalid. - err.Index = offset + i - err.Size = n - i - return - } - accept := acceptRanges[x>>4] - if c := p[i+1]; c < accept.lo || accept.hi < c { - err.Index = offset + i - err.Size = 2 - return - } else if size == 2 { - } else if c := p[i+2]; c < locb || hicb < c { - err.Index = offset + i - err.Size = 3 - return - } else if size == 3 { - } else if c := p[i+3]; c < locb || hicb < c { - err.Index = offset + i - err.Size = 4 - return - } - i += size - } - return -} - -// Return the size of the next rune if valid, 0 otherwise. -func Utf8ValidNext(p []byte) int { - c := p[0] - - if c < utf8.RuneSelf { - if InvalidAscii(c) { - return 0 - } - return 1 - } - - x := first[c] - if x == xx { - // Illegal starter byte. - return 0 - } - size := int(x & 7) - if size > len(p) { - // Short or invalid. - return 0 - } - accept := acceptRanges[x>>4] - if c := p[1]; c < accept.lo || accept.hi < c { - return 0 - } else if size == 2 { - } else if c := p[2]; c < locb || hicb < c { - return 0 - } else if size == 3 { - } else if c := p[3]; c < locb || hicb < c { - return 0 - } - - return size -} - -// acceptRange gives the range of valid values for the second byte in a UTF-8 -// sequence. -type acceptRange struct { - lo uint8 // lowest value for second byte. - hi uint8 // highest value for second byte. -} - -// acceptRanges has size 16 to avoid bounds checks in the code that uses it. -var acceptRanges = [16]acceptRange{ - 0: {locb, hicb}, - 1: {0xA0, hicb}, - 2: {locb, 0x9F}, - 3: {0x90, hicb}, - 4: {locb, 0x8F}, -} - -// first is information about the first byte in a UTF-8 sequence. -var first = [256]uint8{ - // 1 2 3 4 5 6 7 8 9 A B C D E F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F - // 1 2 3 4 5 6 7 8 9 A B C D E F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF - xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF - s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF - s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF - s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF -} - -const ( - // The default lowest and highest continuation byte. 
- locb = 0b10000000 - hicb = 0b10111111 - - // These names of these constants are chosen to give nice alignment in the - // table below. The first nibble is an index into acceptRanges or F for - // special one-byte cases. The second nibble is the Rune length or the - // Status for the special one-byte case. - xx = 0xF1 // invalid: size 1 - as = 0xF0 // ASCII: size 1 - s1 = 0x02 // accept 0, size 2 - s2 = 0x13 // accept 1, size 3 - s3 = 0x03 // accept 0, size 3 - s4 = 0x23 // accept 2, size 3 - s5 = 0x34 // accept 3, size 4 - s6 = 0x04 // accept 0, size 4 - s7 = 0x44 // accept 4, size 4 -) diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go deleted file mode 100644 index e38e1131b8..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go +++ /dev/null @@ -1,65 +0,0 @@ -package danger - -import ( - "fmt" - "reflect" - "unsafe" -) - -const maxInt = uintptr(int(^uint(0) >> 1)) - -func SubsliceOffset(data []byte, subslice []byte) int { - datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) - - if hlp.Data < datap.Data { - panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) - } - offset := hlp.Data - datap.Data - - if offset > maxInt { - panic(fmt.Errorf("slice offset larger than int (%d)", offset)) - } - - intoffset := int(offset) - - if intoffset > datap.Len { - panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) - } - - if intoffset+hlp.Len > datap.Len { - panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) - } - - return intoffset -} - -func BytesRange(start []byte, end []byte) []byte { - if start == nil || end == nil { - panic("cannot call BytesRange with nil") - } - startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) - endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) - - if startp.Data > endp.Data { - panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) - } - - l := startp.Len - endLen := int(endp.Data-startp.Data) + endp.Len - if endLen > l { - l = endLen - } - - if l > startp.Cap { - panic(fmt.Errorf("range length is larger than capacity")) - } - - return start[:l] -} - -func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { - // TODO: replace with unsafe.Add when Go 1.17 is released - // https://github.com/golang/go/issues/40481 - return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go deleted file mode 100644 index 9d41c28a2f..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go +++ /dev/null @@ -1,23 +0,0 @@ -package danger - -import ( - "reflect" - "unsafe" -) - -// typeID is used as key in encoder and decoder caches to enable using -// the optimize runtime.mapaccess2_fast64 function instead of the more -// expensive lookup if we were to use reflect.Type as map key. -// -// typeID holds the pointer to the reflect.Type value, which is unique -// in the program. 
-// -// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 -type TypeID unsafe.Pointer - -func MakeTypeID(t reflect.Type) TypeID { - // reflect.Type has the fields: - // typ unsafe.Pointer - // ptr unsafe.Pointer - return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go deleted file mode 100644 index 149b17f538..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go +++ /dev/null @@ -1,48 +0,0 @@ -package tracker - -import "github.com/pelletier/go-toml/v2/unstable" - -// KeyTracker is a tracker that keeps track of the current Key as the AST is -// walked. -type KeyTracker struct { - k []string -} - -// UpdateTable sets the state of the tracker with the AST table node. -func (t *KeyTracker) UpdateTable(node *unstable.Node) { - t.reset() - t.Push(node) -} - -// UpdateArrayTable sets the state of the tracker with the AST array table node. -func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { - t.reset() - t.Push(node) -} - -// Push the given key on the stack. -func (t *KeyTracker) Push(node *unstable.Node) { - it := node.Key() - for it.Next() { - t.k = append(t.k, string(it.Node().Data)) - } -} - -// Pop key from stack. -func (t *KeyTracker) Pop(node *unstable.Node) { - it := node.Key() - for it.Next() { - t.k = t.k[:len(t.k)-1] - } -} - -// Key returns the current key -func (t *KeyTracker) Key() []string { - k := make([]string, len(t.k)) - copy(k, t.k) - return k -} - -func (t *KeyTracker) reset() { - t.k = t.k[:0] -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go deleted file mode 100644 index 76df2d5b6a..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ /dev/null @@ -1,358 +0,0 @@ -package tracker - -import ( - "bytes" - "fmt" - "sync" - - "github.com/pelletier/go-toml/v2/unstable" -) - -type keyKind uint8 - -const ( - invalidKind keyKind = iota - valueKind - tableKind - arrayTableKind -) - -func (k keyKind) String() string { - switch k { - case invalidKind: - return "invalid" - case valueKind: - return "value" - case tableKind: - return "table" - case arrayTableKind: - return "array table" - } - panic("missing keyKind string mapping") -} - -// SeenTracker tracks which keys have been seen with which TOML type to flag -// duplicates and mismatches according to the spec. -// -// Each node in the visited tree is represented by an entry. Each entry has an -// identifier, which is provided by a counter. Entries are stored in the array -// entries. As new nodes are discovered (referenced for the first time in the -// TOML document), entries are created and appended to the array. An entry -// points to its parent using its id. -// -// To find whether a given key (sequence of []byte) has already been visited, -// the entries are linearly searched, looking for one with the right name and -// parent id. -// -// Given that all keys appear in the document after their parent, it is -// guaranteed that all descendants of a node are stored after the node, this -// speeds up the search process. -// -// When encountering [[array tables]], the descendants of that node are removed -// to allow that branch of the tree to be "rediscovered". 
To maintain the -// invariant above, the deletion process needs to keep the order of entries. -// This results in more copies in that case. -type SeenTracker struct { - entries []entry - currentIdx int -} - -var pool = sync.Pool{ - New: func() interface{} { - return &SeenTracker{} - }, -} - -func (s *SeenTracker) reset() { - // Always contains a root element at index 0. - s.currentIdx = 0 - if len(s.entries) == 0 { - s.entries = make([]entry, 1, 2) - } else { - s.entries = s.entries[:1] - } - s.entries[0].child = -1 - s.entries[0].next = -1 -} - -type entry struct { - // Use -1 to indicate no child or no sibling. - child int - next int - - name []byte - kind keyKind - explicit bool - kv bool -} - -// Find the index of the child of parentIdx with key k. Returns -1 if -// it does not exist. -func (s *SeenTracker) find(parentIdx int, k []byte) int { - for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { - if bytes.Equal(s.entries[i].name, k) { - return i - } - } - return -1 -} - -// Remove all descendants of node at position idx. -func (s *SeenTracker) clear(idx int) { - if idx >= len(s.entries) { - return - } - - for i := s.entries[idx].child; i >= 0; { - next := s.entries[i].next - n := s.entries[0].next - s.entries[0].next = i - s.entries[i].next = n - s.entries[i].name = nil - s.clear(i) - i = next - } - - s.entries[idx].child = -1 -} - -func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { - e := entry{ - child: -1, - next: s.entries[parentIdx].child, - - name: name, - kind: kind, - explicit: explicit, - kv: kv, - } - var idx int - if s.entries[0].next >= 0 { - idx = s.entries[0].next - s.entries[0].next = s.entries[idx].next - s.entries[idx] = e - } else { - idx = len(s.entries) - s.entries = append(s.entries, e) - } - - s.entries[parentIdx].child = idx - - return idx -} - -func (s *SeenTracker) setExplicitFlag(parentIdx int) { - for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { - if s.entries[i].kv { - s.entries[i].explicit = true - s.entries[i].kv = false - } - s.setExplicitFlag(i) - } -} - -// CheckExpression takes a top-level node and checks that it does not contain -// keys that have been seen in previous calls, and validates that types are -// consistent. It returns true if it is the first time this node's key is seen. -// Useful to clear array tables on first use. -func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { - if s.entries == nil { - s.reset() - } - switch node.Kind { - case unstable.KeyValue: - return s.checkKeyValue(node) - case unstable.Table: - return s.checkTable(node) - case unstable.ArrayTable: - return s.checkArrayTable(node) - default: - panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) - } -} - -func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { - if s.currentIdx >= 0 { - s.setExplicitFlag(s.currentIdx) - } - - it := node.Key() - - parentIdx := 0 - - // This code is duplicated in checkArrayTable. This is because factoring - // it in a function requires to copy the iterator, or allocate it to the - // heap, which is not cheap. 
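- // Walk every component of the dotted key except the last, creating implicit (non-explicit) parent tables as needed.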
- for it.Next() { - if it.IsLast() { - break - } - - k := it.Node().Data - - idx := s.find(parentIdx, k) - - if idx < 0 { - idx = s.create(parentIdx, k, tableKind, false, false) - } else { - entry := s.entries[idx] - if entry.kind == valueKind { - return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) - } - } - parentIdx = idx - } - - k := it.Node().Data - idx := s.find(parentIdx, k) - - first := false - if idx >= 0 { - kind := s.entries[idx].kind - if kind != tableKind { - return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) - } - if s.entries[idx].explicit { - return false, fmt.Errorf("toml: table %s already exists", string(k)) - } - s.entries[idx].explicit = true - } else { - idx = s.create(parentIdx, k, tableKind, true, false) - first = true - } - - s.currentIdx = idx - - return first, nil -} - -func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { - if s.currentIdx >= 0 { - s.setExplicitFlag(s.currentIdx) - } - - it := node.Key() - - parentIdx := 0 - - for it.Next() { - if it.IsLast() { - break - } - - k := it.Node().Data - - idx := s.find(parentIdx, k) - - if idx < 0 { - idx = s.create(parentIdx, k, tableKind, false, false) - } else { - entry := s.entries[idx] - if entry.kind == valueKind { - return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) - } - } - - parentIdx = idx - } - - k := it.Node().Data - idx := s.find(parentIdx, k) - - firstTime := idx < 0 - if firstTime { - idx = s.create(parentIdx, k, arrayTableKind, true, false) - } else { - kind := s.entries[idx].kind - if kind != arrayTableKind { - return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) - } - s.clear(idx) - } - - s.currentIdx = idx - - return firstTime, nil -} - -func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { - parentIdx := s.currentIdx - it := node.Key() - - for it.Next() { - k := it.Node().Data - - idx := s.find(parentIdx, k) - - if idx < 0 { - idx = s.create(parentIdx, k, tableKind, false, true) - } else { - entry := s.entries[idx] - if it.IsLast() { - return false, fmt.Errorf("toml: key %s is already defined", string(k)) - } else if entry.kind != tableKind { - return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) - } else if entry.explicit { - return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) - } - } - - parentIdx = idx - } - - s.entries[parentIdx].kind = valueKind - - value := node.Value() - - switch value.Kind { - case unstable.InlineTable: - return s.checkInlineTable(value) - case unstable.Array: - return s.checkArray(value) - } - - return false, nil -} - -func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { - it := node.Children() - for it.Next() { - n := it.Node() - switch n.Kind { - case unstable.InlineTable: - first, err = s.checkInlineTable(n) - if err != nil { - return false, err - } - case unstable.Array: - first, err = s.checkArray(n) - if err != nil { - return false, err - } - } - } - return first, nil -} - -func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) { - s = pool.Get().(*SeenTracker) - s.reset() - - it := node.Children() - for it.Next() { - n := it.Node() - first, err = s.checkKeyValue(n) - if err != nil { - return false, err - } - } - - // As inline tables are self-contained, the tracker does not - // need to retain the 
details of what they contain. The - // keyValue element that creates the inline table is kept to - // mark the presence of the inline table and prevent - // redefinition of its keys: check* functions cannot walk into - // a value. - pool.Put(s) - return first, nil -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go deleted file mode 100644 index bf0317392f..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go +++ /dev/null @@ -1 +0,0 @@ -package tracker diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/localtime.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/localtime.go deleted file mode 100644 index a856bfdb0d..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/localtime.go +++ /dev/null @@ -1,122 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "time" - - "github.com/pelletier/go-toml/v2/unstable" -) - -// LocalDate represents a calendar day in no specific timezone. -type LocalDate struct { - Year int - Month int - Day int -} - -// AsTime converts d into a specific time instance at midnight in zone. -func (d LocalDate) AsTime(zone *time.Location) time.Time { - return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone) -} - -// String returns RFC 3339 representation of d. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// MarshalText returns RFC 3339 representation of d. -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText parses b using RFC 3339 to fill d. -func (d *LocalDate) UnmarshalText(b []byte) error { - res, err := parseLocalDate(b) - if err != nil { - return err - } - *d = res - return nil -} - -// LocalTime represents a time of day of no specific day in no specific -// timezone. -type LocalTime struct { - Hour int // Hour of the day: [0; 24[ - Minute int // Minute of the hour: [0; 60[ - Second int // Second of the minute: [0; 60[ - Nanosecond int // Nanoseconds within the second: [0, 1000000000[ - Precision int // Number of digits to display for Nanosecond. -} - -// String returns RFC 3339 representation of d. -// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond -// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number -// of digits for nanoseconds is provided. -func (d LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second) - - if d.Precision > 0 { - s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1] - } else if d.Nanosecond > 0 { - // Nanoseconds are specified, but precision is not provided. Use the - // minimum. - s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0") - } - - return s -} - -// MarshalText returns RFC 3339 representation of d. -func (d LocalTime) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText parses b using RFC 3339 to fill d. -func (d *LocalTime) UnmarshalText(b []byte) error { - res, left, err := parseLocalTime(b) - if err == nil && len(left) != 0 { - err = unstable.NewParserError(left, "extra characters") - } - if err != nil { - return err - } - *d = res - return nil -} - -// LocalDateTime represents a time of a specific day in no specific timezone. -type LocalDateTime struct { - LocalDate - LocalTime -} - -// AsTime converts d into a specific time instance in zone. 
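-// For example, a LocalDateTime representing 2006-01-02 15:04:05 converted with AsTime(time.UTC) yields that same wall-clock instant in UTC.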
-func (d LocalDateTime) AsTime(zone *time.Location) time.Time { - return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone) -} - -// String returns RFC 3339 representation of d. -func (d LocalDateTime) String() string { - return d.LocalDate.String() + "T" + d.LocalTime.String() -} - -// MarshalText returns RFC 3339 representation of d. -func (d LocalDateTime) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText parses b using RFC 3339 to fill d. -func (d *LocalDateTime) UnmarshalText(data []byte) error { - res, left, err := parseLocalDateTime(data) - if err == nil && len(left) != 0 { - err = unstable.NewParserError(left, "extra characters") - } - if err != nil { - return err - } - - *d = res - return nil -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/marshaler.go deleted file mode 100644 index 161acd9343..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ /dev/null @@ -1,1133 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "encoding/json" - "fmt" - "io" - "math" - "reflect" - "slices" - "strconv" - "strings" - "time" - "unicode" - - "github.com/pelletier/go-toml/v2/internal/characters" -) - -// Marshal serializes a Go value as a TOML document. - -// -// It is a shortcut for Encoder.Encode() with the default options. -func Marshal(v interface{}) ([]byte, error) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - - err := enc.Encode(v) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// Encoder writes a TOML document to an output stream. -type Encoder struct { - // output - w io.Writer - - // global settings - tablesInline bool - arraysMultiline bool - indentSymbol string - indentTables bool - marshalJsonNumbers bool -} - -// NewEncoder returns a new Encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - indentSymbol: " ", - } -} - -// SetTablesInline forces the encoder to emit all tables inline. - -// -// This behavior can be controlled on an individual struct field basis with the -// inline tag: -// -// MyField `toml:",inline"` -func (enc *Encoder) SetTablesInline(inline bool) *Encoder { - enc.tablesInline = inline - return enc -} - -// SetArraysMultiline forces the encoder to emit all arrays with one element per -// line. -// -// This behavior can be controlled on an individual struct field basis with the multiline tag: -// -// MyField `multiline:"true"` -func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { - enc.arraysMultiline = multiline - return enc -} - -// SetIndentSymbol defines the string that should be used for indentation. The -// provided string is repeated for each indentation level. Defaults to two -// spaces. -func (enc *Encoder) SetIndentSymbol(s string) *Encoder { - enc.indentSymbol = s - return enc -} - -// SetIndentTables forces the encoder to indent tables and array tables. -func (enc *Encoder) SetIndentTables(indent bool) *Encoder { - enc.indentTables = indent - return enc -} - -// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a -// float or integer instead of relying on TextMarshaler to emit a string. -// -// *Unstable:* This method does not follow the compatibility guarantees of -// semver. It can be changed or removed without a new major version being -// issued.
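// A short sketch of the Encoder API documented above; the Config type is
// invented for illustration, and the commented output is what the encoding
// rules described here imply, not a verified transcript.
package main

import (
	"bytes"
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	type Config struct {
		Title  string            `toml:"title"`
		Labels map[string]string `toml:"labels,inline"`
	}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	enc.SetIndentTables(true) // indent nested tables and array tables

	if err := enc.Encode(Config{Title: "demo", Labels: map[string]string{"env": "dev"}}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// title = 'demo'
	// labels = {env = 'dev'}
}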
-func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { - enc.marshalJsonNumbers = indent - return enc -} - -// Encode writes a TOML representation of v to the stream. -// -// If v cannot be represented to TOML it returns an error. -// -// # Encoding rules -// -// A top level slice containing only maps or structs is encoded as [[table -// array]]. -// -// All slices not matching rule 1 are encoded as [array]. As a result, any map -// or struct they contain is encoded as an {inline table}. -// -// Nil interfaces and nil pointers are not supported. -// -// Keys in key-values always have one part. -// -// Intermediate tables are always printed. -// -// By default, strings are encoded as literal string, unless they contain either -// a newline character or a single quote. In that case they are emitted as -// quoted strings. -// -// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so -// results in an error. This rule exists because the TOML specification only -// requires parsers to support at least the 64 bits integer range. Allowing -// larger numbers would create non-standard TOML documents, which may not be -// readable (at best) by other implementations. To encode such numbers, a -// solution is a custom type that implements encoding.TextMarshaler. -// -// When encoding structs, fields are encoded in order of definition, with their -// exact name. -// -// Tables and array tables are separated by empty lines. However, consecutive -// subtables definitions are not. For example: -// -// [top1] -// -// [top2] -// [top2.child1] -// -// [[array]] -// -// [[array]] -// [array.child2] -// -// # Struct tags -// -// The encoding of each public struct field can be customized by the format -// string in the "toml" key of the struct field's tag. This follows -// encoding/json's convention. The format string starts with the name of the -// field, optionally followed by a comma-separated list of options. The name may -// be empty in order to provide options without overriding the default name. -// -// The "multiline" option emits strings as quoted multi-line TOML strings. It -// has no effect on fields that would not be encoded as strings. -// -// The "inline" option turns fields that would be emitted as tables into inline -// tables instead. It has no effect on other fields. -// -// The "omitempty" option prevents empty values or groups from being emitted. -// -// The "commented" option prefixes the value and all its children with a comment -// symbol. -// -// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit -// a TOML comment before the value being annotated. Comments are ignored inside -// inline tables. For array tables, the comment is only present before the first -// element of the array. -func (enc *Encoder) Encode(v interface{}) error { - var ( - b []byte - ctx encoderCtx - ) - - ctx.inline = enc.tablesInline - - if v == nil { - return fmt.Errorf("toml: cannot encode a nil interface") - } - - b, err := enc.encode(b, ctx, reflect.ValueOf(v)) - if err != nil { - return err - } - - _, err = enc.w.Write(b) - if err != nil { - return fmt.Errorf("toml: cannot write: %w", err) - } - - return nil -} - -type valueOptions struct { - multiline bool - omitempty bool - commented bool - comment string -} - -type encoderCtx struct { - // Current top-level key. - parentKey []string - - // Key that should be used for a KV. 
- key string - // Extra flag to account for the empty string - hasKey bool - - // Set to true to indicate that the encoder is inside a KV, so that all - // tables need to be inlined. - insideKv bool - - // Set to true to skip the first table header in an array table. - skipTableHeader bool - - // Should the next table be encoded as inline - inline bool - - // Indentation level - indent int - - // Prefix the current value with a comment. - commented bool - - // Options coming from struct tags - options valueOptions -} - -func (ctx *encoderCtx) shiftKey() { - if ctx.hasKey { - ctx.parentKey = append(ctx.parentKey, ctx.key) - ctx.clearKey() - } -} - -func (ctx *encoderCtx) setKey(k string) { - ctx.key = k - ctx.hasKey = true -} - -func (ctx *encoderCtx) clearKey() { - ctx.key = "" - ctx.hasKey = false -} - -func (ctx *encoderCtx) isRoot() bool { - return len(ctx.parentKey) == 0 && !ctx.hasKey -} - -func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - i := v.Interface() - - switch x := i.(type) { - case time.Time: - if x.Nanosecond() > 0 { - return x.AppendFormat(b, time.RFC3339Nano), nil - } - return x.AppendFormat(b, time.RFC3339), nil - case LocalTime: - return append(b, x.String()...), nil - case LocalDate: - return append(b, x.String()...), nil - case LocalDateTime: - return append(b, x.String()...), nil - case json.Number: - if enc.marshalJsonNumbers { - if x == "" { /// Useful zero value. - return append(b, "0"...), nil - } else if v, err := x.Int64(); err == nil { - return enc.encode(b, ctx, reflect.ValueOf(v)) - } else if f, err := x.Float64(); err == nil { - return enc.encode(b, ctx, reflect.ValueOf(f)) - } else { - return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) - } - } - } - - hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { - if !hasTextMarshaler { - v = v.Addr() - } - - if ctx.isRoot() { - return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) - } - - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return nil, err - } - - b = enc.encodeString(b, string(text), ctx.options) - - return b, nil - } - - switch v.Kind() { - // containers - case reflect.Map: - return enc.encodeMap(b, ctx, v) - case reflect.Struct: - return enc.encodeStruct(b, ctx, v) - case reflect.Slice, reflect.Array: - return enc.encodeSlice(b, ctx, v) - case reflect.Interface: - if v.IsNil() { - return nil, fmt.Errorf("toml: encoding a nil interface is not supported") - } - - return enc.encode(b, ctx, v.Elem()) - case reflect.Ptr: - if v.IsNil() { - return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) - } - - return enc.encode(b, ctx, v.Elem()) - - // values - case reflect.String: - b = enc.encodeString(b, v.String(), ctx.options) - case reflect.Float32: - f := v.Float() - - if math.IsNaN(f) { - b = append(b, "nan"...) - } else if f > math.MaxFloat32 { - b = append(b, "inf"...) - } else if f < -math.MaxFloat32 { - b = append(b, "-inf"...) - } else if math.Trunc(f) == f { - b = strconv.AppendFloat(b, f, 'f', 1, 32) - } else { - b = strconv.AppendFloat(b, f, 'f', -1, 32) - } - case reflect.Float64: - f := v.Float() - if math.IsNaN(f) { - b = append(b, "nan"...) - } else if f > math.MaxFloat64 { - b = append(b, "inf"...) - } else if f < -math.MaxFloat64 { - b = append(b, "-inf"...) 
- } else if math.Trunc(f) == f { - b = strconv.AppendFloat(b, f, 'f', 1, 64) - } else { - b = strconv.AppendFloat(b, f, 'f', -1, 64) - } - case reflect.Bool: - if v.Bool() { - b = append(b, "true"...) - } else { - b = append(b, "false"...) - } - case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: - x := v.Uint() - if x > uint64(math.MaxInt64) { - return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64)) - } - b = strconv.AppendUint(b, x, 10) - case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: - b = strconv.AppendInt(b, v.Int(), 10) - default: - return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) - } - - return b, nil -} - -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Map: - return v.IsNil() - default: - return false - } -} - -func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { - return options.omitempty && isEmptyValue(v) -} - -func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { - var err error - - if !ctx.inline { - b = enc.encodeComment(ctx.indent, options.comment, b) - b = enc.commented(ctx.commented, b) - b = enc.indent(ctx.indent, b) - } - - b = enc.encodeKey(b, ctx.key) - b = append(b, " = "...) - - // create a copy of the context because the value of a KV shouldn't - // modify the global context. - subctx := ctx - subctx.insideKv = true - subctx.shiftKey() - subctx.options = options - - b, err = enc.encode(b, subctx, v) - if err != nil { - return nil, err - } - - return b, nil -} - -func (enc *Encoder) commented(commented bool, b []byte) []byte { - if commented { - return append(b, "# "...) - } - return b -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Struct: - return isEmptyStruct(v) - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func isEmptyStruct(v reflect.Value) bool { - // TODO: merge with walkStruct and cache. - typ := v.Type() - for i := 0; i < typ.NumField(); i++ { - fieldType := typ.Field(i) - - // only consider exported fields - if fieldType.PkgPath != "" { - continue - } - - tag := fieldType.Tag.Get("toml") - - // special field name to skip field - if tag == "-" { - continue - } - - f := v.Field(i) - - if !isEmptyValue(f) { - return false - } - } - - return true -} - -const literalQuote = '\'' - -func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { - if needsQuoting(v) { - return enc.encodeQuotedString(options.multiline, b, v) - } - - return enc.encodeLiteralString(b, v) -} - -func needsQuoting(v string) bool { - // TODO: vectorize - for _, b := range []byte(v) { - if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { - return true - } - } - return false -} - -// caller should have checked that the string does not contain new lines or ' . -func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { - b = append(b, literalQuote) - b = append(b, v...) 
- b = append(b, literalQuote) - - return b -} - -func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { - stringQuote := `"` - - if multiline { - stringQuote = `"""` - } - - b = append(b, stringQuote...) - if multiline { - b = append(b, '\n') - } - - const ( - hextable = "0123456789ABCDEF" - // U+0000 to U+0008, U+000A to U+001F, U+007F - nul = 0x0 - bs = 0x8 - lf = 0xa - us = 0x1f - del = 0x7f - ) - - for _, r := range []byte(v) { - switch r { - case '\\': - b = append(b, `\\`...) - case '"': - b = append(b, `\"`...) - case '\b': - b = append(b, `\b`...) - case '\f': - b = append(b, `\f`...) - case '\n': - if multiline { - b = append(b, r) - } else { - b = append(b, `\n`...) - } - case '\r': - b = append(b, `\r`...) - case '\t': - b = append(b, `\t`...) - default: - switch { - case r >= nul && r <= bs, r >= lf && r <= us, r == del: - b = append(b, `\u00`...) - b = append(b, hextable[r>>4]) - b = append(b, hextable[r&0x0f]) - default: - b = append(b, r) - } - } - } - - b = append(b, stringQuote...) - - return b -} - -// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . -func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { - return append(b, v...) -} - -func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { - if len(ctx.parentKey) == 0 { - return b, nil - } - - b = enc.encodeComment(ctx.indent, ctx.options.comment, b) - - b = enc.commented(ctx.commented, b) - - b = enc.indent(ctx.indent, b) - - b = append(b, '[') - - b = enc.encodeKey(b, ctx.parentKey[0]) - - for _, k := range ctx.parentKey[1:] { - b = append(b, '.') - b = enc.encodeKey(b, k) - } - - b = append(b, "]\n"...) - - return b, nil -} - -//nolint:cyclop -func (enc *Encoder) encodeKey(b []byte, k string) []byte { - needsQuotation := false - cannotUseLiteral := false - - if len(k) == 0 { - return append(b, "''"...) 
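// A tiny sketch of the string-quoting rules implemented by needsQuoting and
// the encode*String helpers above: values without single quotes, newlines,
// or invalid ASCII stay literal; others fall back to escaped basic strings.
// Inputs are illustrative.
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	out, _ := toml.Marshal(map[string]string{
		"plain":  "hello", // emitted as plain = 'hello'
		"quoted": "it's",  // single quote forces quoted = "it's"
		"multi":  "a\nb",  // newline forces multi = "a\nb" with an escape
	})
	fmt.Print(string(out))
}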
- } - - for _, c := range k { - if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { - continue - } - - if c == literalQuote { - cannotUseLiteral = true - } - - needsQuotation = true - } - - if needsQuotation && needsQuoting(k) { - cannotUseLiteral = true - } - - switch { - case cannotUseLiteral: - return enc.encodeQuotedString(false, b, k) - case needsQuotation: - return enc.encodeLiteralString(b, k) - default: - return enc.encodeUnquotedKey(b, k) - } -} - -func (enc *Encoder) keyToString(k reflect.Value) (string, error) { - keyType := k.Type() - switch { - case keyType.Kind() == reflect.String: - return k.String(), nil - - case keyType.Implements(textMarshalerType): - keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) - } - return string(keyB), nil - - case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: - return strconv.FormatInt(k.Int(), 10), nil - - case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: - return strconv.FormatUint(k.Uint(), 10), nil - - case keyType.Kind() == reflect.Float32: - return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil - - case keyType.Kind() == reflect.Float64: - return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil - } - return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) -} - -func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - var ( - t table - emptyValueOptions valueOptions - ) - - iter := v.MapRange() - for iter.Next() { - v := iter.Value() - - if isNil(v) { - continue - } - - k, err := enc.keyToString(iter.Key()) - if err != nil { - return nil, err - } - - if willConvertToTableOrArrayTable(ctx, v) { - t.pushTable(k, v, emptyValueOptions) - } else { - t.pushKV(k, v, emptyValueOptions) - } - } - - sortEntriesByKey(t.kvs) - sortEntriesByKey(t.tables) - - return enc.encodeTable(b, ctx, t) -} - -func sortEntriesByKey(e []entry) { - slices.SortFunc(e, func(a, b entry) int { - return strings.Compare(a.Key, b.Key) - }) -} - -type entry struct { - Key string - Value reflect.Value - Options valueOptions -} - -type table struct { - kvs []entry - tables []entry -} - -func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { - for _, e := range t.kvs { - if e.Key == k { - return - } - } - - t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) -} - -func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { - for _, e := range t.tables { - if e.Key == k { - return - } - } - t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) -} - -func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { - // TODO: cache this - typ := v.Type() - for i := 0; i < typ.NumField(); i++ { - fieldType := typ.Field(i) - - // only consider exported fields - if fieldType.PkgPath != "" { - continue - } - - tag := fieldType.Tag.Get("toml") - - // special field name to skip field - if tag == "-" { - continue - } - - k, opts := parseTag(tag) - if !isValidName(k) { - k = "" - } - - f := v.Field(i) - - if k == "" { - if fieldType.Anonymous { - if fieldType.Type.Kind() == reflect.Struct { - walkStruct(ctx, t, f) - } else if fieldType.Type.Kind() == 
reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { - walkStruct(ctx, t, f.Elem()) - } - continue - } else { - k = fieldType.Name - } - } - - if isNil(f) { - continue - } - - options := valueOptions{ - multiline: opts.multiline, - omitempty: opts.omitempty, - commented: opts.commented, - comment: fieldType.Tag.Get("comment"), - } - - if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { - t.pushKV(k, f, options) - } else { - t.pushTable(k, f, options) - } - } -} - -func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - var t table - - walkStruct(ctx, &t, v) - - return enc.encodeTable(b, ctx, t) -} - -func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { - for len(comment) > 0 { - var line string - idx := strings.IndexByte(comment, '\n') - if idx >= 0 { - line = comment[:idx] - comment = comment[idx+1:] - } else { - line = comment - comment = "" - } - b = enc.indent(indent, b) - b = append(b, "# "...) - b = append(b, line...) - b = append(b, '\n') - } - return b -} - -func isValidName(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - case !unicode.IsLetter(c) && !unicode.IsDigit(c): - return false - } - } - return true -} - -type tagOptions struct { - multiline bool - inline bool - omitempty bool - commented bool -} - -func parseTag(tag string) (string, tagOptions) { - opts := tagOptions{} - - idx := strings.Index(tag, ",") - if idx == -1 { - return tag, opts - } - - raw := tag[idx+1:] - tag = string(tag[:idx]) - for raw != "" { - var o string - i := strings.Index(raw, ",") - if i >= 0 { - o, raw = raw[:i], raw[i+1:] - } else { - o, raw = raw, "" - } - switch o { - case "multiline": - opts.multiline = true - case "inline": - opts.inline = true - case "omitempty": - opts.omitempty = true - case "commented": - opts.commented = true - } - } - - return tag, opts -} - -func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { - var err error - - ctx.shiftKey() - - if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { - return enc.encodeTableInline(b, ctx, t) - } - - if !ctx.skipTableHeader { - b, err = enc.encodeTableHeader(ctx, b) - if err != nil { - return nil, err - } - - if enc.indentTables && len(ctx.parentKey) > 0 { - ctx.indent++ - } - } - ctx.skipTableHeader = false - - hasNonEmptyKV := false - for _, kv := range t.kvs { - if shouldOmitEmpty(kv.Options, kv.Value) { - continue - } - hasNonEmptyKV = true - - ctx.setKey(kv.Key) - ctx2 := ctx - ctx2.commented = kv.Options.commented || ctx2.commented - - b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value) - if err != nil { - return nil, err - } - - b = append(b, '\n') - } - - first := true - for _, table := range t.tables { - if shouldOmitEmpty(table.Options, table.Value) { - continue - } - if first { - first = false - if hasNonEmptyKV { - b = append(b, '\n') - } - } else { - b = append(b, "\n"...) 
- } - - ctx.setKey(table.Key) - - ctx.options = table.Options - ctx2 := ctx - ctx2.commented = ctx2.commented || ctx.options.commented - - b, err = enc.encode(b, ctx2, table.Value) - if err != nil { - return nil, err - } - } - - return b, nil -} - -func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { - var err error - - b = append(b, '{') - - first := true - for _, kv := range t.kvs { - if shouldOmitEmpty(kv.Options, kv.Value) { - continue - } - - if first { - first = false - } else { - b = append(b, `, `...) - } - - ctx.setKey(kv.Key) - - b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) - if err != nil { - return nil, err - } - } - - if len(t.tables) > 0 { - panic("inline table cannot contain nested tables, only key-values") - } - - b = append(b, "}"...) - - return b, nil -} - -func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { - if !v.IsValid() { - return false - } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { - return false - } - - t := v.Type() - switch t.Kind() { - case reflect.Map, reflect.Struct: - return !ctx.inline - case reflect.Interface: - return willConvertToTable(ctx, v.Elem()) - case reflect.Ptr: - if v.IsNil() { - return false - } - - return willConvertToTable(ctx, v.Elem()) - default: - return false - } -} - -func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { - if ctx.insideKv { - return false - } - t := v.Type() - - if t.Kind() == reflect.Interface { - return willConvertToTableOrArrayTable(ctx, v.Elem()) - } - - if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { - if v.Len() == 0 { - // An empty slice should be a kv = []. - return false - } - - for i := 0; i < v.Len(); i++ { - t := willConvertToTable(ctx, v.Index(i)) - - if !t { - return false - } - } - - return true - } - - return willConvertToTable(ctx, v) -} - -func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - if v.Len() == 0 { - b = append(b, "[]"...) - - return b, nil - } - - if willConvertToTableOrArrayTable(ctx, v) { - return enc.encodeSliceAsArrayTable(b, ctx, v) - } - - return enc.encodeSliceAsArray(b, ctx, v) -} - -// caller should have checked that v is a slice that only contains values that -// encode into tables. -func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - ctx.shiftKey() - - scratch := make([]byte, 0, 64) - - scratch = enc.commented(ctx.commented, scratch) - - if enc.indentTables { - scratch = enc.indent(ctx.indent, scratch) - } - - scratch = append(scratch, "[["...) - - for i, k := range ctx.parentKey { - if i > 0 { - scratch = append(scratch, '.') - } - - scratch = enc.encodeKey(scratch, k) - } - - scratch = append(scratch, "]]\n"...) - ctx.skipTableHeader = true - - b = enc.encodeComment(ctx.indent, ctx.options.comment, b) - - if enc.indentTables { - ctx.indent++ - } - - for i := 0; i < v.Len(); i++ { - if i != 0 { - b = append(b, "\n"...) - } - - b = append(b, scratch...) 
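// A compact sketch of the [[array table]] output produced by
// encodeSliceAsArrayTable above for a slice of structs; the types and values
// are invented, and the commented output follows the rules described earlier.
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type point struct {
	X int `toml:"x"`
	Y int `toml:"y"`
}

func main() {
	out, _ := toml.Marshal(struct {
		Points []point `toml:"points"`
	}{Points: []point{{X: 1, Y: 2}, {X: 3, Y: 4}}})
	fmt.Print(string(out))
	// [[points]]
	// x = 1
	// y = 2
	//
	// [[points]]
	// x = 3
	// y = 4
}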
- - var err error - b, err = enc.encode(b, ctx, v.Index(i)) - if err != nil { - return nil, err - } - } - - return b, nil -} - -func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { - multiline := ctx.options.multiline || enc.arraysMultiline - separator := ", " - - b = append(b, '[') - - subCtx := ctx - subCtx.options = valueOptions{} - - if multiline { - separator = ",\n" - - b = append(b, '\n') - - subCtx.indent++ - } - - var err error - first := true - - for i := 0; i < v.Len(); i++ { - if first { - first = false - } else { - b = append(b, separator...) - } - - if multiline { - b = enc.indent(subCtx.indent, b) - } - - b, err = enc.encode(b, subCtx, v.Index(i)) - if err != nil { - return nil, err - } - } - - if multiline { - b = append(b, '\n') - b = enc.indent(ctx.indent, b) - } - - b = append(b, ']') - - return b, nil -} - -func (enc *Encoder) indent(level int, b []byte) []byte { - for i := 0; i < level; i++ { - b = append(b, enc.indentSymbol...) - } - - return b -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/strict.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/strict.go deleted file mode 100644 index 802e7e4d15..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/strict.go +++ /dev/null @@ -1,107 +0,0 @@ -package toml - -import ( - "github.com/pelletier/go-toml/v2/internal/danger" - "github.com/pelletier/go-toml/v2/internal/tracker" - "github.com/pelletier/go-toml/v2/unstable" -) - -type strict struct { - Enabled bool - - // Tracks the current key being processed. - key tracker.KeyTracker - - missing []unstable.ParserError -} - -func (s *strict) EnterTable(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.UpdateTable(node) -} - -func (s *strict) EnterArrayTable(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.UpdateArrayTable(node) -} - -func (s *strict) EnterKeyValue(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.Push(node) -} - -func (s *strict) ExitKeyValue(node *unstable.Node) { - if !s.Enabled { - return - } - - s.key.Pop(node) -} - -func (s *strict) MissingTable(node *unstable.Node) { - if !s.Enabled { - return - } - - s.missing = append(s.missing, unstable.ParserError{ - Highlight: keyLocation(node), - Message: "missing table", - Key: s.key.Key(), - }) -} - -func (s *strict) MissingField(node *unstable.Node) { - if !s.Enabled { - return - } - - s.missing = append(s.missing, unstable.ParserError{ - Highlight: keyLocation(node), - Message: "missing field", - Key: s.key.Key(), - }) -} - -func (s *strict) Error(doc []byte) error { - if !s.Enabled || len(s.missing) == 0 { - return nil - } - - err := &StrictMissingError{ - Errors: make([]DecodeError, 0, len(s.missing)), - } - - for _, derr := range s.missing { - derr := derr - err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) - } - - return err -} - -func keyLocation(node *unstable.Node) []byte { - k := node.Key() - - hasOne := k.Next() - if !hasOne { - panic("should not be called with empty key") - } - - start := k.Node().Data - end := k.Node().Data - - for k.Next() { - end = k.Node().Data - } - - return danger.BytesRange(start, end) -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/toml.abnf deleted file mode 100644 index 473f3749e8..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/toml.abnf +++ /dev/null @@ -1,243 +0,0 @@ -;; This document describes TOML's 
syntax, using the ABNF format (defined in -;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt). -;; -;; All valid TOML documents will match this description, however certain -;; invalid documents would need to be rejected as per the semantics described -;; in the supporting text description. - -;; It is possible to try this grammar interactively, using instaparse. -;; http://instaparse.mojombo.com/ -;; -;; To do so, in the lower right, click on Options and change `:input-format` to -;; ':abnf'. Then paste this entire ABNF document into the grammar entry box -;; (above the options). Then you can type or paste a sample TOML document into -;; the beige box on the left. Tada! - -;; Overall Structure - -toml = expression *( newline expression ) - -expression = ws [ comment ] -expression =/ ws keyval ws [ comment ] -expression =/ ws table ws [ comment ] - -;; Whitespace - -ws = *wschar -wschar = %x20 ; Space -wschar =/ %x09 ; Horizontal tab - -;; Newline - -newline = %x0A ; LF -newline =/ %x0D.0A ; CRLF - -;; Comment - -comment-start-symbol = %x23 ; # -non-ascii = %x80-D7FF / %xE000-10FFFF -non-eol = %x09 / %x20-7F / non-ascii - -comment = comment-start-symbol *non-eol - -;; Key-Value pairs - -keyval = key keyval-sep val - -key = simple-key / dotted-key -simple-key = quoted-key / unquoted-key - -unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ -quoted-key = basic-string / literal-string -dotted-key = simple-key 1*( dot-sep simple-key ) - -dot-sep = ws %x2E ws ; . Period -keyval-sep = ws %x3D ws ; = - -val = string / boolean / array / inline-table / date-time / float / integer - -;; String - -string = ml-basic-string / basic-string / ml-literal-string / literal-string - -;; Basic String - -basic-string = quotation-mark *basic-char quotation-mark - -quotation-mark = %x22 ; " - -basic-char = basic-unescaped / escaped -basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii -escaped = escape escape-seq-char - -escape = %x5C ; \ -escape-seq-char = %x22 ; " quotation mark U+0022 -escape-seq-char =/ %x5C ; \ reverse solidus U+005C -escape-seq-char =/ %x62 ; b backspace U+0008 -escape-seq-char =/ %x66 ; f form feed U+000C -escape-seq-char =/ %x6E ; n line feed U+000A -escape-seq-char =/ %x72 ; r carriage return U+000D -escape-seq-char =/ %x74 ; t tab U+0009 -escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX -escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX - -;; Multiline Basic String - -ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body - ml-basic-string-delim -ml-basic-string-delim = 3quotation-mark -ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] - -mlb-content = mlb-char / newline / mlb-escaped-nl -mlb-char = mlb-unescaped / escaped -mlb-quotes = 1*2quotation-mark -mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii -mlb-escaped-nl = escape ws newline *( wschar / newline ) - -;; Literal String - -literal-string = apostrophe *literal-char apostrophe - -apostrophe = %x27 ; ' apostrophe - -literal-char = %x09 / %x20-26 / %x28-7E / non-ascii - -;; Multiline Literal String - -ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body - ml-literal-string-delim -ml-literal-string-delim = 3apostrophe -ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] - -mll-content = mll-char / newline -mll-char = %x09 / %x20-26 / %x28-7E / non-ascii -mll-quotes = 1*2apostrophe - -;; Integer - -integer = dec-int / hex-int / oct-int / bin-int - -minus = %x2D ; - -plus = %x2B ; + -underscore = %x5F ; 
_ -digit1-9 = %x31-39 ; 1-9 -digit0-7 = %x30-37 ; 0-7 -digit0-1 = %x30-31 ; 0-1 - -hex-prefix = %x30.78 ; 0x -oct-prefix = %x30.6F ; 0o -bin-prefix = %x30.62 ; 0b - -dec-int = [ minus / plus ] unsigned-dec-int -unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT ) - -hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG ) -oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 ) -bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 ) - -;; Float - -float = float-int-part ( exp / frac [ exp ] ) -float =/ special-float - -float-int-part = dec-int -frac = decimal-point zero-prefixable-int -decimal-point = %x2E ; . -zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT ) - -exp = "e" float-exp-part -float-exp-part = [ minus / plus ] zero-prefixable-int - -special-float = [ minus / plus ] ( inf / nan ) -inf = %x69.6e.66 ; inf -nan = %x6e.61.6e ; nan - -;; Boolean - -boolean = true / false - -true = %x74.72.75.65 ; true -false = %x66.61.6C.73.65 ; false - -;; Date and Time (as defined in RFC 3339) - -date-time = offset-date-time / local-date-time / local-date / local-time - -date-fullyear = 4DIGIT -date-month = 2DIGIT ; 01-12 -date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year -time-delim = "T" / %x20 ; T, t, or space -time-hour = 2DIGIT ; 00-23 -time-minute = 2DIGIT ; 00-59 -time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules -time-secfrac = "." 1*DIGIT -time-numoffset = ( "+" / "-" ) time-hour ":" time-minute -time-offset = "Z" / time-numoffset - -partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ] -full-date = date-fullyear "-" date-month "-" date-mday -full-time = partial-time time-offset - -;; Offset Date-Time - -offset-date-time = full-date time-delim full-time - -;; Local Date-Time - -local-date-time = full-date time-delim partial-time - -;; Local Date - -local-date = full-date - -;; Local Time - -local-time = partial-time - -;; Array - -array = array-open [ array-values ] ws-comment-newline array-close - -array-open = %x5B ; [ -array-close = %x5D ; ] - -array-values = ws-comment-newline val ws-comment-newline array-sep array-values -array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] - -array-sep = %x2C ; , Comma - -ws-comment-newline = *( wschar / [ comment ] newline ) - -;; Table - -table = std-table / array-table - -;; Standard Table - -std-table = std-table-open key std-table-close - -std-table-open = %x5B ws ; [ Left square bracket -std-table-close = ws %x5D ; ] Right square bracket - -;; Inline Table - -inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close - -inline-table-open = %x7B ws ; { -inline-table-close = ws %x7D ; } -inline-table-sep = ws %x2C ws ; , Comma - -inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] - -;; Array Table - -array-table = array-table-open key array-table-close - -array-table-open = %x5B.5B ws ; [[ Double left square bracket -array-table-close = ws %x5D.5D ; ]] Double right square bracket - -;; Built-in ABNF terms, reproduced here for clarity - -ALPHA = %x41-5A / %x61-7A ; A-Z / a-z -DIGIT = %x30-39 ; 0-9 -HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/types.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/types.go deleted file mode 100644 index 3c6b8fe570..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/types.go +++ /dev/null @@ -1,14 +0,0 @@ -package toml - -import ( - "encoding" - 
"reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) -var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) -var stringType = reflect.TypeOf("") diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go deleted file mode 100644 index c3df8bee1c..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ /dev/null @@ -1,1334 +0,0 @@ -package toml - -import ( - "encoding" - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync/atomic" - "time" - - "github.com/pelletier/go-toml/v2/internal/danger" - "github.com/pelletier/go-toml/v2/internal/tracker" - "github.com/pelletier/go-toml/v2/unstable" -) - -// Unmarshal deserializes a TOML document into a Go value. -// -// It is a shortcut for Decoder.Decode() with the default options. -func Unmarshal(data []byte, v interface{}) error { - d := decoder{} - d.p.Reset(data) - return d.FromParser(v) -} - -// Decoder reads and decode a TOML document from an input stream. -type Decoder struct { - // input - r io.Reader - - // global settings - strict bool - - // toggles unmarshaler interface - unmarshalerInterface bool -} - -// NewDecoder creates a new Decoder that will read from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r: r} -} - -// DisallowUnknownFields causes the Decoder to return an error when the -// destination is a struct and the input contains a key that does not match a -// non-ignored field. -// -// In that case, the Decoder returns a StrictMissingError that can be used to -// retrieve the individual errors as well as generate a human readable -// description of the missing fields. -func (d *Decoder) DisallowUnknownFields() *Decoder { - d.strict = true - return d -} - -// EnableUnmarshalerInterface allows to enable unmarshaler interface. -// -// With this feature enabled, types implementing the unstable/Unmarshaler -// interface can be decoded from any structure of the document. It allows types -// that don't have a straightfoward TOML representation to provide their own -// decoding logic. -// -// Currently, types can only decode from a single value. Tables and array tables -// are not supported. -// -// *Unstable:* This method does not follow the compatibility guarantees of -// semver. It can be changed or removed without a new major version being -// issued. -func (d *Decoder) EnableUnmarshalerInterface() *Decoder { - d.unmarshalerInterface = true - return d -} - -// Decode the whole content of r into v. -// -// By default, values in the document that don't exist in the target Go value -// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. -// -// When a TOML local date, time, or date-time is decoded into a time.Time, its -// value is represented in time.Local timezone. Otherwise the appropriate Local* -// structure is used. For time values, precision up to the nanosecond is -// supported by truncating extra digits. -// -// Empty tables decoded in an interface{} create an empty initialized -// map[string]interface{}. -// -// Types implementing the encoding.TextUnmarshaler interface are decoded from a -// TOML string. 
-// -// When decoding a number, go-toml will return an error if the number is out of -// bounds for the target type (which includes negative numbers when decoding -// into an unsigned int). -// -// If an error occurs while decoding the content of the document, this function -// returns a toml.DecodeError, providing context about the issue. When using -// strict mode and a field is missing, a `toml.StrictMissingError` is -// returned. In any other case, this function returns a standard Go error. -// -// # Type mapping -// -// List of supported TOML types and their associated accepted Go types: -// -// String -> string -// Integer -> uint*, int*, depending on size -// Float -> float*, depending on size -// Boolean -> bool -// Offset Date-Time -> time.Time -// Local Date-time -> LocalDateTime, time.Time -// Local Date -> LocalDate, time.Time -// Local Time -> LocalTime, time.Time -// Array -> slice and array, depending on element types -// Table -> map and struct -// Inline Table -> same as Table -// Array of Tables -> same as Array and Table -func (d *Decoder) Decode(v interface{}) error { - b, err := io.ReadAll(d.r) - if err != nil { - return fmt.Errorf("toml: %w", err) - } - - dec := decoder{ - strict: strict{ - Enabled: d.strict, - }, - unmarshalerInterface: d.unmarshalerInterface, - } - dec.p.Reset(b) - - return dec.FromParser(v) -} - -type decoder struct { - // Which parser instance is in use for this decoding session. - p unstable.Parser - - // Flag indicating that the current expression is stashed. - // If set to true, calling nextExpr will not actually pull a new expression - // but turn off the flag instead. - stashedExpr bool - - // Skip expressions until a table is found. This is set to true when a - // table could not be created (missing field in map), so all KV expressions - // need to be skipped. - skipUntilTable bool - - // Flag indicating that the current array/slice table should be cleared because - // it is the first encounter of an array table. - clearArrayTable bool - - // Tracks position in Go arrays. - // This is used when decoding [[array tables]] into Go arrays. Given array - // tables are separate TOML expressions, we need to keep track of where we - // are in the Go array, as we can't just introspect its size. - arrayIndexes map[reflect.Value]int - - // Tracks keys that have been seen, with which type. - seen tracker.SeenTracker - - // Strict mode - strict strict - - // Flag that enables/disables the unmarshaler interface. - unmarshalerInterface bool - - // Current context for the error.
- errorContext *errorContext -} - -type errorContext struct { - Struct reflect.Type - Field []int -} - -func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { - return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target)) -} - -func (d *decoder) typeMismatchString(toml string, target reflect.Type) string { - if d.errorContext != nil && d.errorContext.Struct != nil { - ctx := d.errorContext - f := ctx.Struct.FieldByIndex(ctx.Field) - return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) - } - return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target) -} - -func (d *decoder) expr() *unstable.Node { - return d.p.Expression() -} - -func (d *decoder) nextExpr() bool { - if d.stashedExpr { - d.stashedExpr = false - return true - } - return d.p.NextExpression() -} - -func (d *decoder) stashExpr() { - d.stashedExpr = true -} - -func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int { - if d.arrayIndexes == nil { - d.arrayIndexes = make(map[reflect.Value]int, 1) - } - - idx, ok := d.arrayIndexes[v] - - if !ok { - d.arrayIndexes[v] = 0 - } else if shouldAppend { - idx++ - d.arrayIndexes[v] = idx - } - - return idx -} - -func (d *decoder) FromParser(v interface{}) error { - r := reflect.ValueOf(v) - if r.Kind() != reflect.Ptr { - return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind()) - } - - if r.IsNil() { - return fmt.Errorf("toml: decoding pointer target cannot be nil") - } - - r = r.Elem() - if r.Kind() == reflect.Interface && r.IsNil() { - newMap := map[string]interface{}{} - r.Set(reflect.ValueOf(newMap)) - } - - err := d.fromParser(r) - if err == nil { - return d.strict.Error(d.p.Data()) - } - - var e *unstable.ParserError - if errors.As(err, &e) { - return wrapDecodeError(d.p.Data(), e) - } - - return err -} - -func (d *decoder) fromParser(root reflect.Value) error { - for d.nextExpr() { - err := d.handleRootExpression(d.expr(), root) - if err != nil { - return err - } - } - - return d.p.Error() -} - -/* -Rules for the unmarshal code: - -- The stack is used to keep track of which values need to be set where. -- handle* functions <=> switch on a given unstable.Kind. -- unmarshalX* functions need to unmarshal a node of kind X. -- An "object" is either a struct or a map. 
-*/ - -func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { - var x reflect.Value - var err error - var first bool // used to clear array tables on first use - - if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { - first, err = d.seen.CheckExpression(expr) - if err != nil { - return err - } - } - - switch expr.Kind { - case unstable.KeyValue: - if d.skipUntilTable { - return nil - } - x, err = d.handleKeyValue(expr, v) - case unstable.Table: - d.skipUntilTable = false - d.strict.EnterTable(expr) - x, err = d.handleTable(expr.Key(), v) - case unstable.ArrayTable: - d.skipUntilTable = false - d.strict.EnterArrayTable(expr) - d.clearArrayTable = first - x, err = d.handleArrayTable(expr.Key(), v) - default: - panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) - } - - if d.skipUntilTable { - if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable { - d.strict.MissingTable(expr) - } - } else if err == nil && x.IsValid() { - v.Set(x) - } - - return err -} - -func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - if key.Next() { - return d.handleArrayTablePart(key, v) - } - return d.handleKeyValues(v) -} - -func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - switch v.Kind() { - case reflect.Interface: - elem := v.Elem() - if !elem.IsValid() { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if elem.Kind() == reflect.Slice { - if elem.Type() != sliceInterfaceType { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if !elem.CanSet() { - nelem := reflect.New(sliceInterfaceType).Elem() - nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) - reflect.Copy(nelem, elem) - elem = nelem - } - if d.clearArrayTable && elem.Len() > 0 { - elem.SetLen(0) - d.clearArrayTable = false - } - } - return d.handleArrayTableCollectionLast(key, elem) - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - elem = ptr.Elem() - } - - elem, err := d.handleArrayTableCollectionLast(key, elem) - if err != nil { - return reflect.Value{}, err - } - v.Elem().Set(elem) - - return v, nil - case reflect.Slice: - if d.clearArrayTable && v.Len() > 0 { - v.SetLen(0) - d.clearArrayTable = false - } - elemType := v.Type().Elem() - var elem reflect.Value - if elemType.Kind() == reflect.Interface { - elem = makeMapStringInterface() - } else { - elem = reflect.New(elemType).Elem() - } - elem2, err := d.handleArrayTable(key, elem) - if err != nil { - return reflect.Value{}, err - } - if elem2.IsValid() { - elem = elem2 - } - return reflect.Append(v, elem), nil - case reflect.Array: - idx := d.arrayIndex(true, v) - if idx >= v.Len() { - return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) - } - elem := v.Index(idx) - _, err := d.handleArrayTable(key, elem) - return v, err - default: - return reflect.Value{}, d.typeMismatchError("array table", v.Type()) - } -} - -// When parsing an array table expression, each part of the key needs to be -// evaluated like a normal key, but if it returns a collection, it also needs to -// point to the last element of the collection. Unless it is the last part of -// the key, then it needs to create a new element at the end.
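// A small sketch of the [[array table]] decoding behaviour implemented here:
// each [[servers]] expression appends one element to the target slice. The
// types and input are invented for illustration.
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	type Server struct {
		Name string `toml:"name"`
	}
	var out struct {
		Servers []Server `toml:"servers"`
	}

	input := "[[servers]]\nname = 'a'\n\n[[servers]]\nname = 'b'\n"
	if err := toml.Unmarshal([]byte(input), &out); err != nil {
		panic(err)
	}
	fmt.Println(len(out.Servers)) // 2
}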
-func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - if key.IsLast() { - return d.handleArrayTableCollectionLast(key, v) - } - - switch v.Kind() { - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - elem = ptr.Elem() - } - - elem, err := d.handleArrayTableCollection(key, elem) - if err != nil { - return reflect.Value{}, err - } - if elem.IsValid() { - v.Elem().Set(elem) - } - - return v, nil - case reflect.Slice: - elem := v.Index(v.Len() - 1) - x, err := d.handleArrayTable(key, elem) - if err != nil || d.skipUntilTable { - return reflect.Value{}, err - } - if x.IsValid() { - elem.Set(x) - } - - return v, err - case reflect.Array: - idx := d.arrayIndex(false, v) - if idx >= v.Len() { - return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) - } - elem := v.Index(idx) - _, err := d.handleArrayTable(key, elem) - return v, err - } - - return d.handleArrayTable(key, v) -} - -func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { - var rv reflect.Value - - // First, dispatch over v to make sure it is a valid object. - // There is no guarantee over what it could be. - switch v.Kind() { - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - v.Set(reflect.New(v.Type().Elem())) - } - elem = v.Elem() - return d.handleKeyPart(key, elem, nextFn, makeFn) - case reflect.Map: - vt := v.Type() - - // Create the key for the map element. Convert to key type. - mk, err := d.keyFromData(vt.Key(), key.Node().Data) - if err != nil { - return reflect.Value{}, err - } - - // If the map does not exist, create it. - if v.IsNil() { - vt := v.Type() - v = reflect.MakeMap(vt) - rv = v - } - - mv := v.MapIndex(mk) - set := false - if !mv.IsValid() { - // If there is no value in the map, create a new one according to - // the map type. If the element type is interface, create either a - // map[string]interface{} or a []interface{} depending on whether - // this is the last part of the array table key. 
- - t := vt.Elem() - if t.Kind() == reflect.Interface { - mv = makeFn() - } else { - mv = reflect.New(t).Elem() - } - set = true - } else if mv.Kind() == reflect.Interface { - mv = mv.Elem() - if !mv.IsValid() { - mv = makeFn() - } - set = true - } else if !mv.CanAddr() { - vt := v.Type() - t := vt.Elem() - oldmv := mv - mv = reflect.New(t).Elem() - mv.Set(oldmv) - set = true - } - - x, err := nextFn(key, mv) - if err != nil { - return reflect.Value{}, err - } - - if x.IsValid() { - mv = x - set = true - } - - if set { - v.SetMapIndex(mk, mv) - } - case reflect.Struct: - path, found := structFieldPath(v, string(key.Node().Data)) - if !found { - d.skipUntilTable = true - return reflect.Value{}, nil - } - - if d.errorContext == nil { - d.errorContext = new(errorContext) - } - t := v.Type() - d.errorContext.Struct = t - d.errorContext.Field = path - - f := fieldByIndex(v, path) - x, err := nextFn(key, f) - if err != nil || d.skipUntilTable { - return reflect.Value{}, err - } - if x.IsValid() { - f.Set(x) - } - d.errorContext.Field = nil - d.errorContext.Struct = nil - case reflect.Interface: - if v.Elem().IsValid() { - v = v.Elem() - } else { - v = makeMapStringInterface() - } - - x, err := d.handleKeyPart(key, v, nextFn, makeFn) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - v = x - } - rv = v - default: - panic(fmt.Errorf("unhandled part: %s", v.Kind())) - } - - return rv, nil -} - -// HandleArrayTablePart navigates the Go structure v using the key v. It is -// only used for the prefix (non-last) parts of an array-table. When -// encountering a collection, it should go to the last element. -func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - var makeFn valueMakerFn - if key.IsLast() { - makeFn = makeSliceInterface - } else { - makeFn = makeMapStringInterface - } - return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn) -} - -// HandleTable returns a reference when it has checked the next expression but -// cannot handle it. -func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - if v.Kind() == reflect.Slice { - if v.Len() == 0 { - return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") - } - elem := v.Index(v.Len() - 1) - x, err := d.handleTable(key, elem) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - elem.Set(x) - } - return reflect.Value{}, nil - } - if key.Next() { - // Still scoping the key - return d.handleTablePart(key, v) - } - // Done scoping the key. - // Now handle all the key-value expressions in this table. - return d.handleKeyValues(v) -} - -// Handle root expressions until the end of the document or the next -// non-key-value. -func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { - var rv reflect.Value - for d.nextExpr() { - expr := d.expr() - if expr.Kind != unstable.KeyValue { - // Stash the expression so that fromParser can just loop and use - // the right handler. - // We could just recurse ourselves here, but at least this gives a - // chance to pop the stack a bit. 
- d.stashExpr() - break - } - - _, err := d.seen.CheckExpression(expr) - if err != nil { - return reflect.Value{}, err - } - - x, err := d.handleKeyValue(expr, v) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - v = x - rv = x - } - } - return rv, nil -} - -type ( - handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) - valueMakerFn func() reflect.Value -) - -func makeMapStringInterface() reflect.Value { - return reflect.MakeMap(mapStringInterfaceType) -} - -func makeSliceInterface() reflect.Value { - return reflect.MakeSlice(sliceInterfaceType, 0, 16) -} - -func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { - return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) -} - -func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) { - // Special case for time, because we allow unmarshaling to it from - // different kinds of AST nodes. - if v.Type() == timeType { - return false, nil - } - - if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { - err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) - if err != nil { - return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err) - } - - return true, nil - } - - return false, nil -} - -func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { - for v.Kind() == reflect.Ptr { - v = initAndDereferencePointer(v) - } - - if d.unmarshalerInterface { - if v.CanAddr() && v.Addr().CanInterface() { - if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { - return outi.UnmarshalTOML(value) - } - } - } - - ok, err := d.tryTextUnmarshaler(value, v) - if ok || err != nil { - return err - } - - switch value.Kind { - case unstable.String: - return d.unmarshalString(value, v) - case unstable.Integer: - return d.unmarshalInteger(value, v) - case unstable.Float: - return d.unmarshalFloat(value, v) - case unstable.Bool: - return d.unmarshalBool(value, v) - case unstable.DateTime: - return d.unmarshalDateTime(value, v) - case unstable.LocalDate: - return d.unmarshalLocalDate(value, v) - case unstable.LocalTime: - return d.unmarshalLocalTime(value, v) - case unstable.LocalDateTime: - return d.unmarshalLocalDateTime(value, v) - case unstable.InlineTable: - return d.unmarshalInlineTable(value, v) - case unstable.Array: - return d.unmarshalArray(value, v) - default: - panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) - } -} - -func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error { - switch v.Kind() { - case reflect.Slice: - if v.IsNil() { - v.Set(reflect.MakeSlice(v.Type(), 0, 16)) - } else { - v.SetLen(0) - } - case reflect.Array: - // arrays are always initialized - case reflect.Interface: - elem := v.Elem() - if !elem.IsValid() { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if elem.Kind() == reflect.Slice { - if elem.Type() != sliceInterfaceType { - elem = reflect.New(sliceInterfaceType).Elem() - elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) - } else if !elem.CanSet() { - nelem := reflect.New(sliceInterfaceType).Elem() - nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) - reflect.Copy(nelem, elem) - elem = nelem - } - } - err := d.unmarshalArray(array, elem) - if err != nil { - return err - } - v.Set(elem) - return nil - default: - // TODO: use newDecodeError, but first the parser needs to fill - // array.Data.
- return d.typeMismatchError("array", v.Type()) - } - - elemType := v.Type().Elem() - - it := array.Children() - idx := 0 - for it.Next() { - n := it.Node() - - // TODO: optimize - if v.Kind() == reflect.Slice { - elem := reflect.New(elemType).Elem() - - err := d.handleValue(n, elem) - if err != nil { - return err - } - - v.Set(reflect.Append(v, elem)) - } else { // array - if idx >= v.Len() { - return nil - } - elem := v.Index(idx) - err := d.handleValue(n, elem) - if err != nil { - return err - } - idx++ - } - } - - return nil -} - -func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error { - // Make sure v is an initialized object. - switch v.Kind() { - case reflect.Map: - if v.IsNil() { - v.Set(reflect.MakeMap(v.Type())) - } - case reflect.Struct: - // structs are always initialized. - case reflect.Interface: - elem := v.Elem() - if !elem.IsValid() { - elem = makeMapStringInterface() - v.Set(elem) - } - return d.unmarshalInlineTable(itable, elem) - default: - return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind()) - } - - it := itable.Children() - for it.Next() { - n := it.Node() - - x, err := d.handleKeyValue(n, v) - if err != nil { - return err - } - if x.IsValid() { - v = x - } - } - - return nil -} - -func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { - dt, err := parseDateTime(value.Data) - if err != nil { - return err - } - - v.Set(reflect.ValueOf(dt)) - return nil -} - -func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { - ld, err := parseLocalDate(value.Data) - if err != nil { - return err - } - - if v.Type() == timeType { - cast := ld.AsTime(time.Local) - v.Set(reflect.ValueOf(cast)) - return nil - } - - v.Set(reflect.ValueOf(ld)) - - return nil -} - -func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { - lt, rest, err := parseLocalTime(value.Data) - if err != nil { - return err - } - - if len(rest) > 0 { - return unstable.NewParserError(rest, "extra characters at the end of a local time") - } - - v.Set(reflect.ValueOf(lt)) - return nil -} - -func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { - ldt, rest, err := parseLocalDateTime(value.Data) - if err != nil { - return err - } - - if len(rest) > 0 { - return unstable.NewParserError(rest, "extra characters at the end of a local date time") - } - - if v.Type() == timeType { - cast := ldt.AsTime(time.Local) - - v.Set(reflect.ValueOf(cast)) - return nil - } - - v.Set(reflect.ValueOf(ldt)) - - return nil -} - -func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error { - b := value.Data[0] == 't' - - switch v.Kind() { - case reflect.Bool: - v.SetBool(b) - case reflect.Interface: - v.Set(reflect.ValueOf(b)) - default: - return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b) - } - - return nil -} - -func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { - f, err := parseFloat(value.Data) - if err != nil { - return err - } - - switch v.Kind() { - case reflect.Float64: - v.SetFloat(f) - case reflect.Float32: - if f > math.MaxFloat32 { - return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f) - } - v.SetFloat(f) - case reflect.Interface: - v.Set(reflect.ValueOf(f)) - default: - return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind()) - } - - return nil -} - -const ( - maxInt = int64(^uint(0) >> 1) - minInt 
= -maxInt - 1 -) - -// Maximum value of uint for decoding. Currently the decoder parses the integer -// into an int64. As a result, on architectures where uint is 64 bits, the -// effective maximum uint we can decode is the maximum of int64. On -// architectures where uint is 32 bits, the maximum value we can decode is -// lower: the maximum of uint32. I didn't find a way to figure out this value at -// compile time, so it is computed during initialization. -var maxUint int64 = math.MaxInt64 - -func init() { - m := uint64(^uint(0)) - if m < uint64(maxUint) { - maxUint = int64(m) - } -} - -func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { - kind := v.Kind() - if kind == reflect.Float32 || kind == reflect.Float64 { - return d.unmarshalFloat(value, v) - } - - i, err := parseInteger(value.Data) - if err != nil { - return err - } - - var r reflect.Value - - switch kind { - case reflect.Int64: - v.SetInt(i) - return nil - case reflect.Int32: - if i < math.MinInt32 || i > math.MaxInt32 { - return fmt.Errorf("toml: number %d does not fit in an int32", i) - } - - r = reflect.ValueOf(int32(i)) - case reflect.Int16: - if i < math.MinInt16 || i > math.MaxInt16 { - return fmt.Errorf("toml: number %d does not fit in an int16", i) - } - - r = reflect.ValueOf(int16(i)) - case reflect.Int8: - if i < math.MinInt8 || i > math.MaxInt8 { - return fmt.Errorf("toml: number %d does not fit in an int8", i) - } - - r = reflect.ValueOf(int8(i)) - case reflect.Int: - if i < minInt || i > maxInt { - return fmt.Errorf("toml: number %d does not fit in an int", i) - } - - r = reflect.ValueOf(int(i)) - case reflect.Uint64: - if i < 0 { - return fmt.Errorf("toml: negative number %d does not fit in an uint64", i) - } - - r = reflect.ValueOf(uint64(i)) - case reflect.Uint32: - if i < 0 || i > math.MaxUint32 { - return fmt.Errorf("toml: negative number %d does not fit in an uint32", i) - } - - r = reflect.ValueOf(uint32(i)) - case reflect.Uint16: - if i < 0 || i > math.MaxUint16 { - return fmt.Errorf("toml: negative number %d does not fit in an uint16", i) - } - - r = reflect.ValueOf(uint16(i)) - case reflect.Uint8: - if i < 0 || i > math.MaxUint8 { - return fmt.Errorf("toml: negative number %d does not fit in an uint8", i) - } - - r = reflect.ValueOf(uint8(i)) - case reflect.Uint: - if i < 0 || i > maxUint { - return fmt.Errorf("toml: negative number %d does not fit in an uint", i) - } - - r = reflect.ValueOf(uint(i)) - case reflect.Interface: - r = reflect.ValueOf(i) - default: - return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type())) - } - - if !r.Type().AssignableTo(v.Type()) { - r = r.Convert(v.Type()) - } - - v.Set(r) - - return nil -} - -func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { - switch v.Kind() { - case reflect.String: - v.SetString(string(value.Data)) - case reflect.Interface: - v.Set(reflect.ValueOf(string(value.Data))) - default: - return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type())) - } - - return nil -} - -func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) { - d.strict.EnterKeyValue(expr) - - v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) - if d.skipUntilTable { - d.strict.MissingField(expr) - d.skipUntilTable = false - } - - d.strict.ExitKeyValue(expr) - - return v, err -} - -func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { - if 
key.Next() { - // Still scoping the key - return d.handleKeyValuePart(key, value, v) - } - // Done scoping the key. - // v is whatever Go value we need to fill. - return reflect.Value{}, d.handleValue(value, v) -} - -func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, error) { - switch { - case stringType.AssignableTo(keyType): - return reflect.ValueOf(string(data)), nil - - case stringType.ConvertibleTo(keyType): - return reflect.ValueOf(string(data)).Convert(keyType), nil - - case keyType.Implements(textUnmarshalerType): - mk := reflect.New(keyType.Elem()) - if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) - } - return mk, nil - - case reflect.PointerTo(keyType).Implements(textUnmarshalerType): - mk := reflect.New(keyType) - if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) - } - return mk.Elem(), nil - - case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: - key, err := strconv.ParseInt(string(data), 10, 64) - if err != nil { - return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) - } - return reflect.ValueOf(key).Convert(keyType), nil - case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: - key, err := strconv.ParseUint(string(data), 10, 64) - if err != nil { - return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) - } - return reflect.ValueOf(key).Convert(keyType), nil - - case keyType.Kind() == reflect.Float32: - key, err := strconv.ParseFloat(string(data), 32) - if err != nil { - return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) - } - return reflect.ValueOf(float32(key)), nil - - case keyType.Kind() == reflect.Float64: - key, err := strconv.ParseFloat(string(data), 64) - if err != nil { - return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) - } - return reflect.ValueOf(float64(key)), nil - } - return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) -} - -func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { - // contains the replacement for v - var rv reflect.Value - - // First, dispatch over v to make sure it is a valid object. - // There is no guarantee over what it could be. - switch v.Kind() { - case reflect.Map: - vt := v.Type() - - mk, err := d.keyFromData(vt.Key(), key.Node().Data) - if err != nil { - return reflect.Value{}, err - } - - // If the map does not exist, create it. 
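// Illustrative sketch (not from this file): keyFromData above is what lets
// maps with non-string keys decode from TOML. Assuming the released
// go-toml/v2 module:
//
//	var m map[int]string
//	_ = toml.Unmarshal([]byte("1 = 'one'\n2 = 'two'"), &m)
//	// The map key kind is int, so keyFromData parses "1" and "2" with
//	// strconv.ParseInt; m == map[int]string{1: "one", 2: "two"}.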
- if v.IsNil() { - v = reflect.MakeMap(vt) - rv = v - } - - mv := v.MapIndex(mk) - set := false - if !mv.IsValid() || key.IsLast() { - set = true - mv = reflect.New(v.Type().Elem()).Elem() - } - - nv, err := d.handleKeyValueInner(key, value, mv) - if err != nil { - return reflect.Value{}, err - } - if nv.IsValid() { - mv = nv - set = true - } - - if set { - v.SetMapIndex(mk, mv) - } - case reflect.Struct: - path, found := structFieldPath(v, string(key.Node().Data)) - if !found { - d.skipUntilTable = true - break - } - - if d.errorContext == nil { - d.errorContext = new(errorContext) - } - t := v.Type() - d.errorContext.Struct = t - d.errorContext.Field = path - - f := fieldByIndex(v, path) - - if !f.CanAddr() { - // If the field is not addressable, need to take a slower path and - // make a copy of the struct itself to a new location. - nvp := reflect.New(v.Type()) - nvp.Elem().Set(v) - v = nvp.Elem() - _, err := d.handleKeyValuePart(key, value, v) - if err != nil { - return reflect.Value{}, err - } - return nvp.Elem(), nil - } - x, err := d.handleKeyValueInner(key, value, f) - if err != nil { - return reflect.Value{}, err - } - - if x.IsValid() { - f.Set(x) - } - d.errorContext.Struct = nil - d.errorContext.Field = nil - case reflect.Interface: - v = v.Elem() - - // Following encoding/json: decoding an object into an - // interface{}, it needs to always hold a - // map[string]interface{}. This is for the types to be - // consistent whether a previous value was set or not. - if !v.IsValid() || v.Type() != mapStringInterfaceType { - v = makeMapStringInterface() - } - - x, err := d.handleKeyValuePart(key, value, v) - if err != nil { - return reflect.Value{}, err - } - if x.IsValid() { - v = x - } - rv = v - case reflect.Ptr: - elem := v.Elem() - if !elem.IsValid() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - rv = v - elem = ptr.Elem() - } - - elem2, err := d.handleKeyValuePart(key, value, elem) - if err != nil { - return reflect.Value{}, err - } - if elem2.IsValid() { - elem = elem2 - } - v.Elem().Set(elem) - default: - return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind()) - } - - return rv, nil -} - -func initAndDereferencePointer(v reflect.Value) reflect.Value { - var elem reflect.Value - if v.IsNil() { - ptr := reflect.New(v.Type().Elem()) - v.Set(ptr) - } - elem = v.Elem() - return elem -} - -// Same as reflect.Value.FieldByIndex, but creates pointers if needed. 
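// Illustrative sketch (not from this file): the pointer-creating walk below
// is what allocates intermediate nil struct pointers during decoding.
// Assuming the released go-toml/v2 module:
//
//	type Inner struct{ Port int }
//	type Config struct{ Server *Inner }
//
//	var c Config // c.Server == nil
//	_ = toml.Unmarshal([]byte("[server]\nport = 8080"), &c)
//	// fieldByIndex finds the nil *Inner on the path to Port, calls
//	// v.Set(reflect.New(...)) for it, and c.Server.Port ends up 8080.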
-func fieldByIndex(v reflect.Value, path []int) reflect.Value { - for _, x := range path { - v = v.Field(x) - - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - } - return v -} - -type fieldPathsMap = map[string][]int - -var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap - -func structFieldPath(v reflect.Value, name string) ([]int, bool) { - t := v.Type() - - cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) - fieldPaths, ok := cache[danger.MakeTypeID(t)] - - if !ok { - fieldPaths = map[string][]int{} - - forEachField(t, nil, func(name string, path []int) { - fieldPaths[name] = path - // extra copy for the case-insensitive match - fieldPaths[strings.ToLower(name)] = path - }) - - newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) - newCache[danger.MakeTypeID(t)] = fieldPaths - for k, v := range cache { - newCache[k] = v - } - globalFieldPathsCache.Store(newCache) - } - - path, ok := fieldPaths[name] - if !ok { - path, ok = fieldPaths[strings.ToLower(name)] - } - return path, ok -} - -func forEachField(t reflect.Type, path []int, do func(name string, path []int)) { - n := t.NumField() - for i := 0; i < n; i++ { - f := t.Field(i) - - if !f.Anonymous && f.PkgPath != "" { - // only consider exported fields. - continue - } - - fieldPath := append(path, i) - fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] - - name := f.Tag.Get("toml") - if name == "-" { - continue - } - - if i := strings.IndexByte(name, ','); i >= 0 { - name = name[:i] - } - - if f.Anonymous && name == "" { - t2 := f.Type - if t2.Kind() == reflect.Ptr { - t2 = t2.Elem() - } - - if t2.Kind() == reflect.Struct { - forEachField(t2, fieldPath, do) - } - continue - } - - if name == "" { - name = f.Name - } - - do(name, fieldPath) - } -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go deleted file mode 100644 index f526bf2c09..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go +++ /dev/null @@ -1,136 +0,0 @@ -package unstable - -import ( - "fmt" - "unsafe" - - "github.com/pelletier/go-toml/v2/internal/danger" -) - -// Iterator over a sequence of nodes. -// -// Starts uninitialized, you need to call Next() first. -// -// For example: -// -// it := n.Children() -// for it.Next() { -// n := it.Node() -// // do something with n -// } -type Iterator struct { - started bool - node *Node -} - -// Next moves the iterator forward and returns true if points to a -// node, false otherwise. -func (c *Iterator) Next() bool { - if !c.started { - c.started = true - } else if c.node.Valid() { - c.node = c.node.Next() - } - return c.node.Valid() -} - -// IsLast returns true if the current node of the iterator is the last -// one. Subsequent calls to Next() will return false. -func (c *Iterator) IsLast() bool { - return c.node.next == 0 -} - -// Node returns a pointer to the node pointed at by the iterator. -func (c *Iterator) Node() *Node { - return c.node -} - -// Node in a TOML expression AST. -// -// Depending on Kind, its sequence of children should be interpreted -// differently. -// -// - Array have one child per element in the array. -// - InlineTable have one child per key-value in the table (each of kind -// InlineTable). -// - KeyValue have at least two children. The first one is the value. The rest -// make a potentially dotted key. 
-// - Table and ArrayTable's children represent a dotted key (same as -// KeyValue, but without the first node being the value). -// -// When relevant, Raw describes the range of bytes this node is referring to in -// the input document. Use Parser.Raw() to retrieve the actual bytes. -type Node struct { - Kind Kind - Raw Range // Raw bytes from the input. - Data []byte // Node value (either allocated or referencing the input). - - // References to other nodes, as offsets in the backing array - // from this node. References can go backward, so those can be - // negative. - next int // 0 if last element - child int // 0 if no child -} - -// Range of bytes in the document. -type Range struct { - Offset uint32 - Length uint32 -} - -// Next returns a pointer to the next node, or nil if there is no next node. -func (n *Node) Next() *Node { - if n.next == 0 { - return nil - } - ptr := unsafe.Pointer(n) - size := unsafe.Sizeof(Node{}) - return (*Node)(danger.Stride(ptr, size, n.next)) -} - -// Child returns a pointer to the first child node of this node. Other children -// can be accessed calling Next on the first child. Returns an nil if this Node -// has no child. -func (n *Node) Child() *Node { - if n.child == 0 { - return nil - } - ptr := unsafe.Pointer(n) - size := unsafe.Sizeof(Node{}) - return (*Node)(danger.Stride(ptr, size, n.child)) -} - -// Valid returns true if the node's kind is set (not to Invalid). -func (n *Node) Valid() bool { - return n != nil -} - -// Key returns the children nodes making the Key on a supported node. Panics -// otherwise. They are guaranteed to be all be of the Kind Key. A simple key -// would return just one element. -func (n *Node) Key() Iterator { - switch n.Kind { - case KeyValue: - value := n.Child() - if !value.Valid() { - panic(fmt.Errorf("KeyValue should have at least two children")) - } - return Iterator{node: value.Next()} - case Table, ArrayTable: - return Iterator{node: n.Child()} - default: - panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) - } -} - -// Value returns a pointer to the value node of a KeyValue. -// Guaranteed to be non-nil. Panics if not called on a KeyValue node, -// or if the Children are malformed. -func (n *Node) Value() *Node { - return n.Child() -} - -// Children returns an iterator over a node's children. -func (n *Node) Children() Iterator { - return Iterator{node: n.Child()} -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go deleted file mode 100644 index 9538e30df9..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go +++ /dev/null @@ -1,71 +0,0 @@ -package unstable - -// root contains a full AST. -// -// It is immutable once constructed with Builder. -type root struct { - nodes []Node -} - -// Iterator over the top level nodes. 
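// Illustrative sketch (not from this file): walking this AST with the
// exported unstable API (Parser is defined later in this diff):
//
//	p := unstable.Parser{}
//	p.Reset([]byte("a.b = 1"))
//	for p.NextExpression() {
//		e := p.Expression() // e.Kind == KeyValue
//		it := e.Key()       // iterates the dotted key parts
//		for it.Next() {
//			fmt.Printf("%s ", it.Node().Data) // "a b "
//		}
//		fmt.Println(e.Value().Kind) // Integer
//	}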
-func (r *root) Iterator() Iterator { - it := Iterator{} - if len(r.nodes) > 0 { - it.node = &r.nodes[0] - } - return it -} - -func (r *root) at(idx reference) *Node { - return &r.nodes[idx] -} - -type reference int - -const invalidReference reference = -1 - -func (r reference) Valid() bool { - return r != invalidReference -} - -type builder struct { - tree root - lastIdx int -} - -func (b *builder) Tree() *root { - return &b.tree -} - -func (b *builder) NodeAt(ref reference) *Node { - return b.tree.at(ref) -} - -func (b *builder) Reset() { - b.tree.nodes = b.tree.nodes[:0] - b.lastIdx = 0 -} - -func (b *builder) Push(n Node) reference { - b.lastIdx = len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - return reference(b.lastIdx) -} - -func (b *builder) PushAndChain(n Node) reference { - newIdx := len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - if b.lastIdx >= 0 { - b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx - } - b.lastIdx = newIdx - return reference(b.lastIdx) -} - -func (b *builder) AttachChild(parent reference, child reference) { - b.tree.nodes[parent].child = int(child) - int(parent) -} - -func (b *builder) Chain(from reference, to reference) { - b.tree.nodes[from].next = int(to) - int(from) -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go deleted file mode 100644 index 7ff26c53c7..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package unstable provides APIs that do not meet the backward compatibility -// guarantees yet. -package unstable diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go deleted file mode 100644 index ff9df1bef8..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go +++ /dev/null @@ -1,71 +0,0 @@ -package unstable - -import "fmt" - -// Kind represents the type of TOML structure contained in a given Node. -type Kind int - -const ( - // Meta - Invalid Kind = iota - Comment - Key - - // Top level structures - Table - ArrayTable - KeyValue - - // Containers values - Array - InlineTable - - // Values - String - Bool - Float - Integer - LocalDate - LocalTime - LocalDateTime - DateTime -) - -// String implementation of fmt.Stringer. 
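// Illustrative sketch (not from this file): Kind values print through the
// String method below, which is convenient when tracing a parse:
//
//	p.Reset([]byte("[table]\nk = 'v'"))
//	for p.NextExpression() {
//		fmt.Println(p.Expression().Kind) // Table, then KeyValue
//	}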
-func (k Kind) String() string { - switch k { - case Invalid: - return "Invalid" - case Comment: - return "Comment" - case Key: - return "Key" - case Table: - return "Table" - case ArrayTable: - return "ArrayTable" - case KeyValue: - return "KeyValue" - case Array: - return "Array" - case InlineTable: - return "InlineTable" - case String: - return "String" - case Bool: - return "Bool" - case Float: - return "Float" - case Integer: - return "Integer" - case LocalDate: - return "LocalDate" - case LocalTime: - return "LocalTime" - case LocalDateTime: - return "LocalDateTime" - case DateTime: - return "DateTime" - } - panic(fmt.Errorf("Kind.String() not implemented for '%d'", k)) -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go deleted file mode 100644 index 50358a44ff..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go +++ /dev/null @@ -1,1245 +0,0 @@ -package unstable - -import ( - "bytes" - "fmt" - "unicode" - - "github.com/pelletier/go-toml/v2/internal/characters" - "github.com/pelletier/go-toml/v2/internal/danger" -) - -// ParserError describes an error relative to the content of the document. -// -// It cannot outlive the instance of Parser it refers to, and may cause panics -// if the parser is reset. -type ParserError struct { - Highlight []byte - Message string - Key []string // optional -} - -// Error is the implementation of the error interface. -func (e *ParserError) Error() string { - return e.Message -} - -// NewParserError is a convenience function to create a ParserError -// -// Warning: Highlight needs to be a subslice of Parser.data, so only slices -// returned by Parser.Raw are valid candidates. -func NewParserError(highlight []byte, format string, args ...interface{}) error { - return &ParserError{ - Highlight: highlight, - Message: fmt.Errorf(format, args...).Error(), - } -} - -// Parser scans over a TOML-encoded document and generates an iterative AST. -// -// To prime the Parser, first reset it with the contents of a TOML document. -// Then, process all top-level expressions sequentially. See Example. -// -// Don't forget to check Error() after you're done parsing. -// -// Each top-level expression needs to be fully processed before calling -// NextExpression() again. Otherwise, calls to various Node methods may panic if -// the parser has moved on the next expression. -// -// For performance reasons, go-toml doesn't make a copy of the input bytes to -// the parser. Make sure to copy all the bytes you need to outlive the slice -// given to the parser. -type Parser struct { - data []byte - builder builder - ref reference - left []byte - err error - first bool - - KeepComments bool -} - -// Data returns the slice provided to the last call to Reset. -func (p *Parser) Data() []byte { - return p.data -} - -// Range returns a range description that corresponds to a given slice of the -// input. If the argument is not a subslice of the parser input, this function -// panics. -func (p *Parser) Range(b []byte) Range { - return Range{ - Offset: uint32(danger.SubsliceOffset(p.data, b)), - Length: uint32(len(b)), - } -} - -// Raw returns the slice corresponding to the bytes in the given range. -func (p *Parser) Raw(raw Range) []byte { - return p.data[raw.Offset : raw.Offset+raw.Length] -} - -// Reset brings the parser to its initial state for a given input. It wipes an -// reuses internal storage to reduce allocation. 
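// Illustrative sketch (not from this file): the driving loop described in the
// package comment above, with the final Error() check it calls for:
//
//	parser := unstable.Parser{}
//	parser.Reset([]byte(`name = "go-toml"`))
//	for parser.NextExpression() {
//		expr := parser.Expression()
//		fmt.Println(expr.Kind) // KeyValue
//	}
//	if err := parser.Error(); err != nil {
//		panic(err)
//	}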
-func (p *Parser) Reset(b []byte) { - p.builder.Reset() - p.ref = invalidReference - p.data = b - p.left = b - p.err = nil - p.first = true -} - -// NextExpression parses the next top-level expression. If an expression was -// successfully parsed, it returns true. If the parser is at the end of the -// document or an error occurred, it returns false. -// -// Retrieve the parsed expression with Expression(). -func (p *Parser) NextExpression() bool { - if len(p.left) == 0 || p.err != nil { - return false - } - - p.builder.Reset() - p.ref = invalidReference - - for { - if len(p.left) == 0 || p.err != nil { - return false - } - - if !p.first { - p.left, p.err = p.parseNewline(p.left) - } - - if len(p.left) == 0 || p.err != nil { - return false - } - - p.ref, p.left, p.err = p.parseExpression(p.left) - - if p.err != nil { - return false - } - - p.first = false - - if p.ref.Valid() { - return true - } - } -} - -// Expression returns a pointer to the node representing the last successfully -// parsed expression. -func (p *Parser) Expression() *Node { - return p.builder.NodeAt(p.ref) -} - -// Error returns any error that has occurred during parsing. -func (p *Parser) Error() error { - return p.err -} - -// Position describes a position in the input. -type Position struct { - // Number of bytes from the beginning of the input. - Offset int - // Line number, starting at 1. - Line int - // Column number, starting at 1. - Column int -} - -// Shape describes the position of a range in the input. -type Shape struct { - Start Position - End Position -} - -func (p *Parser) position(b []byte) Position { - offset := danger.SubsliceOffset(p.data, b) - - lead := p.data[:offset] - - return Position{ - Offset: offset, - Line: bytes.Count(lead, []byte{'\n'}) + 1, - Column: len(lead) - bytes.LastIndex(lead, []byte{'\n'}), - } -} - -// Shape returns the shape of the given range in the input. Will -// panic if the range is not a subslice of the input. 
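// Illustrative sketch (not from this file): Shape turns a byte Range back
// into line/column positions, e.g. for error reporting. This assumes a node
// whose Raw field was populated by the parser (string values are):
//
//	v := parser.Expression().Value() // the "go-toml" String node
//	shape := parser.Shape(v.Raw)
//	fmt.Printf("value starts at line %d, column %d\n",
//		shape.Start.Line, shape.Start.Column)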
-func (p *Parser) Shape(r Range) Shape { - raw := p.Raw(r) - return Shape{ - Start: p.position(raw), - End: p.position(raw[r.Length:]), - } -} - -func (p *Parser) parseNewline(b []byte) ([]byte, error) { - if b[0] == '\n' { - return b[1:], nil - } - - if b[0] == '\r' { - _, rest, err := scanWindowsNewline(b) - return rest, err - } - - return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0]) -} - -func (p *Parser) parseComment(b []byte) (reference, []byte, error) { - ref := invalidReference - data, rest, err := scanComment(b) - if p.KeepComments && err == nil { - ref = p.builder.Push(Node{ - Kind: Comment, - Raw: p.Range(data), - Data: data, - }) - } - return ref, rest, err -} - -func (p *Parser) parseExpression(b []byte) (reference, []byte, error) { - // expression = ws [ comment ] - // expression =/ ws keyval ws [ comment ] - // expression =/ ws table ws [ comment ] - ref := invalidReference - - b = p.parseWhitespace(b) - - if len(b) == 0 { - return ref, b, nil - } - - if b[0] == '#' { - ref, rest, err := p.parseComment(b) - return ref, rest, err - } - - if b[0] == '\n' || b[0] == '\r' { - return ref, b, nil - } - - var err error - if b[0] == '[' { - ref, b, err = p.parseTable(b) - } else { - ref, b, err = p.parseKeyval(b) - } - - if err != nil { - return ref, nil, err - } - - b = p.parseWhitespace(b) - - if len(b) > 0 && b[0] == '#' { - cref, rest, err := p.parseComment(b) - if cref != invalidReference { - p.builder.Chain(ref, cref) - } - return ref, rest, err - } - - return ref, b, nil -} - -func (p *Parser) parseTable(b []byte) (reference, []byte, error) { - // table = std-table / array-table - if len(b) > 1 && b[1] == '[' { - return p.parseArrayTable(b) - } - - return p.parseStdTable(b) -} - -func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { - // array-table = array-table-open key array-table-close - // array-table-open = %x5B.5B ws ; [[ Double left square bracket - // array-table-close = ws %x5D.5D ; ]] Double right square bracket - ref := p.builder.Push(Node{ - Kind: ArrayTable, - }) - - b = b[2:] - b = p.parseWhitespace(b) - - k, b, err := p.parseKey(b) - if err != nil { - return ref, nil, err - } - - p.builder.AttachChild(ref, k) - b = p.parseWhitespace(b) - - b, err = expect(']', b) - if err != nil { - return ref, nil, err - } - - b, err = expect(']', b) - - return ref, b, err -} - -func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { - // std-table = std-table-open key std-table-close - // std-table-open = %x5B ws ; [ Left square bracket - // std-table-close = ws %x5D ; ] Right square bracket - ref := p.builder.Push(Node{ - Kind: Table, - }) - - b = b[1:] - b = p.parseWhitespace(b) - - key, b, err := p.parseKey(b) - if err != nil { - return ref, nil, err - } - - p.builder.AttachChild(ref, key) - - b = p.parseWhitespace(b) - - b, err = expect(']', b) - - return ref, b, err -} - -func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { - // keyval = key keyval-sep val - ref := p.builder.Push(Node{ - Kind: KeyValue, - }) - - key, b, err := p.parseKey(b) - if err != nil { - return invalidReference, nil, err - } - - // keyval-sep = ws %x3D ws ; = - - b = p.parseWhitespace(b) - - if len(b) == 0 { - return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") - } - - b, err = expect('=', b) - if err != nil { - return invalidReference, nil, err - } - - b = p.parseWhitespace(b) - - valRef, b, err := p.parseVal(b) - if err != nil { - return ref, b, err - } - - 
p.builder.Chain(valRef, key) - p.builder.AttachChild(ref, valRef) - - return ref, b, err -} - -//nolint:cyclop,funlen -func (p *Parser) parseVal(b []byte) (reference, []byte, error) { - // val = string / boolean / array / inline-table / date-time / float / integer - ref := invalidReference - - if len(b) == 0 { - return ref, nil, NewParserError(b, "expected value, not eof") - } - - var err error - c := b[0] - - switch c { - case '"': - var raw []byte - var v []byte - if scanFollowsMultilineBasicStringDelimiter(b) { - raw, v, b, err = p.parseMultilineBasicString(b) - } else { - raw, v, b, err = p.parseBasicString(b) - } - - if err == nil { - ref = p.builder.Push(Node{ - Kind: String, - Raw: p.Range(raw), - Data: v, - }) - } - - return ref, b, err - case '\'': - var raw []byte - var v []byte - if scanFollowsMultilineLiteralStringDelimiter(b) { - raw, v, b, err = p.parseMultilineLiteralString(b) - } else { - raw, v, b, err = p.parseLiteralString(b) - } - - if err == nil { - ref = p.builder.Push(Node{ - Kind: String, - Raw: p.Range(raw), - Data: v, - }) - } - - return ref, b, err - case 't': - if !scanFollowsTrue(b) { - return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") - } - - ref = p.builder.Push(Node{ - Kind: Bool, - Data: b[:4], - }) - - return ref, b[4:], nil - case 'f': - if !scanFollowsFalse(b) { - return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") - } - - ref = p.builder.Push(Node{ - Kind: Bool, - Data: b[:5], - }) - - return ref, b[5:], nil - case '[': - return p.parseValArray(b) - case '{': - return p.parseInlineTable(b) - default: - return p.parseIntOrFloatOrDateTime(b) - } -} - -func atmost(b []byte, n int) []byte { - if n >= len(b) { - return b - } - - return b[:n] -} - -func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { - v, rest, err := scanLiteralString(b) - if err != nil { - return nil, nil, nil, err - } - - return v, v[1 : len(v)-1], rest, nil -} - -func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { - // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close - // inline-table-open = %x7B ws ; { - // inline-table-close = ws %x7D ; } - // inline-table-sep = ws %x2C ws ; , Comma - // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] - parent := p.builder.Push(Node{ - Kind: InlineTable, - Raw: p.Range(b[:1]), - }) - - first := true - - var child reference - - b = b[1:] - - var err error - - for len(b) > 0 { - previousB := b - b = p.parseWhitespace(b) - - if len(b) == 0 { - return parent, nil, NewParserError(previousB[:1], "inline table is incomplete") - } - - if b[0] == '}' { - break - } - - if !first { - b, err = expect(',', b) - if err != nil { - return parent, nil, err - } - b = p.parseWhitespace(b) - } - - var kv reference - - kv, b, err = p.parseKeyval(b) - if err != nil { - return parent, nil, err - } - - if first { - p.builder.AttachChild(parent, kv) - } else { - p.builder.Chain(child, kv) - } - child = kv - - first = false - } - - rest, err := expect('}', b) - - return parent, rest, err -} - -//nolint:funlen,cyclop -func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { - // array = array-open [ array-values ] ws-comment-newline array-close - // array-open = %x5B ; [ - // array-close = %x5D ; ] - // array-values = ws-comment-newline val ws-comment-newline array-sep array-values - // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] - // array-sep = %x2C ; , Comma - // ws-comment-newline = *( wschar / [ comment ] 
newline ) - arrayStart := b - b = b[1:] - - parent := p.builder.Push(Node{ - Kind: Array, - }) - - // First indicates whether the parser is looking for the first element - // (non-comment) of the array. - first := true - - lastChild := invalidReference - - addChild := func(valueRef reference) { - if lastChild == invalidReference { - p.builder.AttachChild(parent, valueRef) - } else { - p.builder.Chain(lastChild, valueRef) - } - lastChild = valueRef - } - - var err error - for len(b) > 0 { - cref := invalidReference - cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) - if err != nil { - return parent, nil, err - } - - if cref != invalidReference { - addChild(cref) - } - - if len(b) == 0 { - return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") - } - - if b[0] == ']' { - break - } - - if b[0] == ',' { - if first { - return parent, nil, NewParserError(b[0:1], "array cannot start with comma") - } - b = b[1:] - - cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) - if err != nil { - return parent, nil, err - } - if cref != invalidReference { - addChild(cref) - } - } else if !first { - return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") - } - - // TOML allows trailing commas in arrays. - if len(b) > 0 && b[0] == ']' { - break - } - - var valueRef reference - valueRef, b, err = p.parseVal(b) - if err != nil { - return parent, nil, err - } - - addChild(valueRef) - - cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) - if err != nil { - return parent, nil, err - } - if cref != invalidReference { - addChild(cref) - } - - first = false - } - - rest, err := expect(']', b) - - return parent, rest, err -} - -func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []byte, error) { - rootCommentRef := invalidReference - latestCommentRef := invalidReference - - addComment := func(ref reference) { - if rootCommentRef == invalidReference { - rootCommentRef = ref - } else if latestCommentRef == invalidReference { - p.builder.AttachChild(rootCommentRef, ref) - latestCommentRef = ref - } else { - p.builder.Chain(latestCommentRef, ref) - latestCommentRef = ref - } - } - - for len(b) > 0 { - var err error - b = p.parseWhitespace(b) - - if len(b) > 0 && b[0] == '#' { - var ref reference - ref, b, err = p.parseComment(b) - if err != nil { - return invalidReference, nil, err - } - if ref != invalidReference { - addComment(ref) - } - } - - if len(b) == 0 { - break - } - - if b[0] == '\n' || b[0] == '\r' { - b, err = p.parseNewline(b) - if err != nil { - return invalidReference, nil, err - } - } else { - break - } - } - - return rootCommentRef, b, nil -} - -func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { - token, rest, err := scanMultilineLiteralString(b) - if err != nil { - return nil, nil, nil, err - } - - i := 3 - - // skip the immediate new line - if token[i] == '\n' { - i++ - } else if token[i] == '\r' && token[i+1] == '\n' { - i += 2 - } - - return token, token[i : len(token)-3], rest, err -} - -//nolint:funlen,gocognit,cyclop -func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { - // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body - // ml-basic-string-delim - // ml-basic-string-delim = 3quotation-mark - // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] - // - // mlb-content = mlb-char / newline / mlb-escaped-nl - // mlb-char = mlb-unescaped / escaped - // mlb-quotes = 1*2quotation-mark - // 
mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // mlb-escaped-nl = escape ws newline *( wschar / newline ) - token, escaped, rest, err := scanMultilineBasicString(b) - if err != nil { - return nil, nil, nil, err - } - - i := 3 - - // skip the immediate new line - if token[i] == '\n' { - i++ - } else if token[i] == '\r' && token[i+1] == '\n' { - i += 2 - } - - // fast path - startIdx := i - endIdx := len(token) - len(`"""`) - - if !escaped { - str := token[startIdx:endIdx] - verr := characters.Utf8TomlValidAlreadyEscaped(str) - if verr.Zero() { - return token, str, rest, nil - } - return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") - } - - var builder bytes.Buffer - - // The scanner ensures that the token starts and ends with quotes and that - // escapes are balanced. - for i < len(token)-3 { - c := token[i] - - //nolint:nestif - if c == '\\' { - // When the last non-whitespace character on a line is an unescaped \, - // it will be trimmed along with all whitespace (including newlines) up - // to the next non-whitespace character or closing delimiter. - - isLastNonWhitespaceOnLine := false - j := 1 - findEOLLoop: - for ; j < len(token)-3-i; j++ { - switch token[i+j] { - case ' ', '\t': - continue - case '\r': - if token[i+j+1] == '\n' { - continue - } - case '\n': - isLastNonWhitespaceOnLine = true - } - break findEOLLoop - } - if isLastNonWhitespaceOnLine { - i += j - for ; i < len(token)-3; i++ { - c := token[i] - if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { - i-- - break - } - } - i++ - continue - } - - // handle escaping - i++ - c = token[i] - - switch c { - case '"', '\\': - builder.WriteByte(c) - case 'b': - builder.WriteByte('\b') - case 'f': - builder.WriteByte('\f') - case 'n': - builder.WriteByte('\n') - case 'r': - builder.WriteByte('\r') - case 't': - builder.WriteByte('\t') - case 'e': - builder.WriteByte(0x1B) - case 'u': - x, err := hexToRune(atmost(token[i+1:], 4), 4) - if err != nil { - return nil, nil, nil, err - } - builder.WriteRune(x) - i += 4 - case 'U': - x, err := hexToRune(atmost(token[i+1:], 8), 8) - if err != nil { - return nil, nil, nil, err - } - - builder.WriteRune(x) - i += 8 - default: - return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) - } - i++ - } else { - size := characters.Utf8ValidNext(token[i:]) - if size == 0 { - return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) - } - builder.Write(token[i : i+size]) - i += size - } - } - - return token, builder.Bytes(), rest, nil -} - -func (p *Parser) parseKey(b []byte) (reference, []byte, error) { - // key = simple-key / dotted-key - // simple-key = quoted-key / unquoted-key - // - // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ - // quoted-key = basic-string / literal-string - // dotted-key = simple-key 1*( dot-sep simple-key ) - // - // dot-sep = ws %x2E ws ; . Period - raw, key, b, err := p.parseSimpleKey(b) - if err != nil { - return invalidReference, nil, err - } - - ref := p.builder.Push(Node{ - Kind: Key, - Raw: p.Range(raw), - Data: key, - }) - - for { - b = p.parseWhitespace(b) - if len(b) > 0 && b[0] == '.' 
{ - b = p.parseWhitespace(b[1:]) - - raw, key, b, err = p.parseSimpleKey(b) - if err != nil { - return ref, nil, err - } - - p.builder.PushAndChain(Node{ - Kind: Key, - Raw: p.Range(raw), - Data: key, - }) - } else { - break - } - } - - return ref, b, nil -} - -func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { - if len(b) == 0 { - return nil, nil, nil, NewParserError(b, "expected key but found none") - } - - // simple-key = quoted-key / unquoted-key - // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ - // quoted-key = basic-string / literal-string - switch { - case b[0] == '\'': - return p.parseLiteralString(b) - case b[0] == '"': - return p.parseBasicString(b) - case isUnquotedKeyChar(b[0]): - key, rest = scanUnquotedKey(b) - return key, key, rest, nil - default: - return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) - } -} - -//nolint:funlen,cyclop -func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { - // basic-string = quotation-mark *basic-char quotation-mark - // quotation-mark = %x22 ; " - // basic-char = basic-unescaped / escaped - // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // escaped = escape escape-seq-char - // escape-seq-char = %x22 ; " quotation mark U+0022 - // escape-seq-char =/ %x5C ; \ reverse solidus U+005C - // escape-seq-char =/ %x62 ; b backspace U+0008 - // escape-seq-char =/ %x66 ; f form feed U+000C - // escape-seq-char =/ %x6E ; n line feed U+000A - // escape-seq-char =/ %x72 ; r carriage return U+000D - // escape-seq-char =/ %x74 ; t tab U+0009 - // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX - // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX - token, escaped, rest, err := scanBasicString(b) - if err != nil { - return nil, nil, nil, err - } - - startIdx := len(`"`) - endIdx := len(token) - len(`"`) - - // Fast path. If there is no escape sequence, the string should just be - // an UTF-8 encoded string, which is the same as Go. In that case, - // validate the string and return a direct reference to the buffer. - if !escaped { - str := token[startIdx:endIdx] - verr := characters.Utf8TomlValidAlreadyEscaped(str) - if verr.Zero() { - return token, str, rest, nil - } - return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") - } - - i := startIdx - - var builder bytes.Buffer - - // The scanner ensures that the token starts and ends with quotes and that - // escapes are balanced. 
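// Illustrative examples (not from this file) of what the escape-resolving
// loop below produces, per the TOML escape set it implements:
//
//	"say \"hi\""   -> say "hi"
//	"line1\nline2" -> line1<LF>line2
//	"col1\tcol2"   -> col1<TAB>col2
//	"\u00E9"       -> é        (2-byte UTF-8 rune)
//	"\U0001F600"   -> 😀       (4-byte UTF-8 rune)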
- for i < len(token)-1 { - c := token[i] - if c == '\\' { - i++ - c = token[i] - - switch c { - case '"', '\\': - builder.WriteByte(c) - case 'b': - builder.WriteByte('\b') - case 'f': - builder.WriteByte('\f') - case 'n': - builder.WriteByte('\n') - case 'r': - builder.WriteByte('\r') - case 't': - builder.WriteByte('\t') - case 'e': - builder.WriteByte(0x1B) - case 'u': - x, err := hexToRune(token[i+1:len(token)-1], 4) - if err != nil { - return nil, nil, nil, err - } - - builder.WriteRune(x) - i += 4 - case 'U': - x, err := hexToRune(token[i+1:len(token)-1], 8) - if err != nil { - return nil, nil, nil, err - } - - builder.WriteRune(x) - i += 8 - default: - return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) - } - i++ - } else { - size := characters.Utf8ValidNext(token[i:]) - if size == 0 { - return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) - } - builder.Write(token[i : i+size]) - i += size - } - } - - return token, builder.Bytes(), rest, nil -} - -func hexToRune(b []byte, length int) (rune, error) { - if len(b) < length { - return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b)) - } - b = b[:length] - - var r uint32 - for i, c := range b { - d := uint32(0) - switch { - case '0' <= c && c <= '9': - d = uint32(c - '0') - case 'a' <= c && c <= 'f': - d = uint32(c - 'a' + 10) - case 'A' <= c && c <= 'F': - d = uint32(c - 'A' + 10) - default: - return -1, NewParserError(b[i:i+1], "non-hex character") - } - r = r*16 + d - } - - if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { - return -1, NewParserError(b, "escape sequence is invalid Unicode code point") - } - - return rune(r), nil -} - -func (p *Parser) parseWhitespace(b []byte) []byte { - // ws = *wschar - // wschar = %x20 ; Space - // wschar =/ %x09 ; Horizontal tab - _, rest := scanWhitespace(b) - - return rest -} - -//nolint:cyclop -func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { - switch b[0] { - case 'i': - if !scanFollowsInf(b) { - return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") - } - - return p.builder.Push(Node{ - Kind: Float, - Data: b[:3], - Raw: p.Range(b[:3]), - }), b[3:], nil - case 'n': - if !scanFollowsNan(b) { - return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") - } - - return p.builder.Push(Node{ - Kind: Float, - Data: b[:3], - Raw: p.Range(b[:3]), - }), b[3:], nil - case '+', '-': - return p.scanIntOrFloat(b) - } - - if len(b) < 3 { - return p.scanIntOrFloat(b) - } - - s := 5 - if len(b) < s { - s = len(b) - } - - for idx, c := range b[:s] { - if isDigit(c) { - continue - } - - if idx == 2 && c == ':' || (idx == 4 && c == '-') { - return p.scanDateTime(b) - } - - break - } - - return p.scanIntOrFloat(b) -} - -func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { - // scans for contiguous characters in [0-9T:Z.+-], and up to one space if - // followed by a digit. 
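// Illustrative classification (not from this file) of how the scan below
// picks a Kind; actual parsing of the token happens later in the decoder:
//
//	1979-05-27T07:32:00Z   -> DateTime      (date, time and offset)
//	1979-05-27T07:32:00    -> LocalDateTime (date and time, no offset)
//	1979-05-27             -> LocalDate
//	07:32:00               -> LocalTime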
- hasDate := false - hasTime := false - hasTz := false - seenSpace := false - - i := 0 -byteLoop: - for ; i < len(b); i++ { - c := b[i] - - switch { - case isDigit(c): - case c == '-': - hasDate = true - const minOffsetOfTz = 8 - if i >= minOffsetOfTz { - hasTz = true - } - case c == 'T' || c == 't' || c == ':' || c == '.': - hasTime = true - case c == '+' || c == '-' || c == 'Z' || c == 'z': - hasTz = true - case c == ' ': - if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { - i += 2 - // Avoid reaching past the end of the document in case the time - // is malformed. See TestIssue585. - if i >= len(b) { - i-- - } - seenSpace = true - hasTime = true - } else { - break byteLoop - } - default: - break byteLoop - } - } - - var kind Kind - - if hasTime { - if hasDate { - if hasTz { - kind = DateTime - } else { - kind = LocalDateTime - } - } else { - kind = LocalTime - } - } else { - kind = LocalDate - } - - return p.builder.Push(Node{ - Kind: kind, - Data: b[:i], - }), b[i:], nil -} - -//nolint:funlen,gocognit,cyclop -func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { - i := 0 - - if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { - var isValidRune validRuneFn - - switch b[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - i++ - } - - if isValidRune != nil { - i += 2 - for ; i < len(b); i++ { - if !isValidRune(b[i]) { - break - } - } - } - - return p.builder.Push(Node{ - Kind: Integer, - Data: b[:i], - Raw: p.Range(b[:i]), - }), b[i:], nil - } - - isFloat := false - - for ; i < len(b); i++ { - c := b[i] - - if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { - continue - } - - if c == '.' || c == 'e' || c == 'E' { - isFloat = true - - continue - } - - if c == 'i' { - if scanFollowsInf(b[i:]) { - return p.builder.Push(Node{ - Kind: Float, - Data: b[:i+3], - Raw: p.Range(b[:i+3]), - }), b[i+3:], nil - } - - return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") - } - - if c == 'n' { - if scanFollowsNan(b[i:]) { - return p.builder.Push(Node{ - Kind: Float, - Data: b[:i+3], - Raw: p.Range(b[:i+3]), - }), b[i+3:], nil - } - - return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") - } - - break - } - - if i == 0 { - return invalidReference, b, NewParserError(b, "incomplete number") - } - - kind := Integer - - if isFloat { - kind = Float - } - - return p.builder.Push(Node{ - Kind: kind, - Data: b[:i], - Raw: p.Range(b[:i]), - }), b[i:], nil -} - -func isDigit(r byte) bool { - return r >= '0' && r <= '9' -} - -type validRuneFn func(r byte) bool - -func isValidHexRune(r byte) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r byte) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r byte) bool { - return r == '0' || r == '1' || r == '_' -} - -func expect(x byte, b []byte) ([]byte, error) { - if len(b) == 0 { - return nil, NewParserError(b, "expected character %c but the document ended here", x) - } - - if b[0] != x { - return nil, NewParserError(b[0:1], "expected character %c", x) - } - - return b[1:], nil -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go deleted file mode 100644 index 0512181d28..0000000000 --- 
a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go +++ /dev/null @@ -1,270 +0,0 @@ -package unstable - -import "github.com/pelletier/go-toml/v2/internal/characters" - -func scanFollows(b []byte, pattern string) bool { - n := len(pattern) - - return len(b) >= n && string(b[:n]) == pattern -} - -func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { - return scanFollows(b, `"""`) -} - -func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { - return scanFollows(b, `'''`) -} - -func scanFollowsTrue(b []byte) bool { - return scanFollows(b, `true`) -} - -func scanFollowsFalse(b []byte) bool { - return scanFollows(b, `false`) -} - -func scanFollowsInf(b []byte) bool { - return scanFollows(b, `inf`) -} - -func scanFollowsNan(b []byte) bool { - return scanFollows(b, `nan`) -} - -func scanUnquotedKey(b []byte) ([]byte, []byte) { - // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ - for i := 0; i < len(b); i++ { - if !isUnquotedKeyChar(b[i]) { - return b[:i], b[i:] - } - } - - return b, b[len(b):] -} - -func isUnquotedKeyChar(r byte) bool { - return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' -} - -func scanLiteralString(b []byte) ([]byte, []byte, error) { - // literal-string = apostrophe *literal-char apostrophe - // apostrophe = %x27 ; ' apostrophe - // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii - for i := 1; i < len(b); { - switch b[i] { - case '\'': - return b[:i+1], b[i+1:], nil - case '\n', '\r': - return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") - } - size := characters.Utf8ValidNext(b[i:]) - if size == 0 { - return nil, nil, NewParserError(b[i:i+1], "invalid character") - } - i += size - } - - return nil, nil, NewParserError(b[len(b):], "unterminated literal string") -} - -func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { - // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body - // ml-literal-string-delim - // ml-literal-string-delim = 3apostrophe - // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] - // - // mll-content = mll-char / newline - // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii - // mll-quotes = 1*2apostrophe - for i := 3; i < len(b); { - switch b[i] { - case '\'': - if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { - i += 3 - - // At that point we found 3 apostrophe, and i is the - // index of the byte after the third one. The scanner - // needs to be eager, because there can be an extra 2 - // apostrophe that can be accepted at the end of the - // string. 
- - if i >= len(b) || b[i] != '\'' { - return b[:i], b[i:], nil - } - i++ - - if i >= len(b) || b[i] != '\'' { - return b[:i], b[i:], nil - } - i++ - - if i < len(b) && b[i] == '\'' { - return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") - } - - return b[:i], b[i:], nil - } - case '\r': - if len(b) < i+2 { - return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) - } - if b[i+1] != '\n' { - return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) - } - i += 2 // skip the \n - continue - } - size := characters.Utf8ValidNext(b[i:]) - if size == 0 { - return nil, nil, NewParserError(b[i:i+1], "invalid character") - } - i += size - } - - return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) -} - -func scanWindowsNewline(b []byte) ([]byte, []byte, error) { - const lenCRLF = 2 - if len(b) < lenCRLF { - return nil, nil, NewParserError(b, "windows new line expected") - } - - if b[1] != '\n' { - return nil, nil, NewParserError(b, `windows new line should be \r\n`) - } - - return b[:lenCRLF], b[lenCRLF:], nil -} - -func scanWhitespace(b []byte) ([]byte, []byte) { - for i := 0; i < len(b); i++ { - switch b[i] { - case ' ', '\t': - continue - default: - return b[:i], b[i:] - } - } - - return b, b[len(b):] -} - -func scanComment(b []byte) ([]byte, []byte, error) { - // comment-start-symbol = %x23 ; # - // non-ascii = %x80-D7FF / %xE000-10FFFF - // non-eol = %x09 / %x20-7F / non-ascii - // - // comment = comment-start-symbol *non-eol - - for i := 1; i < len(b); { - if b[i] == '\n' { - return b[:i], b[i:], nil - } - if b[i] == '\r' { - if i+1 < len(b) && b[i+1] == '\n' { - return b[:i+1], b[i+1:], nil - } - return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") - } - size := characters.Utf8ValidNext(b[i:]) - if size == 0 { - return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") - } - - i += size - } - - return b, b[len(b):], nil -} - -func scanBasicString(b []byte) ([]byte, bool, []byte, error) { - // basic-string = quotation-mark *basic-char quotation-mark - // quotation-mark = %x22 ; " - // basic-char = basic-unescaped / escaped - // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // escaped = escape escape-seq-char - escaped := false - i := 1 - - for ; i < len(b); i++ { - switch b[i] { - case '"': - return b[:i+1], escaped, b[i+1:], nil - case '\n', '\r': - return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") - case '\\': - if len(b) < i+2 { - return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") - } - escaped = true - i++ // skip the next character - } - } - - return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) -} - -func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { - // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body - // ml-basic-string-delim - // ml-basic-string-delim = 3quotation-mark - // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] - // - // mlb-content = mlb-char / newline / mlb-escaped-nl - // mlb-char = mlb-unescaped / escaped - // mlb-quotes = 1*2quotation-mark - // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii - // mlb-escaped-nl = escape ws newline *( wschar / newline ) - - escaped := false - i := 3 - - for ; i < len(b); i++ { - switch b[i] { - case '"': - if scanFollowsMultilineBasicStringDelimiter(b[i:]) { - i += 3 - - // At that point we found 3 
apostrophe, and i is the - // index of the byte after the third one. The scanner - // needs to be eager, because there can be an extra 2 - // apostrophe that can be accepted at the end of the - // string. - - if i >= len(b) || b[i] != '"' { - return b[:i], escaped, b[i:], nil - } - i++ - - if i >= len(b) || b[i] != '"' { - return b[:i], escaped, b[i:], nil - } - i++ - - if i < len(b) && b[i] == '"' { - return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`) - } - - return b[:i], escaped, b[i:], nil - } - case '\\': - if len(b) < i+2 { - return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\") - } - escaped = true - i++ // skip the next character - case '\r': - if len(b) < i+2 { - return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`) - } - if b[i+1] != '\n' { - return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`) - } - i++ // skip the \n - } - } - - return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`) -} diff --git a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go deleted file mode 100644 index 00cfd6de45..0000000000 --- a/openshift/tools/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go +++ /dev/null @@ -1,7 +0,0 @@ -package unstable - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a TOML document. -type Unmarshaler interface { - UnmarshalTOML(value *Node) error -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/NOTICE b/openshift/tools/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index b9cc55abbb..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,18 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE deleted file mode 100644 index 65d761bc9f..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
deleted file mode 100644
index 8547c8dfd1..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// Package header provides functions for parsing HTTP headers.
-package header
-
-import (
-	"net/http"
-	"strings"
-)
-
-// Octet types from RFC 2616.
-var octetTypes [256]octetType
-
-type octetType byte
-
-const (
-	isToken octetType = 1 << iota
-	isSpace
-)
-
-func init() {
-	// OCTET      = <any 8-bit sequence of data>
-	// CHAR       = <any US-ASCII character (octets 0 - 127)>
-	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-	// CR         = <US-ASCII CR, carriage return (13)>
-	// LF         = <US-ASCII LF, linefeed (10)>
-	// SP         = <US-ASCII SP, space (32)>
-	// HT         = <US-ASCII HT, horizontal-tab (9)>
-	// <">        = <US-ASCII double-quote mark (34)>
-	// CRLF       = CR LF
-	// LWS        = [CRLF] 1*( SP | HT )
-	// TEXT       = <any OCTET except CTLs, but including LWS>
-	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-	// token      = 1*<any CHAR except CTLs or separators>
-	// qdtext     = <any TEXT except <">>
-
-	for c := 0; c < 256; c++ {
-		var t octetType
-		isCtl := c <= 31 || c == 127
-		isChar := 0 <= c && c <= 127
-		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
-		if strings.ContainsRune(" \t\r\n", rune(c)) {
-			t |= isSpace
-		}
-		if isChar && !isCtl && !isSeparator {
-			t |= isToken
-		}
-		octetTypes[c] = t
-	}
-}
-
-// AcceptSpec describes an Accept* header.
-type AcceptSpec struct {
-	Value string
-	Q     float64
-}
-
-// ParseAccept parses Accept* headers.
-func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
-loop:
-	for _, s := range header[key] {
-		for {
-			var spec AcceptSpec
-			spec.Value, s = expectTokenSlash(s)
-			if spec.Value == "" {
-				continue loop
-			}
-			spec.Q = 1.0
-			s = skipSpace(s)
-			if strings.HasPrefix(s, ";") {
-				s = skipSpace(s[1:])
-				if !strings.HasPrefix(s, "q=") {
-					continue loop
-				}
-				spec.Q, s = expectQuality(s[2:])
-				if spec.Q < 0.0 {
-					continue loop
-				}
-			}
-			specs = append(specs, spec)
-			s = skipSpace(s)
-			if !strings.HasPrefix(s, ",") {
-				continue loop
-			}
-			s = skipSpace(s[1:])
-		}
-	}
-	return
-}
-
-func skipSpace(s string) (rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isSpace == 0 {
-			break
-		}
-	}
-	return s[i:]
-}
-
-func expectTokenSlash(s string) (token, rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		b := s[i]
-		if (octetTypes[b]&isToken == 0) && b != '/' {
-			break
-		}
-	}
-	return s[:i], s[i:]
-}
-
-func expectQuality(s string) (q float64, rest string) {
-	switch {
-	case len(s) == 0:
-		return -1, ""
-	case s[0] == '0':
-		q = 0
-	case s[0] == '1':
-		q = 1
-	default:
-		return -1, ""
-	}
-	s = s[1:]
-	if !strings.HasPrefix(s, ".") {
-		return q, s
-	}
-	s = s[1:]
-	i := 0
-	n := 0
-	d := 1
-	for ; i < len(s); i++ {
-		b := s[i]
-		if b < '0' || b > '9' {
-			break
-		}
-		n = n*10 + int(b) - '0'
-		d *= 10
-	}
-	return q + float64(n)/float64(d), s[i:]
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
deleted file mode 100644
index 2e45780b74..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-package httputil
-
-import (
-	"net/http"
-
-	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
-)
-
-// NegotiateContentEncoding returns the best offered content encoding for the
-// request's Accept-Encoding header. If two offers match with equal weight,
-// then the offer earlier in the list is preferred. If no offers are
-// acceptable, then "" is returned.
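To make the q-value rules above concrete before the NegotiateContentEncoding body in the next hunk: the vendored gddo packages are internal to client_golang and cannot be imported, so the sketch below restates the documented negotiation rule with illustrative stand-in types, not the library API.

```go
package main

import "fmt"

// acceptSpec mirrors the AcceptSpec shape produced by ParseAccept above:
// one parsed "value;q=..." entry from an Accept-Encoding header.
type acceptSpec struct {
	Value string
	Q     float64
}

// bestEncoding restates the documented rule: the highest q wins, an
// earlier offer wins ties (hence the strict > comparison), and a best
// weight of q=0 means the client refuses every offer.
func bestEncoding(specs []acceptSpec, offers []string) string {
	bestOffer := "identity"
	bestQ := -1.0
	for _, offer := range offers {
		for _, spec := range specs {
			if spec.Q > bestQ && (spec.Value == "*" || spec.Value == offer) {
				bestQ = spec.Q
				bestOffer = offer
			}
		}
	}
	if bestQ == 0 {
		bestOffer = ""
	}
	return bestOffer
}

func main() {
	// As if the client had sent: Accept-Encoding: gzip;q=0.8, identity;q=0.5
	specs := []acceptSpec{{Value: "gzip", Q: 0.8}, {Value: "identity", Q: 0.5}}
	fmt.Println(bestEncoding(specs, []string{"identity", "gzip"})) // prints "gzip"
}
```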
-func NegotiateContentEncoding(r *http.Request, offers []string) string { - bestOffer := "identity" - bestQ := -1.0 - specs := header.ParseAccept(r.Header, "Accept-Encoding") - for _, offer := range offers { - for _, spec := range specs { - if spec.Q > bestQ && - (spec.Value == "*" || spec.Value == offer) { - bestQ = spec.Q - bestOffer = offer - } - } - } - if bestQ == 0 { - bestOffer = "" - } - return bestOffer -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346d..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/README.md b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index c67ff1b7fa..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus). diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go deleted file mode 100644 index 450189f35e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "runtime/debug" - -// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewBuildInfoCollector instead. -func NewBuildInfoCollector() Collector { - path, version, sum := "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - c := &selfCollector{MustNewConstMetric( - NewDesc( - "go_build_info", - "Build information about the main Go module.", - nil, Labels{"path": path, "version": version, "checksum": sum}, - ), - GaugeValue, 1)} - c.init(c.self) - return c -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index cf05079fb8..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. - // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. Ideally, - // Collector implementations support concurrent readers. - Collect(chan<- Metric) -} - -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. 
-// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collector (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. -func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} - -// collectorMetric is a metric that is also a collector. -// Because of selfCollector, most (if not all) Metrics in -// this package are also collectors. -type collectorMetric interface { - Metric - Collector -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go deleted file mode 100644 index 9a71a15db1..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// CollectorFunc is a convenient way to implement a Prometheus Collector -// without interface boilerplate. -// This implementation is based on DescribeByCollect method. -// familiarize yourself to it before using. 
-type CollectorFunc func(chan<- Metric) - -// Collect calls the defined CollectorFunc function with the provided Metrics channel -func (f CollectorFunc) Collect(ch chan<- Metric) { - f(ch) -} - -// Describe sends the descriptor information using DescribeByCollect -func (f CollectorFunc) Describe(ch chan<- *Desc) { - DescribeByCollect(f, ch) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go deleted file mode 100644 index f4c92913a5..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package collectors provides implementations of prometheus.Collector to -// conveniently collect process and Go-related metrics. -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewBuildInfoCollector returns a collector collecting a single metric -// "go_build_info" with the constant value 1 and three labels "path", "version", -// and "checksum". Their label values contain the main module path, version, and -// checksum, respectively. The labels will only have meaningful values if the -// binary is built with Go module support and from source code retrieved from -// the source repository (rather than the local file system). This is usually -// accomplished by building from outside of GOPATH, specifying the full address -// of the main package, e.g. "GO111MODULE=on go run -// github.com/prometheus/client_golang/examples/random". If built without Go -// module support, all label values will be "unknown". If built with Go module -// support but using the source code from the local file system, the "path" will -// be set appropriately, but "checksum" will be empty and "version" will be -// "(devel)". -// -// This collector uses only the build information for the main module. See -// https://github.com/povilasv/prommod for an example of a collector for the -// module dependencies. -func NewBuildInfoCollector() prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewBuildInfoCollector() -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go deleted file mode 100644 index d5a7279fb9..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
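Interrupting the per-file headers briefly: the CollectorFunc and DescribeByCollect helpers removed above are meant to be used together. A minimal sketch against the public client_golang API (recent versions, matching the vendored copy deleted here); the example_queue_depth metric is made up:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	queueDepth := prometheus.NewDesc(
		"example_queue_depth", "Current depth of the work queue.", nil, nil)

	// CollectorFunc adapts a plain function into a Collector; its Describe
	// method delegates to DescribeByCollect, as documented above.
	c := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(queueDepth, prometheus.GaugeValue, 42)
	})

	reg := prometheus.NewRegistry()
	reg.MustRegister(c)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // example_queue_depth
	}
}
```

Because Describe is implemented via DescribeByCollect, this shortcut only fits collectors whose metric set is stable over their lifetime, exactly as the doc comment above cautions.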
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import ( - "database/sql" - - "github.com/prometheus/client_golang/prometheus" -) - -type dbStatsCollector struct { - db *sql.DB - - maxOpenConnections *prometheus.Desc - - openConnections *prometheus.Desc - inUseConnections *prometheus.Desc - idleConnections *prometheus.Desc - - waitCount *prometheus.Desc - waitDuration *prometheus.Desc - maxIdleClosed *prometheus.Desc - maxIdleTimeClosed *prometheus.Desc - maxLifetimeClosed *prometheus.Desc -} - -// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. -// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. -func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { - fqName := func(name string) string { - return "go_sql_" + name - } - return &dbStatsCollector{ - db: db, - maxOpenConnections: prometheus.NewDesc( - fqName("max_open_connections"), - "Maximum number of open connections to the database.", - nil, prometheus.Labels{"db_name": dbName}, - ), - openConnections: prometheus.NewDesc( - fqName("open_connections"), - "The number of established connections both in use and idle.", - nil, prometheus.Labels{"db_name": dbName}, - ), - inUseConnections: prometheus.NewDesc( - fqName("in_use_connections"), - "The number of connections currently in use.", - nil, prometheus.Labels{"db_name": dbName}, - ), - idleConnections: prometheus.NewDesc( - fqName("idle_connections"), - "The number of idle connections.", - nil, prometheus.Labels{"db_name": dbName}, - ), - waitCount: prometheus.NewDesc( - fqName("wait_count_total"), - "The total number of connections waited for.", - nil, prometheus.Labels{"db_name": dbName}, - ), - waitDuration: prometheus.NewDesc( - fqName("wait_duration_seconds_total"), - "The total time blocked waiting for a new connection.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxIdleClosed: prometheus.NewDesc( - fqName("max_idle_closed_total"), - "The total number of connections closed due to SetMaxIdleConns.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxIdleTimeClosed: prometheus.NewDesc( - fqName("max_idle_time_closed_total"), - "The total number of connections closed due to SetConnMaxIdleTime.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxLifetimeClosed: prometheus.NewDesc( - fqName("max_lifetime_closed_total"), - "The total number of connections closed due to SetConnMaxLifetime.", - nil, prometheus.Labels{"db_name": dbName}, - ), - } -} - -// Describe implements Collector. -func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- c.maxOpenConnections - ch <- c.openConnections - ch <- c.inUseConnections - ch <- c.idleConnections - ch <- c.waitCount - ch <- c.waitDuration - ch <- c.maxIdleClosed - ch <- c.maxLifetimeClosed - ch <- c.maxIdleTimeClosed -} - -// Collect implements Collector. 
-func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { - stats := c.db.Stats() - ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) - ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) - ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) - ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) - ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) - ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) - ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) - ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) - ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go deleted file mode 100644 index b22d862fbc..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewExpvarCollector returns a newly allocated expvar Collector. -// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototyping, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. 
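The variable-label cases continue below; first, a minimal sketch of the zero-label case just described, using the public collectors API (the expvar key and metric name are made up):

```go
package main

import (
	"expvar"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A plain numeric expvar, presumably updated elsewhere in the program.
	open := expvar.NewInt("open_connections")
	open.Set(7)

	// One exports-map entry per expvar key. With no variable labels, the
	// numeric value is exported directly as the (untyped) sample value.
	c := collectors.NewExpvarCollector(map[string]*prometheus.Desc{
		"open_connections": prometheus.NewDesc(
			"expvar_open_connections", "Open connections, via expvar.", nil, nil),
	})

	reg := prometheus.NewRegistry()
	reg.MustRegister(c)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}
```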
-// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewExpvarCollector(exports) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go deleted file mode 100644 index effc57840a..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewGoCollector returns a collector that exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This requires to “stop the world”, which usually only happens for -// garbage collection (GC). Take the following implications into account when -// deciding whether to use the Go collector: -// -// 1. The performance impact of stopping the world is the more relevant the more -// frequently metrics are collected. However, with Go1.9 or later the -// stop-the-world time per metrics collection is very short (~25µs) so that the -// performance impact will only matter in rare cases. However, with older Go -// versions, the stop-the-world duration depends on the heap size and can be -// quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). -// -// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the -// metrics collection happens to coincide with GC, it will only complete after -// GC has finished. Usually, GC is fast enough to not cause problems. However, -// with a very large heap, GC might take multiple seconds, which is enough to -// cause scrape timeouts in common setups. To avoid this problem, the Go -// collector will use the memstats from a previous collection if -// runtime.ReadMemStats takes more than 1s. However, if there are no previously -// collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. 
-// -// NOTE: The problem is solved in Go 1.15, see -// https://github.com/golang/go/issues/19812 for the related Go issue. -func NewGoCollector() prometheus.Collector { - return prometheus.NewGoCollector() -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go deleted file mode 100644 index cc4ef1077e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package collectors - -import ( - "regexp" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/internal" -) - -var ( - // MetricsAll allows all the metrics to be collected from Go runtime. - MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")} - // MetricsGC allows only GC metrics to be collected from Go runtime. - // e.g. go_gc_cycles_automatic_gc_cycles_total - // NOTE: This does not include new class of "/cpu/classes/gc/..." metrics. - // Use custom metric rule to access those. - MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)} - // MetricsMemory allows only memory metrics to be collected from Go runtime. - // e.g. go_memory_classes_heap_free_bytes - MetricsMemory = GoRuntimeMetricsRule{regexp.MustCompile(`^/memory/.*`)} - // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. - // e.g. go_sched_goroutines_goroutines - MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} - // MetricsDebug allows only debug metrics to be collected from Go runtime. - // e.g. go_godebug_non_default_behavior_gocachetest_events_total - MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} -) - -// WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: -// -// go_memstats_alloc_bytes -// go_memstats_alloc_bytes_total -// go_memstats_sys_bytes -// go_memstats_mallocs_total -// go_memstats_frees_total -// go_memstats_heap_alloc_bytes -// go_memstats_heap_sys_bytes -// go_memstats_heap_idle_bytes -// go_memstats_heap_inuse_bytes -// go_memstats_heap_released_bytes -// go_memstats_heap_objects -// go_memstats_stack_inuse_bytes -// go_memstats_stack_sys_bytes -// go_memstats_mspan_inuse_bytes -// go_memstats_mspan_sys_bytes -// go_memstats_mcache_inuse_bytes -// go_memstats_mcache_sys_bytes -// go_memstats_buck_hash_sys_bytes -// go_memstats_gc_sys_bytes -// go_memstats_other_sys_bytes -// go_memstats_next_gc_bytes -// -// so the metrics known from pre client_golang v1.12.0, -// -// NOTE(bwplotka): The above represents runtime.MemStats statistics, but they are -// actually implemented using new runtime/metrics package. 
(except skipped go_memstats_gc_cpu_fraction
-// -- see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for explanation).
-//
-// Some users might want to disable this on collector level (although you can use scrape relabelling on Prometheus),
-// because similar metrics can be now obtained using WithGoCollectorRuntimeMetrics. Note that the semantics of new
-// metrics might be different, plus the names can be change over time with different Go version.
-//
-// NOTE(bwplotka): Changing metric names can be tedious at times as the alerts, recording rules and dashboards have to be adjusted.
-// The old metrics are also very useful, with many guides and books written about how to interpret them.
-//
-// As a result our recommendation would be to stick with MemStats like metrics and enable other runtime/metrics if you are interested
-// in advanced insights Go provides. See ExampleGoCollector_WithAdvancedGoMetrics.
-func WithGoCollectorMemStatsMetricsDisabled() func(options *internal.GoCollectorOptions) {
-	return func(o *internal.GoCollectorOptions) {
-		o.DisableMemStatsLikeMetrics = true
-	}
-}
-
-// GoRuntimeMetricsRule allow enabling and configuring particular group of runtime/metrics.
-// TODO(bwplotka): Consider adding ability to adjust buckets.
-type GoRuntimeMetricsRule struct {
-	// Matcher represents RE2 expression will match the runtime/metrics from https://golang.org/src/runtime/metrics/description.go
-	// Use `regexp.MustCompile` or `regexp.Compile` to create this field.
-	Matcher *regexp.Regexp
-}
-
-// WithGoCollectorRuntimeMetrics allows enabling and configuring particular group of runtime/metrics.
-// See the list of metrics https://golang.org/src/runtime/metrics/description.go (pick the Go version you use there!).
-// You can use this option in repeated manner, which will add new rules. The order of rules is important, the last rule
-// that matches particular metrics is applied.
-func WithGoCollectorRuntimeMetrics(rules ...GoRuntimeMetricsRule) func(options *internal.GoCollectorOptions) {
-	rs := make([]internal.GoCollectorRule, len(rules))
-	for i, r := range rules {
-		rs[i] = internal.GoCollectorRule{
-			Matcher: r.Matcher,
-		}
-	}
-
-	return func(o *internal.GoCollectorOptions) {
-		o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...)
-	}
-}
-
-// WithoutGoCollectorRuntimeMetrics allows disabling group of runtime/metrics that you might have added in WithGoCollectorRuntimeMetrics.
-// It behaves similarly to WithGoCollectorRuntimeMetrics just with deny-list semantics.
-func WithoutGoCollectorRuntimeMetrics(matchers ...*regexp.Regexp) func(options *internal.GoCollectorOptions) {
-	rs := make([]internal.GoCollectorRule, len(matchers))
-	for i, m := range matchers {
-		rs[i] = internal.GoCollectorRule{
-			Matcher: m,
-			Deny:    true,
-		}
-	}
-
-	return func(o *internal.GoCollectorOptions) {
-		o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...)
-	}
-}
-
-// GoCollectionOption represents Go collection option flag.
-// Deprecated.
-type GoCollectionOption uint32
-
-const (
-	// GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure.
-	//
-	// Deprecated: Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector.
-	GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota
-	// GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package.
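The deprecated flag constants continue below. For the non-deprecated path, here is a sketch of how the functional options removed above compose; the deny-list regexp is illustrative:

```go
package main

import (
	"regexp"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()

	// Keep the classic MemStats-style metrics off, opt in to the scheduler
	// and GC groups, then deny one sub-group again. The last rule that
	// matches a given metric wins, as documented above.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorMemStatsMetricsDisabled(),
		collectors.WithGoCollectorRuntimeMetrics(
			collectors.MetricsScheduler,
			collectors.MetricsGC,
		),
		collectors.WithoutGoCollectorRuntimeMetrics(regexp.MustCompile(`^/gc/heap/tiny/.*`)),
	))
}
```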
- // - // Deprecated: Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) - // function to enable those metrics in the collector. - GoRuntimeMetricsCollection -) - -// WithGoCollections allows enabling different collections for Go collector on top of base metrics. -// -// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. -func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) { - return func(options *internal.GoCollectorOptions) { - if flags&GoRuntimeMemStatsCollection == 0 { - WithGoCollectorMemStatsMetricsDisabled()(options) - } - - if flags&GoRuntimeMetricsCollection != 0 { - WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})(options) - } - } -} - -// NewGoCollector returns a collector that exports metrics about the current Go -// process using debug.GCStats (base metrics) and runtime/metrics (both in MemStats style and new ones). -func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewGoCollector(opts...) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go deleted file mode 100644 index 24558f50a7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. 
- ReportErrors bool -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. -// -// The collector only works on operating systems with a Linux-style proc -// filesystem and on Microsoft Windows. On other operating systems, it will not -// collect any metrics. -func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ - PidFn: opts.PidFn, - Namespace: opts.Namespace, - ReportErrors: opts.ReportErrors, - }) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index 4ce84e7a80..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// ExemplarAdder is implemented by Counters that offer the option of adding a -// value to the Counter together with an exemplar. Its AddWithExemplar method -// works like the Add method of the Counter interface but also replaces the -// currently saved exemplar (if any) with a new one, created from the provided -// value, the current time as timestamp, and the provided labels. Empty Labels -// will lead to a valid (label-less) exemplar. But if Labels is nil, the current -// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any -// of the provided labels are invalid, or if the provided labels contain more -// than 128 runes in total. -type ExemplarAdder interface { - AddWithExemplar(value float64, exemplar Labels) -} - -// CounterOpts is an alias for Opts. 
See there for doc comments. -type CounterOpts Opts - -// CounterVecOpts bundles the options to create a CounterVec metric. -// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels -// is optional and can safely be left to its default value. -type CounterVecOpts struct { - CounterOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation also implements ExemplarAdder. It is safe to -// perform the corresponding type assertion. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. This has to -// be taken into account when it comes to precision and overflow behavior. -func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - if opts.now == nil { - opts.now = time.Now - } - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} - result.init(result) // Init self-collection. - result.createdTs = timestamppb.New(opts.now()) - return result -} - -type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. Both have to go first - // in the struct to guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - createdTs *timestamppb.Timestamp - labelPairs []*dto.LabelPair - exemplar atomic.Value // Containing nil or a *dto.Exemplar. - - // now is for testing purposes, by default it's time.Now. - now func() time.Time -} - -func (c *counter) Desc() *Desc { - return c.desc -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) AddWithExemplar(v float64, e Labels) { - c.Add(v) - c.updateExemplar(v, e) -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) get() float64 { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - return fval + float64(ival) -} - -func (c *counter) Write(out *dto.Metric) error { - // Read the Exemplar first and the value second. This is to avoid a race condition - // where users see an exemplar for a not-yet-existing observation. 
- var exemplar *dto.Exemplar - if e := c.exemplar.Load(); e != nil { - exemplar = e.(*dto.Exemplar) - } - val := c.get() - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) -} - -func (c *counter) updateExemplar(v float64, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, c.now(), l) - if err != nil { - panic(err) - } - c.exemplar.Store(e) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -type CounterVec struct { - *MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. -func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - return V2.NewCounterVec(CounterVecOpts{ - CounterOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. -func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - if opts.now == nil { - opts.now = time.Now - } - return &CounterVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels.names) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) - } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} - result.init(result) // Init self-collection. - result.createdTs = timestamppb.New(opts.now()) - return result - }), - } -} - -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) 
- if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return c -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. 
If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -// -// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index ad347113c0..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - "strings" - - "github.com/cespare/xxhash/v2" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" - "google.golang.org/protobuf/proto" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels and normalization function for - // which the metric maintains variable values. - variableLabels *compiledLabels - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. 
Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names and normalization functions. Their -// label values are variable and therefore not part of the Desc. (They are managed -// within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels.compile(), - } - if !model.IsValidMetricName(model.LabelValue(fqName)) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. 
- for _, label := range d.variableLabels.names { - if !checkLabelName(label) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) - return d - } - labelNames = append(labelNames, "$"+label) - labelNameSet[label] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) - return d - } - - xxh := xxhash.New() - for _, val := range labelValues { - xxh.WriteString(val) - xxh.Write(separatorByteSlice) - } - d.id = xxh.Sum64() - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - xxh.Reset() - xxh.WriteString(help) - xxh.Write(separatorByteSlice) - for _, labelName := range labelNames { - xxh.WriteString(labelName) - xxh.Write(separatorByteSlice) - } - d.dimHash = xxh.Sum64() - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - vlStrings := []string{} - if d.variableLabels != nil { - vlStrings = make([]string, 0, len(d.variableLabels.names)) - for _, vl := range d.variableLabels.names { - if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { - vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) - } else { - vlStrings = append(vlStrings, vl) - } - } - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - strings.Join(vlStrings, ","), - ) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 962608f02c..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. 
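The NewDesc contract deleted above (variable labels given by name only, const labels fully specified with values, construction errors deferred to registration time) is easiest to see in use. A minimal sketch; the metric and label names are illustrative, not taken from this diff:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// fqName and help are required; variable labels carry only names here,
	// while const labels are fully specified with values.
	desc := prometheus.NewDesc(
		"demo_requests_total",           // fqName (illustrative)
		"Requests served, by handler.",  // help
		[]string{"handler"},             // variable label names
		prometheus.Labels{"shard": "0"}, // const labels
	)
	// Desc implements Stringer; any construction error surfaces at
	// registration time, not here.
	fmt.Println(desc)
}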
Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. -// -// # A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// type metrics struct { -// cpuTemp prometheus.Gauge -// hdFailures *prometheus.CounterVec -// } -// -// func NewMetrics(reg prometheus.Registerer) *metrics { -// m := &metrics{ -// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }), -// hdFailures: prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ), -// } -// reg.MustRegister(m.cpuTemp) -// reg.MustRegister(m.hdFailures) -// return m -// } -// -// func main() { -// // Create a non-global registry. -// reg := prometheus.NewRegistry() -// -// // Create new metrics and register them using the custom registry. -// m := NewMetrics(reg) -// // Set values for the new created metrics. -// m.cpuTemp.Set(65.3) -// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // Expose metrics and custom registry via an HTTP server -// // using the HandleFor function. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// It register the metrics using a custom registry and exposes them via an HTTP server -// on the /metrics endpoint. -// -// # Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. 
A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and -// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, -// and HistogramVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. -// -// # Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). -// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). NewConstMetric is used -// for all metric types with just a float64 as their value: Counter, Gauge, and -// a special “type” called Untyped. Use the latter if you are not sure if the -// mirrored metric is a Counter or a Gauge. Creation of the Metric instance -// happens in the Collect method. The Describe method has to return separate -// Desc instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. Alternatively, -// you could return no Desc at all, which will mark the Collector “unchecked”. -// No checks are performed at registration time, but metric consistency will -// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situation where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// # Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. 
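A minimal sketch of the Register error handling described above; the registry, counter name, and recovery path are illustrative, with AlreadyRegisteredError being the error commonly special-cased:

package main

import (
	"errors"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_events_total", // illustrative name
		Help: "Events observed.",
	})
	// Register, unlike MustRegister, returns the error for handling.
	if err := reg.Register(c); err != nil {
		are := prometheus.AlreadyRegisteredError{}
		if errors.As(err, &are) {
			// A collector with an identical Desc is already registered:
			// reuse the existing one instead of failing.
			c = are.ExistingCollector.(prometheus.Counter)
		} else {
			log.Fatal(err)
		}
	}
	c.Inc()
}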
-// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// # HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// -// # Pushing to the Pushgateway -// -// Function for pushing to the Pushgateway can be found in the push sub-package. -// -// # Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// # Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index de5a856293..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
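The registry discussion above notes that the DefaultRegisterer comes with Go runtime and process collectors pre-registered, while a custom registry starts empty. A minimal sketch of opting back in and exposing the result over HTTP, assuming the collectors and promhttp sub-packages; the port and endpoint are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry starts empty, so the Go runtime and process
	// collectors that DefaultRegisterer ships with must be added explicitly.
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
	)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}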
- -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewExpvarCollector instead. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. -func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels.names)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index 3d383a735c..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
-func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index dd2eac9406..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// GaugeVecOpts bundles the options to create a GaugeVec metric. -// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels -// is optional and can safely be left to its default value. -type GaugeVecOpts struct { - GaugeOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. -func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. 
- return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *MetricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - return V2.NewGaugeVec(GaugeVecOpts{ - GaugeOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. -func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - return &GaugeVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels.names) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) - } - result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). 
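A minimal sketch of the positional-versus-map trade-off the GaugeVec documentation above and below describes; the vector and its labels are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	queued := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "demo_ops_queued", // illustrative name
		Help: "Operations queued, by user and type.",
	}, []string{"user", "type"})
	prometheus.MustRegister(queued)

	// Positional form: fast, but argument order must match the label order.
	queued.WithLabelValues("alice", "delete").Set(3)

	// Map form: more verbose, but immune to ordering mistakes.
	queued.With(prometheus.Labels{"user": "alice", "type": "delete"}).Inc()
}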
-// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return g -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. 
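A minimal sketch of the currying behavior documented above; the vector, labels, and values are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "demo_latency_seconds", // illustrative name
		Help: "Last observed latency, by service and method.",
	}, []string{"service", "method"})
	prometheus.MustRegister(latency)

	// MustCurryWith pre-binds "service"; only "method" stays variable on the
	// returned vector, and both vectors share the same underlying metrics.
	api := latency.MustCurryWith(prometheus.Labels{"service": "api"})
	api.WithLabelValues("GET").Set(0.25)
}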
-func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. Therefore, it must be safe to call the provided function -// concurrently. -// -// NewGaugeFunc is a good way to create an “info” style metric with a constant -// value of 1. Example: -// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 -func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go deleted file mode 100644 index 614fd61be9..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !js || wasm -// +build !js wasm - -package prometheus - -import "os" - -func getPIDFn() func() (int, error) { - pid := os.Getpid() - return func() (int, error) { - return pid, nil - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go deleted file mode 100644 index eaf8059ee1..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
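A minimal sketch of the GaugeFunc pattern deleted above, using a callback that is safe to call concurrently as the documentation requires; the metric name is illustrative:

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The callback runs at collect time and may be invoked concurrently,
	// so it must be concurrency-safe; runtime.NumGoroutine is.
	g := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "demo_goroutines", // illustrative name
		Help: "Current number of goroutines.",
	}, func() float64 {
		return float64(runtime.NumGoroutine())
	})
	prometheus.MustRegister(g)
}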
- -//go:build js && !wasm -// +build js,!wasm - -package prometheus - -func getPIDFn() func() (int, error) { - return func() (int, error) { - return 1, nil - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index 520cbd7d41..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "runtime" - "runtime/debug" - "time" -) - -// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. -// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so -// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. Those are the defaults we can't alter. -func goRuntimeMemStats() memStatsMetrics { - return memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. - "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. 
Equals to /memory/classes/heap/objects:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system. 
Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, - } -} - -type baseGoCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - gcLastTimeDesc *Desc - goInfoDesc *Desc -} - -func newBaseGoCollector() baseGoCollector { - return baseGoCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", - nil, nil), - gcLastTimeDesc: NewDesc( - "go_memstats_last_gc_time_seconds", - "Number of seconds since 1970 of last garbage collection.", - nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), - } -} - -// Describe returns all descriptions of the collector. -func (c *baseGoCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - ch <- c.gcLastTimeDesc - ch <- c.goInfoDesc -} - -// Collect returns the current state of all metrics of the collector. -func (c *baseGoCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - - n := getRuntimeNumThreads() - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) -} - -func memstatNamespace(s string) string { - return "go_memstats_" + s -} - -// memStatsMetrics provide description, evaluator, runtime/metrics name, and -// value type for memstat metrics. 
-type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go deleted file mode 100644 index 897a6e906b..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package prometheus - -import ( - "runtime" - "sync" - "time" -) - -type goCollector struct { - base baseGoCollector - - // ms... are memstats related. - msLast *runtime.MemStats // Previously collected memstats. - msLastTimestamp time.Time - msMtx sync.Mutex // Protects msLast and msLastTimestamp. - msMetrics memStatsMetrics - msRead func(*runtime.MemStats) // For mocking in tests. - msMaxWait time.Duration // Wait time for fresh memstats. - msMaxAge time.Duration // Maximum allowed age of old memstats. -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector() Collector { - msMetrics := goRuntimeMemStats() - msMetrics = append(msMetrics, struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType - }{ - // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }) - return &goCollector{ - base: newBaseGoCollector(), - msLast: &runtime.MemStats{}, - msRead: runtime.ReadMemStats, - msMaxWait: time.Second, - msMaxAge: 5 * time.Minute, - msMetrics: msMetrics, - } -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - var ( - ms = &runtime.MemStats{} - done = make(chan struct{}) - ) - // Start reading memstats first as it might take a while. - go func() { - c.msRead(ms) - c.msMtx.Lock() - c.msLast = ms - c.msLastTimestamp = time.Now() - c.msMtx.Unlock() - close(done) - }() - - // Collect base non-memory metrics. - c.base.Collect(ch) - - timer := time.NewTimer(c.msMaxWait) - select { - case <-done: // Our own ReadMemStats succeeded in time. Use it. - timer.Stop() // Important for high collection frequencies to not pile up timers. - c.msCollect(ch, ms) - return - case <-timer.C: // Time out, use last memstats if possible. Continue below. 
- } - c.msMtx.Lock() - if time.Since(c.msLastTimestamp) < c.msMaxAge { - // Last memstats are recent enough. Collect from them under the lock. - c.msCollect(ch, c.msLast) - c.msMtx.Unlock() - return - } - // If we are here, the last memstats are too old or don't exist. We have - // to wait until our own ReadMemStats finally completes. For that to - // happen, we have to release the lock. - c.msMtx.Unlock() - <-done - c.msCollect(ch, ms) -} - -func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go deleted file mode 100644 index 6b8684731c..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "runtime/metrics" - "strings" - "sync" - - "github.com/prometheus/client_golang/prometheus/internal" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -const ( - // constants for strings referenced more than once. - goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" - goGCHeapAllocsObjects = "/gc/heap/allocs:objects" - goGCHeapFreesObjects = "/gc/heap/frees:objects" - goGCHeapFreesBytes = "/gc/heap/frees:bytes" - goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" - goGCHeapObjects = "/gc/heap/objects:objects" - goGCHeapGoalBytes = "/gc/heap/goal:bytes" - goMemoryClassesTotalBytes = "/memory/classes/total:bytes" - goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" - goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" - goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" - goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" - goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" - goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" - goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" - goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" - goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" - goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" - goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" - goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" - goMemoryClassesOtherBytes = "/memory/classes/other:bytes" -) - -// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. 
-var rmNamesForMemStatsMetrics = []string{ - goGCHeapTinyAllocsObjects, - goGCHeapAllocsObjects, - goGCHeapFreesObjects, - goGCHeapAllocsBytes, - goGCHeapObjects, - goGCHeapGoalBytes, - goMemoryClassesTotalBytes, - goMemoryClassesHeapObjectsBytes, - goMemoryClassesHeapUnusedBytes, - goMemoryClassesHeapReleasedBytes, - goMemoryClassesHeapFreeBytes, - goMemoryClassesHeapStacksBytes, - goMemoryClassesOSStacksBytes, - goMemoryClassesMetadataMSpanInuseBytes, - goMemoryClassesMetadataMSPanFreeBytes, - goMemoryClassesMetadataMCacheInuseBytes, - goMemoryClassesMetadataMCacheFreeBytes, - goMemoryClassesProfilingBucketsBytes, - goMemoryClassesMetadataOtherBytes, - goMemoryClassesOtherBytes, -} - -func bestEffortLookupRM(lookup []string) []metrics.Description { - ret := make([]metrics.Description, 0, len(lookup)) - for _, rm := range metrics.All() { - for _, m := range lookup { - if m == rm.Name { - ret = append(ret, rm) - } - } - } - return ret -} - -type goCollector struct { - base baseGoCollector - - // mu protects updates to all fields ensuring a consistent - // snapshot is always produced by Collect. - mu sync.Mutex - - // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed). - sampleBuf []metrics.Sample - // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums. - sampleMap map[string]*metrics.Sample - - // rmExposedMetrics represents all runtime/metrics package metrics - // that were configured to be exposed. - rmExposedMetrics []collectorMetric - rmExactSumMapForHist map[string]string - - // With Go 1.17, the runtime/metrics package was introduced. - // From that point on, metric names produced by the runtime/metrics - // package could be generated from runtime/metrics names. However, - // these differ from the old names for the same values. - // - // This field exists to export the same values under the old names - // as well. - msMetrics memStatsMetrics - msMetricsEnabled bool -} - -type rmMetricDesc struct { - metrics.Description -} - -func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc { - var descs []rmMetricDesc - for _, d := range metrics.All() { - var ( - deny = true - desc rmMetricDesc - ) - - for _, r := range rules { - if !r.Matcher.MatchString(d.Name) { - continue - } - deny = r.Deny - } - if deny { - continue - } - - desc.Description = d - descs = append(descs, desc) - } - return descs -} - -func defaultGoCollectorOptions() internal.GoCollectorOptions { - return internal.GoCollectorOptions{ - RuntimeMetricSumForHist: map[string]string{ - "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes, - "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, - }, - RuntimeMetricRules: []internal.GoCollectorRule{ - // Recommended metrics we want by default from runtime/metrics. - {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, - }, - } -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { - opt := defaultGoCollectorOptions() - for _, o := range opts { - o(&opt) - } - - exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) - - // Collect all histogram samples so that we can get their buckets. - // The API guarantees that the buckets are always fixed for the lifetime - // of the process. 
- var histograms []metrics.Sample - for _, d := range exposedDescriptions { - if d.Kind == metrics.KindFloat64Histogram { - histograms = append(histograms, metrics.Sample{Name: d.Name}) - } - } - - if len(histograms) > 0 { - metrics.Read(histograms) - } - - bucketsMap := make(map[string][]float64) - for i := range histograms { - bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets - } - - // Generate a collector for each exposed runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) - // SampleBuf is used for reading from runtime/metrics. - // We are assuming the largest case to have stable pointers for sampleMap purposes. - sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) - sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) - for _, d := range exposedDescriptions { - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) - if !ok { - // Just ignore this metric; we can't do anything with it here. - // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested in TestExpectedRuntimeMetrics. - continue - } - help := attachOriginalName(d.Description.Description, d.Name) - - sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) - sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] - - var m collectorMetric - if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := opt.RuntimeMetricSumForHist[d.Name] - unit := d.Name[strings.IndexRune(d.Name, ':')+1:] - m = newBatchHistogram( - NewDesc( - BuildFQName(namespace, subsystem, name), - help, - nil, - nil, - ), - internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit), - hasSum, - ) - } else if d.Cumulative { - m = NewCounter(CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: help, - }, - ) - } else { - m = NewGauge(GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: help, - }) - } - metricSet = append(metricSet, m) - } - - // Add exact sum metrics to sampleBuf if not added before. - for _, h := range histograms { - sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name] - if !ok { - continue - } - - if _, ok := sampleMap[sumMetric]; ok { - continue - } - sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric}) - sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1] - } - - var ( - msMetrics memStatsMetrics - msDescriptions []metrics.Description - ) - - if !opt.DisableMemStatsLikeMetrics { - msMetrics = goRuntimeMemStats() - msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics) - - // Check if metric was not exposed before and if not, add to sampleBuf. - for _, mdDesc := range msDescriptions { - if _, ok := sampleMap[mdDesc.Name]; ok { - continue - } - sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name}) - sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1] - } - } - - return &goCollector{ - base: newBaseGoCollector(), - sampleBuf: sampleBuf, - sampleMap: sampleMap, - rmExposedMetrics: metricSet, - rmExactSumMapForHist: opt.RuntimeMetricSumForHist, - msMetrics: msMetrics, - msMetricsEnabled: !opt.DisableMemStatsLikeMetrics, - } -} - -func attachOriginalName(desc, origName string) string { - return fmt.Sprintf("%s Sourced from %s.", desc, origName) -} - -// Describe returns all descriptions of the collector. 
-func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } - for _, m := range c.rmExposedMetrics { - ch <- m.Desc() - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - // Collect base non-memory metrics. - c.base.Collect(ch) - - if len(c.sampleBuf) == 0 { - return - } - - // Collect must be thread-safe, so prevent concurrent use of - // sampleBuf elements. Just read into sampleBuf but write all the data - // we get into our Metrics or MemStats. - // - // This lock also ensures that the Metrics we send out are all from - // the same updates, ensuring their mutual consistency insofar as - // is guaranteed by the runtime/metrics package. - // - // N.B. This locking is heavy-handed, but Collect is expected to be called - // relatively infrequently. Also the core operation here, metrics.Read, - // is fast (O(tens of microseconds)) so contention should certainly be - // low, though channel operations and any allocations may add to that. - c.mu.Lock() - defer c.mu.Unlock() - - // Populate runtime/metrics sample buffer. - metrics.Read(c.sampleBuf) - - // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). - for i, metric := range c.rmExposedMetrics { - // We created samples for exposed metrics first in order, so indexes match. - sample := c.sampleBuf[i] - - // N.B. switch on concrete type because it's significantly more efficient - // than checking for the Counter and Gauge interface implementations. In - // this case, we control all the types here. - switch m := metric.(type) { - case *counter: - // Guard against decreases. This should never happen, but a failure - // to do so will result in a panic, which is a harsh consequence for - // a metrics collection bug. - v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) - if v1 > v0 { - m.Add(unwrapScalarRMValue(sample.Value) - m.get()) - } - m.Collect(ch) - case *gauge: - m.Set(unwrapScalarRMValue(sample.Value)) - m.Collect(ch) - case *batchHistogram: - m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) - m.Collect(ch) - default: - panic("unexpected metric type") - } - } - - if c.msMetricsEnabled { - // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it if goMemStatsCollection is enabled. - var ms runtime.MemStats - memStatsFromRM(&ms, c.sampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) - } - } -} - -// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed -// to be scalar and returns the equivalent float64 value. Panics if the -// value is not scalar. -func unwrapScalarRMValue(v metrics.Value) float64 { - switch v.Kind() { - case metrics.KindUint64: - return float64(v.Uint64()) - case metrics.KindFloat64: - return v.Float64() - case metrics.KindBad: - // Unsupported metric. - // - // This should never happen because we always populate our metric - // set from the runtime/metrics package. - panic("unexpected bad kind metric") - default: - // Unsupported metric kind. - // - // This should never happen because we check for this during initialization - // and flag and filter metrics whose kinds we don't understand. 
- panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) - } -} - -// exactSumFor takes a runtime/metrics metric name (that is assumed to -// be of kind KindFloat64Histogram) and returns its exact sum and whether -// its exact sum exists. -// -// The runtime/metrics API for histograms doesn't currently expose exact -// sums, but some of the other metrics are in fact exact sums of histograms. -func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := c.rmExactSumMapForHist[rmName] - if !ok { - return 0 - } - s, ok := c.sampleMap[sumName] - if !ok { - return 0 - } - return unwrapScalarRMValue(s.Value) -} - -func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { - lookupOrZero := func(name string) uint64 { - if s, ok := rm[name]; ok { - return s.Value.Uint64() - } - return 0 - } - - // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees. - // The reason for this is because MemStats couldn't be extended at the time - // but there was a desire to have Mallocs at least be a little more representative, - // while having Mallocs - Frees still represent a live object count. - // Unfortunately, MemStats doesn't actually export a large allocation count, - // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) - ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs - ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - - ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) - ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) - ms.Lookups = 0 // Already always zero. - ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) - ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) - ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) - ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) - ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero(goGCHeapObjects) - ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) - ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) - ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) - ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) - ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) - ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) - ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) - ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) - ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) - ms.NextGC = lookupOrZero(goGCHeapGoalBytes) - - // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, - // and often misleading due to the fact that it's an average over the lifetime - // of the process. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.GCCPUFraction = 0 -} - -// batchHistogram is a mutable histogram that is updated -// in batches. -type batchHistogram struct { - selfCollector - - // Static fields updated only once. - desc *Desc - hasSum bool - - // Because this histogram operates in batches, it just uses a - // single mutex for everything. updates are always serialized - // but Write calls may operate concurrently with updates. - // Contention between these two sources should be rare. - mu sync.Mutex - buckets []float64 // Inclusive lower bounds, like runtime/metrics. 
- counts []uint64 - sum float64 // Used if hasSum is true. -} - -// newBatchHistogram creates a new batch histogram value with the given -// Desc, buckets, and whether or not it has an exact sum available. -// -// buckets must always be from the runtime/metrics package, following -// the same conventions. -func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { - // We need to remove -Inf values. runtime/metrics keeps them around. - // But -Inf bucket should not be allowed for prometheus histograms. - if buckets[0] == math.Inf(-1) { - buckets = buckets[1:] - } - h := &batchHistogram{ - desc: desc, - buckets: buckets, - // Because buckets follows runtime/metrics conventions, there's - // 1 more value in the buckets list than there are buckets represented, - // because in runtime/metrics, the bucket values represent *boundaries*, - // and non-Inf boundaries are inclusive lower bounds for that bucket. - counts: make([]uint64, len(buckets)-1), - hasSum: hasSum, - } - h.init(h) - return h -} - -// update updates the batchHistogram from a runtime/metrics histogram. -// -// sum must be provided if the batchHistogram was created to have an exact sum. -// h.buckets must be a strict subset of his.Buckets. -func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) { - counts, buckets := his.Counts, his.Buckets - - h.mu.Lock() - defer h.mu.Unlock() - - // Clear buckets. - for i := range h.counts { - h.counts[i] = 0 - } - // Copy and reduce buckets. - var j int - for i, count := range counts { - h.counts[j] += count - if buckets[i+1] == h.buckets[j+1] { - j++ - } - } - if h.hasSum { - h.sum = sum - } -} - -func (h *batchHistogram) Desc() *Desc { - return h.desc -} - -func (h *batchHistogram) Write(out *dto.Metric) error { - h.mu.Lock() - defer h.mu.Unlock() - - sum := float64(0) - if h.hasSum { - sum = h.sum - } - dtoBuckets := make([]*dto.Bucket, 0, len(h.counts)) - totalCount := uint64(0) - for i, count := range h.counts { - totalCount += count - if !h.hasSum { - if count != 0 { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) - } - } - - // Skip the +Inf bucket, but only for the bucket list. - // It must still count for sum and totalCount. - if math.IsInf(h.buckets[i+1], 1) { - break - } - // Float64Histogram's upper bound is exclusive, so make it inclusive - // by obtaining the next float64 value down, in order. - upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i]) - dtoBuckets = append(dtoBuckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(totalCount), - UpperBound: proto.Float64(upperBound), - }) - } - out.Histogram = &dto.Histogram{ - Bucket: dtoBuckets, - SampleCount: proto.Uint64(totalCount), - SampleSum: proto.Float64(sum), - } - return nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index c453b754a7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,2056 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - nativeHistogramSchemaMaximum = 8 - nativeHistogramSchemaMinimum = -4 -) - -// nativeHistogramBounds for the frac of observed values. Only relevant for -// schema > 0. The position in the slice is the schema. (0 is never used, just -// here for convenience of using the schema directly as the index.) -// -// TODO(beorn7): Currently, we do a binary search into these slices. There are -// ways to turn it into a small number of simple array lookups. It probably only -// matters for schema 5 and beyond, but should be investigated. See this comment -// as a starting point: -// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 -var nativeHistogramBounds = [][]float64{ - // Schema "0": - {0.5}, - // Schema 1: - {0.5, 0.7071067811865475}, - // Schema 2: - {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, - // Schema 3: - { - 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, - 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, - }, - // Schema 4: - { - 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, - 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, - 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, - 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, - }, - // Schema 5: - { - 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, - 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, - 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, - 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, - 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, - 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, - 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, - 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, - }, - // Schema 6: - { - 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, - 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, - 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, - 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, - 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, - 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, - 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, - 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, - 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, - 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 
0.762799075372269, - 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, - 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, - 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, - 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, - 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, - 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, - }, - // Schema 7: - { - 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, - 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, - 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, - 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, - 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, - 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, - 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, - 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, - 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, - 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, - 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, - 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, - 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, - 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, - 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, - 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, - 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, - 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, - 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, - 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, - 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, - 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, - 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, - 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, - 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, - 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, - 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, - 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, - 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, - 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, - 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, - 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, - }, - // Schema 8: - { - 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, - 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, - 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, - 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, - 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, - 
0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, - 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, - 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, - 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, - 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, - 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, - 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, - 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, - 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, - 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, - 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, - 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, - 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, - 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, - 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, - 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, - 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, - 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, - 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, - 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, - 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, - 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, - 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, - 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, - 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, - 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, - 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, - 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, - 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, - 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, - 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, - 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, - 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, - 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, - 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, - 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, - 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, - 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, - 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, - 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, - 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, - 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, - 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, - 0.8408964152537144, 0.8431763167241966, 
0.8454623996346523, 0.8477546807446661, - 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, - 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, - 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, - 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, - 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, - 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, - 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, - 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, - 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, - 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, - 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, - 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, - 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, - 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, - 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, - }, -} - -// The nativeHistogramBounds above can be generated with the code below. -// -// TODO(beorn7): It's tempting to actually use `go generate` to generate the -// code above. However, this could lead to slightly different numbers on -// different architectures. We still need to come to terms if we are fine with -// that, or if we might prefer to specify precise numbers in the standard. -// -// var nativeHistogramBounds [][]float64 = make([][]float64, 9) -// -// func init() { -// // Populate nativeHistogramBounds. -// numBuckets := 1 -// for i := range nativeHistogramBounds { -// bounds := []float64{0.5} -// factor := math.Exp2(math.Exp2(float64(-i))) -// for j := 0; j < numBuckets-1; j++ { -// var bound float64 -// if (j+1)%2 == 0 { -// // Use previously calculated value for increased precision. -// bound = nativeHistogramBounds[i-1][j/2+1] -// } else { -// bound = bounds[j] * factor -// } -// bounds = append(bounds, bound) -// } -// numBuckets *= 2 -// nativeHistogramBounds[i] = bounds -// } -// } - -// A Histogram counts individual observations from an event or sample stream in -// configurable static buckets (or in dynamic sparse buckets as part of the -// experimental Native Histograms, see below for more details). Similar to a -// Summary, it also provides a sum of observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile PromQL function. -// -// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL -// (see the documentation for detailed procedures). However, Histograms require -// the user to pre-define suitable buckets, and they are in general less -// accurate. (Both problems are addressed by the experimental Native -// Histograms. To use them, configure a NativeHistogramBucketFactor in the -// HistogramOpts. They also require a Prometheus server v2.40+ with the -// corresponding feature flag enabled.) -// -// The Observe method of a Histogram has a very low performance overhead in -// comparison with the Observe method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. 
Observations are
- // usually positive or zero. Negative observations are accepted but
- // prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. (The experimental Native
- // Histograms handle negative observations properly.) See
- // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
- // for details.
- Observe(float64)
-}
-
-// bucketLabel is used for the label that defines the upper bound of a
-// bucket of a histogram ("le" -> "less or equal").
-const bucketLabel = "le"
-
-// DefBuckets are the default Histogram buckets. The default buckets are
-// tailored to broadly measure the response time (in seconds) of a network
-// service. Most likely, however, you will be required to define buckets
-// customized to your use case.
-var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
-
-// DefNativeHistogramZeroThreshold is the default value for
-// NativeHistogramZeroThreshold in the HistogramOpts.
-//
-// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
-// which is a bucket boundary at all possible resolutions.
-const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
-
-// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
-// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
-// bucket that only receives observations of precisely zero.
-const NativeHistogramZeroThresholdZero = -1
-
-var errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
-)
-
-// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
-// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
-// counted and not included in the returned slice. The returned slice is meant
-// to be used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is zero or negative.
-func LinearBuckets(start, width float64, count int) []float64 {
- if count < 1 {
-  panic("LinearBuckets needs a positive count")
- }
- buckets := make([]float64, count)
- for i := range buckets {
-  buckets[i] = start
-  start += width
- }
- return buckets
-}
-
-// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
-// has an upper bound of 'start' and each following bucket's upper bound is
-// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
-// not counted and not included in the returned slice. The returned slice is
-// meant to be used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
-// or if 'factor' is less than or equal to 1.
-func ExponentialBuckets(start, factor float64, count int) []float64 {
- if count < 1 {
-  panic("ExponentialBuckets needs a positive count")
- }
- if start <= 0 {
-  panic("ExponentialBuckets needs a positive start value")
- }
- if factor <= 1 {
-  panic("ExponentialBuckets needs a factor greater than 1")
- }
- buckets := make([]float64, count)
- for i := range buckets {
-  buckets[i] = start
-  start *= factor
- }
- return buckets
-}
-
-// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
-// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
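Aside (illustrative only): a short usage sketch of the three bucket constructors documented above; the printed values follow directly from the definitions:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Five buckets, each 2 wide, starting at upper bound 1.
	fmt.Println(prometheus.LinearBuckets(1, 2, 5)) // [1 3 5 7 9]

	// Five buckets, doubling from upper bound 1.
	fmt.Println(prometheus.ExponentialBuckets(1, 2, 5)) // [1 2 4 8 16]

	// Five buckets from 1 to 16; the growth factor is derived as
	// (16/1)^(1/(5-1)) = 2, so the result matches the line above.
	fmt.Println(prometheus.ExponentialBucketsRange(1, 16, 5))
}
```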
-func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 { - if count < 1 { - panic("ExponentialBucketsRange count needs a positive count") - } - if minBucket <= 0 { - panic("ExponentialBucketsRange min needs to be greater than 0") - } - - // Formula for exponential buckets. - // max = min*growthFactor^(bucketCount-1) - - // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) - - // Now that we know growthFactor, solve for each bucket. - buckets := make([]float64, count) - for i := 1; i <= count; i++ { - buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1)) - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. If Buckets is left as nil or set to a slice of length - // zero, it is replaced by default buckets. The default buckets are - // DefBuckets if no buckets for a native histogram (see below) are used, - // otherwise the default is no buckets. (In other words, if you want to - // use both regular buckets and buckets for a native histogram, you have - // to define the regular buckets here explicitly.) - Buckets []float64 - - // If NativeHistogramBucketFactor is greater than one, so-called sparse - // buckets are used (in addition to the regular buckets, if defined - // above). A Histogram with sparse buckets will be ingested as a Native - // Histogram by a Prometheus server with that feature enabled (requires - // Prometheus v2.40+). Sparse buckets are exponential buckets covering - // the whole float64 range (with the exception of the “zero” bucket, see - // NativeHistogramZeroThreshold below). From any one bucket to the next, - // the width of the bucket grows by a constant - // factor. NativeHistogramBucketFactor provides an upper bound for this - // factor (exception see below). 
The smaller - // NativeHistogramBucketFactor, the more buckets will be used and thus - // the more costly the histogram will become. A generally good trade-off - // between cost and accuracy is a value of 1.1 (each bucket is at most - // 10% wider than the previous one), which will result in each power of - // two divided into 8 buckets (e.g. there will be 8 buckets between 1 - // and 2, same as between 2 and 4, and 4 and 8, etc.). - // - // Details about the actually used factor: The factor is calculated as - // 2^(2^-n), where n is an integer number between (and including) -4 and - // 8. n is chosen so that the resulting factor is the largest that is - // still smaller or equal to NativeHistogramBucketFactor. Note that the - // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) - // ). If NativeHistogramBucketFactor is greater than 1 but smaller than - // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though - // it is larger than the provided NativeHistogramBucketFactor. - // - // NOTE: Native Histograms are still an experimental feature. Their - // behavior might still change without a major version - // bump. Subsequently, all NativeHistogram... options here might still - // change their behavior or name (or might completely disappear) without - // a major version bump. - NativeHistogramBucketFactor float64 - // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. - // For best results, this should be close to a bucket boundary. This is - // usually the case if picking a power of two. If - // NativeHistogramZeroThreshold is left at zero, - // DefNativeHistogramZeroThreshold is used as the threshold. To - // configure a zero bucket with an actual threshold of zero (i.e. only - // observations of precisely zero will go into the zero bucket), set - // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero - // constant (or any negative float value). - NativeHistogramZeroThreshold float64 - - // The next three fields define a strategy to limit the number of - // populated sparse buckets. If NativeHistogramMaxBucketNumber is left - // at zero, the number of buckets is not limited. (Note that this might - // lead to unbounded memory consumption if the values observed by the - // Histogram are sufficiently wide-spread. In particular, this could be - // used as a DoS attack vector. Where the observed values depend on - // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set - // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: - // - First, if the last reset (or the creation) of the histogram is at - // least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). - // - If less time has passed, or if NativeHistogramMinResetDuration is - // zero, no reset is performed. Instead, the zero threshold is - // increased sufficiently to reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. 
- // - After that, if the number of buckets still exceeds - // NativeHistogramMaxBucketNumber, the resolution of the histogram is - // reduced by doubling the width of the sparse buckets (up to a - // growth factor between one bucket to the next of 2^(2^4) = 65536, - // see above). - // - Any increased zero threshold or reduced resolution is reset back - // to their original values once NativeHistogramMinResetDuration has - // passed (since the last reset or the creation of the histogram). - NativeHistogramMaxBucketNumber uint32 - NativeHistogramMinResetDuration time.Duration - NativeHistogramMaxZeroThreshold float64 - - // NativeHistogramMaxExemplars limits the number of exemplars - // that are kept in memory for each native histogram. If you leave it at - // zero, a default value of 10 is used. If no exemplars should be kept specifically - // for native histograms, set it to a negative value. (Scrapers can - // still use the exemplars exposed for classic buckets, which are managed - // independently.) - NativeHistogramMaxExemplars int - // NativeHistogramExemplarTTL is only checked once - // NativeHistogramMaxExemplars is exceeded. In that case, the - // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. - // Otherwise, the older exemplar in the pair of exemplars that are closest - // together (on an exponential scale) is removed. - // If NativeHistogramExemplarTTL is left at its zero value, a default value of - // 5m is used. To always delete the oldest exemplar, set it to a negative value. - NativeHistogramExemplarTTL time.Duration - - // now is for testing purposes, by default it's time.Now. - now func() time.Time - - // afterFunc is for testing purposes, by default it's time.AfterFunc. - afterFunc func(time.Duration, func()) *time.Timer -} - -// HistogramVecOpts bundles the options to create a HistogramVec metric. -// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels -// is optional and can safely be left to its default value. -type HistogramVecOpts struct { - HistogramOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -// -// The returned implementation also implements ExemplarObserver. It is safe to -// perform the corresponding type assertion. Exemplars are tracked separately -// for each bucket. 
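Aside (illustrative, not part of the vendored file): pulling the options above together, a sketch of constructing a histogram with explicit classic buckets plus native (sparse) buckets and the bucket-count limitation strategy enabled; all field values are arbitrary examples:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "request_duration_seconds",
		Help: "Request latency.",
		// Explicit classic buckets; with a native histogram configured,
		// the default would otherwise be no classic buckets at all.
		Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
		// Each native bucket is at most 10% wider than the previous
		// one, i.e. each power of two is divided into 8 buckets.
		NativeHistogramBucketFactor: 1.1,
		// Bound memory use under wide-spread observations.
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(h)
	h.Observe(0.42)
}
```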
-func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels.names) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) - } - - for _, n := range desc.variableLabels.names { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if opts.now == nil { - opts.now = time.Now - } - if opts.afterFunc == nil { - opts.afterFunc = time.AfterFunc - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, - nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, - nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: opts.now(), - now: opts.now, - afterFunc: opts.afterFunc, - } - if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { - h.upperBounds = DefBuckets - } - if opts.NativeHistogramBucketFactor <= 1 { - h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. - } else { - switch { - case opts.NativeHistogramZeroThreshold > 0: - h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold - case opts.NativeHistogramZeroThreshold == 0: - h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold - } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. - h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) - h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make buckets - // for both counts as well as exemplars: - h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} - atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) - atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema) - h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} - atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) - atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema) - h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) - - h.init(h) // Init self-collection. - return h -} - -type histogramCounts struct { - // Order in this struct matters for the alignment required by atomic - // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG - - // sumBits contains the bits of the float64 representing the sum of all - // observations. 
- sumBits uint64
- count uint64
-
- // nativeHistogramZeroBucket counts all (positive and negative)
- // observations in the zero bucket (with an absolute value less or equal
- // the current threshold, see next field).
- nativeHistogramZeroBucket uint64
- // nativeHistogramZeroThresholdBits is the bit pattern of the current
- // threshold for the zero bucket. It's initially equal to
- // nativeHistogramZeroThreshold but may change according to the bucket
- // count limitation strategy.
- nativeHistogramZeroThresholdBits uint64
- // nativeHistogramSchema may change over time according to the bucket
- // count limitation strategy and therefore has to be saved here.
- nativeHistogramSchema int32
- // Number of (positive and negative) sparse buckets.
- nativeHistogramBucketsNumber uint32
-
- // Regular buckets.
- buckets []uint64
-
- // The sparse buckets for native histograms are implemented with a
- // sync.Map for now. A dedicated data structure will likely be more
- // efficient. There are separate maps for negative and positive
- // observations. The map's value is an *int64, counting observations in
- // that bucket. (Note that we don't use uint64 as an int64 won't
- // overflow in practice, and working with signed numbers from the
- // beginning simplifies the handling of deltas.) The map's key is the
- // index of the bucket according to the used
- // nativeHistogramSchema. Index 0 is for an upper bound of 1.
- nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
-}
-
-// observe manages the parts of observe that only affect
-// histogramCounts. doSparse is true if sparse buckets should be done,
-// too.
-func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
- if bucket < len(hc.buckets) {
-  atomic.AddUint64(&hc.buckets[bucket], 1)
- }
- atomicAddFloat(&hc.sumBits, v)
- if doSparse && !math.IsNaN(v) {
-  var (
-   key int
-   schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
-   zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
-   bucketCreated, isInf bool
-  )
-  if math.IsInf(v, 0) {
-   // Pretend v is MaxFloat64 but later increment key by one.
-   if math.IsInf(v, +1) {
-    v = math.MaxFloat64
-   } else {
-    v = -math.MaxFloat64
-   }
-   isInf = true
-  }
-  frac, exp := math.Frexp(math.Abs(v))
-  if schema > 0 {
-   bounds := nativeHistogramBounds[schema]
-   key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
-  } else {
-   key = exp
-   if frac == 0.5 {
-    key--
-   }
-   offset := (1 << -schema) - 1
-   key = (key + offset) >> -schema
-  }
-  if isInf {
-   key++
-  }
-  switch {
-  case v > zeroThreshold:
-   bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
-  case v < -zeroThreshold:
-   bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
-  default:
-   atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
-  }
-  if bucketCreated {
-   atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
-  }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hc.count, 1)
-}
-
-type histogram struct {
- // countAndHotIdx enables lock-free writes with use of atomic updates.
- // The most significant bit is the hot index [0 or 1] of the count field
- // below. Observe calls update the hot one. All remaining bits count the
- // number of Observe calls. Observe starts by incrementing this counter,
- // and finishes by incrementing the count field in the respective
- // histogramCounts, as a marker for completion.
- // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the histogram) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cold fields must - // be merged into the new hot before releasing writeMtx. - // - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - - // Only used in the Write method and for sparse bucket management. - mtx sync.Mutex - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. - nativeHistogramZeroThreshold float64 // The initial zero threshold. - nativeHistogramMaxZeroThreshold float64 - nativeHistogramMaxBuckets uint32 - nativeHistogramMinResetDuration time.Duration - // lastResetTime is protected by mtx. It is also used as created timestamp. - lastResetTime time.Time - // resetScheduled is protected by mtx. It is true if a reset is - // scheduled for a later time (when nativeHistogramMinResetDuration has - // passed). - resetScheduled bool - nativeExemplars nativeExemplars - - // now is for testing purposes, by default it's time.Now. - now func() time.Time - - // afterFunc is for testing purposes, by default it's time.AfterFunc. - afterFunc func(time.Duration, func()) *time.Timer -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - h.observe(v, h.findBucket(v)) -} - -// ObserveWithExemplar should not be called in a high-frequency setting -// for a native histogram with configured exemplars. For this case, -// the implementation isn't lock-free and might suffer from lock contention. -func (h *histogram) ObserveWithExemplar(v float64, e Labels) { - i := h.findBucket(v) - h.observe(v, i) - h.updateExemplar(v, i, e) -} - -func (h *histogram) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - h.mtx.Lock() - defer h.mtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. 
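Aside (illustrative, not part of the vendored file) on the two index expressions just below: the top bit of countAndHotIdx selects the hot side and the lower 63 bits count started observations, so both the observe and the write path need only a single atomic add. A standalone sketch of that encoding:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var countAndHotIdx uint64

	// Observe path: bump the observation count and learn the hot index.
	n := atomic.AddUint64(&countAndHotIdx, 1)
	fmt.Println("hot:", n>>63, "observations:", n&((1<<63)-1)) // hot: 0 observations: 1

	// Write path: adding 1<<63 flips hot and cold without touching the count bits.
	n = atomic.AddUint64(&countAndHotIdx, 1<<63)
	fmt.Println("hot:", n>>63, "observations:", n&((1<<63)-1)) // hot: 1 observations: 1
}
```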
- hotCounts := h.counts[n>>63] - coldCounts := h.counts[(^n)>>63] - - waitForCooldown(count, coldCounts) - - his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - CreatedTimestamp: timestamppb.New(h.lastResetTime), - } - out.Histogram = his - out.Label = h.labelPairs - - var cumCount uint64 - for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - his.Bucket[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), - UpperBound: proto.Float64(upperBound), - } - if e := h.exemplars[i].Load(); e != nil { - his.Bucket[i].Exemplar = e.(*dto.Exemplar) - } - } - // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. - if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e.(*dto.Exemplar), - } - his.Bucket = append(his.Bucket, b) - } - if h.nativeHistogramSchema > math.MinInt32 { - his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) - his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) - zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) - - defer func() { - coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) - coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) - }() - - his.ZeroCount = proto.Uint64(zeroBucket) - his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) - his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) - - // Add a no-op span to a histogram without observations and with - // a zero threshold of zero. Otherwise, a native histogram would - // look like a classic histogram to scrapers. - if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 { - his.PositiveSpan = []*dto.BucketSpan{{ - Offset: proto.Int32(0), - Length: proto.Uint32(0), - }} - } - - if h.nativeExemplars.isEnabled() { - h.nativeExemplars.Lock() - his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) - h.nativeExemplars.Unlock() - } - - } - addAndResetCounts(hotCounts, coldCounts) - return nil -} - -// findBucket returns the index of the bucket for the provided value, or -// len(h.upperBounds) for the +Inf bucket. 
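Aside (illustrative only): the findBucket implementation below switches from a linear scan to sort.SearchFloat64s once there are 35 or more bounds. Both strategies return the smallest index whose bound covers v; a quick standalone check:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	upperBounds := []float64{0.1, 0.5, 1, 5}
	for _, v := range []float64{0.05, 0.5, 2, 10} {
		// Linear scan: first bucket whose upper bound covers v.
		linear := len(upperBounds) // +Inf bucket if nothing matches.
		for i, b := range upperBounds {
			if v <= b {
				linear = i
				break
			}
		}
		// sort.SearchFloat64s yields the same index.
		fmt.Println(v, linear, sort.SearchFloat64s(upperBounds, v))
	}
}
```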
-func (h *histogram) findBucket(v float64) int {
- n := len(h.upperBounds)
- if n == 0 {
-  return 0
- }
-
- // Early exit: if v is less than or equal to the first upper bound, return 0
- if v <= h.upperBounds[0] {
-  return 0
- }
-
- // Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
- if v > h.upperBounds[n-1] {
-  return n
- }
-
- // For small arrays, use simple linear search.
- // The "magic number" 35 is the result of tests on a couple of different (AWS and baremetal) servers;
- // see more details here: https://github.com/prometheus/client_golang/pull/1662
- if n < 35 {
-  for i, bound := range h.upperBounds {
-   if v <= bound {
-    return i
-   }
-  }
-  // If v is greater than all upper bounds, return len(h.upperBounds)
-  return n
- }
-
- // For larger arrays, use stdlib's binary search
- return sort.SearchFloat64s(h.upperBounds, v)
-}
-
-// observe is the implementation for Observe without the findBucket part.
-func (h *histogram) observe(v float64, bucket int) {
- // Do not add to sparse buckets for NaN observations.
- doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
- // We increment h.countAndHotIdx so that the counter in the lower
- // 63 bits gets incremented. At the same time, we get the new value
- // back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1)
- hotCounts := h.counts[n>>63]
- hotCounts.observe(v, bucket, doSparse)
- if doSparse {
-  h.limitBuckets(hotCounts, v, bucket)
- }
-}
-
-// limitBuckets applies a strategy to limit the number of populated sparse
-// buckets. It's generally best effort, and there are situations where the
-// number can go higher (if even the lowest resolution isn't enough to reduce
-// the number sufficiently, or if the provided counts aren't fully updated yet
-// by a concurrently happening Write call).
-func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
- if h.nativeHistogramMaxBuckets == 0 {
-  return // No limit configured.
- }
- if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
-  return // Bucket limit not exceeded yet.
- }
-
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- // The hot counts might have been swapped just before we acquired the
- // lock. Re-fetch the hot counts first...
- n := atomic.LoadUint64(&h.countAndHotIdx)
- hotIdx := n >> 63
- coldIdx := (^n) >> 63
- hotCounts := h.counts[hotIdx]
- coldCounts := h.counts[coldIdx]
- // ...and then check again if we really have to reduce the bucket count.
- if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
-  return // Bucket limit not exceeded after all.
- }
- // Try the various strategies in order.
- if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
-  return
- }
- // One of the other strategies will happen. To undo what they will do as
- // soon as enough time has passed to satisfy
- // h.nativeHistogramMinResetDuration, schedule a reset at the right time
- // if we haven't done so already.
- if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
-  h.resetScheduled = true
-  h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
- }
-
- if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
-  return
- }
- h.doubleBucketWidth(hotCounts, coldCounts)
-}
-
-// maybeReset resets the whole histogram if at least
-// h.nativeHistogramMinResetDuration has passed. It returns true if the
-// histogram has been reset. The caller must have locked h.mtx.
-func (h *histogram) maybeReset(
- hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
-) bool {
- // We are using the possibly mocked h.now() rather than
- // time.Since(h.lastResetTime) to enable testing.
- if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
-  h.resetScheduled || // Do not interfere if a reset is already scheduled.
-  h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
-  return false
- }
- // Completely reset coldCounts.
- h.resetCounts(cold)
- // Repeat the latest observation to not lose it completely.
- cold.observe(value, bucket, true)
- // Make coldCounts the new hot counts while resetting countAndHotIdx.
- n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
- count := n & ((1 << 63) - 1)
- waitForCooldown(count, hot)
- // Finally, reset the formerly hot counts, too.
- h.resetCounts(hot)
- h.lastResetTime = h.now()
- return true
-}
-
-// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
-// called without having locked h.mtx.
-func (h *histogram) reset() {
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- n := atomic.LoadUint64(&h.countAndHotIdx)
- hotIdx := n >> 63
- coldIdx := (^n) >> 63
- hot := h.counts[hotIdx]
- cold := h.counts[coldIdx]
- // Completely reset coldCounts.
- h.resetCounts(cold)
- // Make coldCounts the new hot counts while resetting countAndHotIdx.
- n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
- count := n & ((1 << 63) - 1)
- waitForCooldown(count, hot)
- // Finally, reset the formerly hot counts, too.
- h.resetCounts(hot)
- h.lastResetTime = h.now()
- h.resetScheduled = false
-}
-
-// maybeWidenZeroBucket widens the zero bucket until it includes the existing
-// buckets closest to the zero bucket (which could be two, if an equidistant
-// negative and a positive bucket exists, but usually it's only one bucket to be
-// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
-// limits how far the zero bucket can be extended, and if that's not enough to
-// include an existing bucket, the method returns false. The caller must have
-// locked h.mtx.
-func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
- currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
- if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
-  return false
- }
- // Find the key of the bucket closest to zero.
- smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
- smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
- if smallestNegativeKey < smallestKey {
-  smallestKey = smallestNegativeKey
- }
- if smallestKey == math.MaxInt32 {
-  return false
- }
- newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
- if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
-  return false // New threshold would exceed the max threshold.
- }
- atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
- // Remove applicable buckets.
- if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
-  atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- }
- if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
-  atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- }
- // Make cold counts the new hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - count := n & ((1 << 63) - 1) - // Swap the pointer names to represent the new roles and make - // the rest less confusing. - hot, cold = cold, hot - waitForCooldown(count, cold) - // Add all the now cold counts to the new hot counts... - addAndResetCounts(hot, cold) - // ...adjust the new zero threshold in the cold counts, too... - atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) - // ...and then merge the newly deleted buckets into the wider zero - // bucket. - mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - key := k.(int) - bucket := v.(*int64) - if key == smallestKey { - // Merge into hot zero bucket... - atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) - // ...and delete from cold counts. - coldBuckets.Delete(key) - atomicDecUint32(&cold.nativeHistogramBucketsNumber) - } else { - // Add to corresponding hot bucket... - if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) - } - // ...and reset cold bucket. - atomic.StoreInt64(bucket, 0) - } - return true - } - } - - cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) - cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) - return true -} - -// doubleBucketWidth doubles the bucket width (by decrementing the schema -// number). Note that very sparse buckets could lead to a low reduction of the -// bucket count (or even no reduction at all). The method does nothing if the -// schema is already -4. -func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { - coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) - if coldSchema == -4 { - return // Already at lowest resolution. - } - coldSchema-- - atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) - // Play it simple and just delete all cold buckets. - atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) - deleteSyncMap(&cold.nativeHistogramBucketsNegative) - deleteSyncMap(&cold.nativeHistogramBucketsPositive) - // Make coldCounts the new hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - count := n & ((1 << 63) - 1) - // Swap the pointer names to represent the new roles and make - // the rest less confusing. - hot, cold = cold, hot - waitForCooldown(count, cold) - // Add all the now cold counts to the new hot counts... - addAndResetCounts(hot, cold) - // ...adjust the schema in the cold counts, too... - atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) - // ...and then merge the cold buckets into the wider hot buckets. - merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - key := k.(int) - bucket := v.(*int64) - // Adjust key to match the bucket to merge into. - if key > 0 { - key++ - } - key /= 2 - // Add to corresponding hot bucket. - if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) - } - return true - } - } - - cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) - cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) - // Play it simple again and just delete all cold buckets. 
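Aside (illustrative): the key arithmetic in the merge closure above is easy to misread. Decrementing the schema doubles every bucket width, so adjacent keys must collapse pairwise. A standalone sketch of that mapping, using a hypothetical parentKey helper that mirrors the closure's adjustment:

```go
package main

import "fmt"

// parentKey mirrors the merge closure: shift positive keys up by one, then
// halve. Go integer division truncates toward zero, which is what makes the
// negative keys pair up correctly.
func parentKey(key int) int {
	if key > 0 {
		key++
	}
	return key / 2
}

func main() {
	for _, k := range []int{-3, -2, -1, 0, 1, 2, 3, 4} {
		fmt.Printf("key %d -> %d\n", k, parentKey(k))
	}
	// Pairs {1,2} -> 1, {3,4} -> 2, {0,-1} -> 0, {-2,-3} -> -1.
}
```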
- atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) - deleteSyncMap(&cold.nativeHistogramBucketsNegative) - deleteSyncMap(&cold.nativeHistogramBucketsPositive) -} - -func (h *histogram) resetCounts(counts *histogramCounts) { - atomic.StoreUint64(&counts.sumBits, 0) - atomic.StoreUint64(&counts.count, 0) - atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) - atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) - atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) - atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) - for i := range h.upperBounds { - atomic.StoreUint64(&counts.buckets[i], 0) - } - deleteSyncMap(&counts.nativeHistogramBucketsNegative) - deleteSyncMap(&counts.nativeHistogramBucketsPositive) -} - -// updateExemplar replaces the exemplar for the provided classic bucket. -// With empty labels, it's a no-op. It panics if any of the labels is invalid. -// If histogram is native, the exemplar will be cached into nativeExemplars, -// which has a limit, and will remove one exemplar when limit is reached. -func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, h.now(), l) - if err != nil { - panic(err) - } - h.exemplars[bucket].Store(e) - doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) - if doSparse { - h.nativeExemplars.addExemplar(e) - } -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - return V2.NewHistogramVec(HistogramVecOpts{ - HistogramOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. -func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts.HistogramOpts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. 
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
-	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
-	if metric != nil {
-		return metric.(Observer), err
-	}
-	return nil, err
-}
-
-// GetMetricWith returns the Histogram for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Histogram is created. Implications of
-// creating a Histogram without using it and keeping the Histogram for later use
-// are the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
-	metric, err := v.MetricVec.GetMetricWith(labels)
-	if metric != nil {
-		return metric.(Observer), err
-	}
-	return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-//	myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
-	h, err := v.GetMetricWithLabelValues(lvs...)
-	if err != nil {
-		panic(err)
-	}
-	return h
-}
-
-// With works as GetMetricWith but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *HistogramVec) With(labels Labels) Observer {
-	h, err := v.GetMetricWith(labels)
-	if err != nil {
-		panic(err)
-	}
-	return h
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the HistogramVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
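The lookup, panic, and currying variants documented above combine into a short workflow. A minimal sketch (editor's illustration; the metric name and label names are invented for the example, not taken from this repository):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency partitioned by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(reqDur)

	// Observe with explicit label values; the order matches the label
	// names given at construction time.
	reqDur.WithLabelValues("404", "GET").Observe(0.042)

	// Currying pre-sets "method"; the curried vector only needs "code".
	// Both vectors share the same underlying metrics.
	getOnly := reqDur.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("200").Observe(0.021)
}
```

As the comment above notes, only one of the two vectors (usually the uncurried one) should be registered with a given registry.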
-func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair - createdTs *timestamppb.Timestamp -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{ - CreatedTimestamp: h.createdTs, - } - - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. 
-func NewConstHistogramWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - ct time.Time, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - createdTs: timestamppb.New(ct), - }, nil -} - -// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where -// NewConstHistogramWithCreatedTimestamp would have returned an error. -func MustNewConstHistogramWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - ct time.Time, - labelValues ...string, -) Metric { - m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} - -// pickSchema returns the largest number n between -4 and 8 such that -// 2^(2^-n) is less or equal the provided bucketFactor. -// -// Special cases: -// - bucketFactor <= 1: panics. -// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. -func pickSchema(bucketFactor float64) int32 { - if bucketFactor <= 1 { - panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) - } - floor := math.Floor(math.Log2(math.Log2(bucketFactor))) - switch { - case floor <= -8: - return nativeHistogramSchemaMaximum - case floor >= 4: - return nativeHistogramSchemaMinimum - default: - return -int32(floor) - } -} - -func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { - var ii []int - buckets.Range(func(k, v interface{}) bool { - ii = append(ii, k.(int)) - return true - }) - sort.Ints(ii) - - if len(ii) == 0 { - return nil, nil - } - - var ( - spans []*dto.BucketSpan - deltas []int64 - prevCount int64 - nextI int - ) - - appendDelta := func(count int64) { - *spans[len(spans)-1].Length++ - deltas = append(deltas, count-prevCount) - prevCount = count - } - - for n, i := range ii { - v, _ := buckets.Load(i) - count := atomic.LoadInt64(v.(*int64)) - // Multiple spans with only small gaps in between are probably - // encoded more efficiently as one larger span with a few empty - // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one or two buckets should not create - // a new span. - iDelta := int32(i - nextI) - if n == 0 || iDelta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap - // of more than two buckets. - spans = append(spans, &dto.BucketSpan{ - Offset: proto.Int32(iDelta), - Length: proto.Uint32(0), - }) - } else { - // We have found a small gap (or no gap at all). - // Insert empty buckets as needed. - for j := int32(0); j < iDelta; j++ { - appendDelta(0) - } - } - appendDelta(count) - nextI = i + 1 - } - return spans, deltas -} - -// addToBucket increments the sparse bucket at key by the provided amount. It -// returns true if a new sparse bucket had to be created for that. 
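Since pickSchema is unexported, its formula cannot be exercised from outside the package. The following standalone sketch mirrors the arithmetic so it can be sanity-checked (schemaFor is an editor-invented name; the literals 8 and -4 stand in for nativeHistogramSchemaMaximum and nativeHistogramSchemaMinimum):

```go
package main

import (
	"fmt"
	"math"
)

// schemaFor mirrors pickSchema's formula: the largest n in [-4, 8] such
// that 2^(2^-n) <= bucketFactor (a sketch, not the library API).
func schemaFor(bucketFactor float64) int32 {
	if bucketFactor <= 1 {
		panic("bucketFactor must be > 1")
	}
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8 // nativeHistogramSchemaMaximum
	case floor >= 4:
		return -4 // nativeHistogramSchemaMinimum
	default:
		return -int32(floor)
	}
}

func main() {
	// A bucket factor of 1.1 yields schema 3, i.e. buckets growing by
	// a factor of 2^(2^-3) ≈ 1.09 per bucket.
	fmt.Println(schemaFor(1.1))   // 3
	fmt.Println(schemaFor(2.0))   // 0
	fmt.Println(schemaFor(1.002)) // 8 (factors this close to 1 cap at schema 8)
}
```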
-func addToBucket(buckets *sync.Map, key int, increment int64) bool {
-	if existingBucket, ok := buckets.Load(key); ok {
-		// Fast path without allocation.
-		atomic.AddInt64(existingBucket.(*int64), increment)
-		return false
-	}
-	// Bucket doesn't exist yet. Slow path allocating new counter.
-	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
-	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
-		// The bucket was created concurrently in another goroutine.
-		// Have to increment after all.
-		atomic.AddInt64(actualBucket.(*int64), increment)
-		return false
-	}
-	return true
-}
-
-// addAndReset returns a function to be used with sync.Map.Range of sparse
-// buckets in coldCounts. It increments the buckets in the provided hotBuckets
-// according to the buckets ranged through. It then resets all buckets ranged
-// through to 0 (but leaves them in place so that they don't need to get
-// recreated on the next scrape).
-func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
-	return func(k, v interface{}) bool {
-		bucket := v.(*int64)
-		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
-			atomic.AddUint32(bucketNumber, 1)
-		}
-		atomic.StoreInt64(bucket, 0)
-		return true
-	}
-}
-
-func deleteSyncMap(m *sync.Map) {
-	m.Range(func(k, v interface{}) bool {
-		m.Delete(k)
-		return true
-	})
-}
-
-func findSmallestKey(m *sync.Map) int {
-	result := math.MaxInt32
-	m.Range(func(k, v interface{}) bool {
-		key := k.(int)
-		if key < result {
-			result = key
-		}
-		return true
-	})
-	return result
-}
-
-func getLe(key int, schema int32) float64 {
-	// Here a bit of context about the behavior for the last bucket counting
-	// regular numbers (called simply "last bucket" below) and the bucket
-	// counting observations of ±Inf (called "inf bucket" below, with a key
-	// one higher than that of the "last bucket"):
-	//
-	// If we apply the usual formula to the last bucket, its upper bound
-	// would be calculated as +Inf. The reason is that the max possible
-	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
-	// the calculated bucket boundaries. So the calculated boundary has to
-	// be larger than math.MaxFloat64, and the only float64 larger than
-	// math.MaxFloat64 is +Inf. However, we want to count actual
-	// observations of ±Inf in the inf bucket. Therefore, we have to treat
-	// the upper bound of the last bucket specially and set it to
-	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
-	// being one higher than that of the last bucket, naturally comes out as
-	// +Inf by the usual formula. So that's fine.)
-	//
-	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
-	// 1024. If there were a float64 number following math.MaxFloat64, it
-	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
-	// of 0.5 and an exp of 1025. However, since frac must be smaller than
-	// 1, and exp must be smaller than 1025, either representation overflows
-	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
-	// largest possible float64. Q.E.D.) However, the formula for
-	// calculating the upper bound from the idx and schema of the last
-	// bucket results in precisely that. It is either frac=1.0 & exp=1024
-	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0).
(This is,
-	// by the way, a power of two where the exponent itself is a power of
-	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
-	// schemas.) So these are the special cases we have to catch below.
-	if schema < 0 {
-		exp := key << -schema
-		if exp == 1024 {
-			// This is the last bucket before the overflow bucket
-			// (for ±Inf observations). Return math.MaxFloat64 as
-			// explained above.
-			return math.MaxFloat64
-		}
-		return math.Ldexp(1, exp)
-	}
-
-	fracIdx := key & ((1 << schema) - 1)
-	frac := nativeHistogramBounds[schema][fracIdx]
-	exp := (key >> schema) + 1
-	if frac == 0.5 && exp == 1025 {
-		// This is the last bucket before the overflow bucket (for ±Inf
-		// observations). Return math.MaxFloat64 as explained above.
-		return math.MaxFloat64
-	}
-	return math.Ldexp(frac, exp)
-}
-
-// waitForCooldown returns after the count field in the provided histogramCounts
-// has reached the provided count value.
-func waitForCooldown(count uint64, counts *histogramCounts) {
-	for count != atomic.LoadUint64(&counts.count) {
-		runtime.Gosched() // Let observations get work done.
-	}
-}
-
-// atomicAddFloat adds the provided float atomically to another float
-// represented by the bit pattern the bits pointer is pointing to.
-func atomicAddFloat(bits *uint64, v float64) {
-	for {
-		loadedBits := atomic.LoadUint64(bits)
-		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
-		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
-			break
-		}
-	}
-}
-
-// atomicDecUint32 atomically decrements the uint32 p points to. See
-// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
-func atomicDecUint32(p *uint32) {
-	atomic.AddUint32(p, ^uint32(0))
-}
-
-// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
-// bucket) from the cold counts to the corresponding fields in the hot
-// counts. Those fields are then reset to 0 in the cold counts.
-func addAndResetCounts(hot, cold *histogramCounts) {
-	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
-	atomic.StoreUint64(&cold.count, 0)
-	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
-	atomicAddFloat(&hot.sumBits, coldSum)
-	atomic.StoreUint64(&cold.sumBits, 0)
-	for i := range hot.buckets {
-		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
-		atomic.StoreUint64(&cold.buckets[i], 0)
-	}
-	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
-	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
-}
-
-type nativeExemplars struct {
-	sync.Mutex
-
-	// Time-to-live for exemplars. It is set to -1 if exemplars are disabled,
-	// that is, NativeHistogramMaxExemplars is below 0.
-	// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
-	ttl time.Duration
-
-	exemplars []*dto.Exemplar
-}
-
-func (n *nativeExemplars) isEnabled() bool {
-	return n.ttl != -1
-}
-
-func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
-	if ttl == 0 {
-		ttl = 5 * time.Minute
-	}
-
-	if maxCount == 0 {
-		maxCount = 10
-	}
-
-	if maxCount < 0 {
-		maxCount = 0
-		ttl = -1
-	}
-
-	return nativeExemplars{
-		ttl:       ttl,
-		exemplars: make([]*dto.Exemplar, 0, maxCount),
-	}
-}
-
-func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
-	if !n.isEnabled() {
-		return
-	}
-
-	n.Lock()
-	defer n.Unlock()
-
-	// When the number of exemplars has not yet reached cap(n.exemplars),
-	// insert the new exemplar directly.
-	if len(n.exemplars) < cap(n.exemplars) {
-		var nIdx int
-		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
-			if *e.Value < *n.exemplars[nIdx].Value {
-				break
-			}
-		}
-		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
-		return
-	}
-
-	if len(n.exemplars) == 1 {
-		// When the number of exemplars is 1, then
-		// replace the existing exemplar with the new exemplar.
-		n.exemplars[0] = e
-		return
-	}
-	// From this point on, the number of exemplars is greater than 1.
-
-	// When the number of exemplars exceeds the limit, remove one exemplar.
-	var (
-		ot    = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
-		otIdx = -1          // Index of the exemplar with the oldest timestamp.
-
-		md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
-
-		// The insertion point of the new exemplar in the exemplars slice after insertion.
-		// This is calculated purely based on the order of the exemplars by value.
-		// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
-		nIdx = -1
-
-		// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
-		// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
-		// It is calculated in 3 steps:
-		//   1. First we set rIdx to the index of the older exemplar within the closest pair by value.
-		//      That is the following will be true (on log scale):
-		//      either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
-		//      the closest values to each other from all pairs.
-		//      For example, suppose the values are distributed like this:
-		//        |-----------x-------------x----------------x----x-----|
-		//                                                    ^--rIdx as this is older.
-		//      Or like this:
-		//        |-----------x-------------x----------------x----x-----|
-		//                                                         ^--rIdx as this is older.
-		//   2. If there is an exemplar that expired, then we simply reset rIdx to that index.
-		//   3. We check if by inserting the new exemplar we would create a closer pair at
-		//      (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
-		//      keep the spread of exemplars by value; otherwise we keep rIdx as it is.
-		rIdx = -1
-		cLog float64 // Logarithm of the current exemplar.
-		pLog float64 // Logarithm of the previous exemplar.
-	)
-
-	for i, exemplar := range n.exemplars {
-		// Find the exemplar with the oldest timestamp.
-		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
-			ot = exemplar.Timestamp.AsTime()
-			otIdx = i
-		}
-
-		// Find the index at which to insert the new exemplar.
-		if nIdx == -1 && *e.Value <= *exemplar.Value {
-			nIdx = i
-		}
-
-		// Find the two closest exemplars and pick the one with the older timestamp.
-		pLog = cLog
-		cLog = math.Log(exemplar.GetValue())
-		if i == 0 {
-			continue
-		}
-		diff := math.Abs(cLog - pLog)
-		if md == -1 || diff < md {
-			// The closest exemplar pair is at index: i-1, i.
-			// Choose the exemplar with the older timestamp for replacement.
-			md = diff
-			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
-				rIdx = i
-			} else {
-				rIdx = i - 1
-			}
-		}
-
-	}
-
-	// If all existing exemplars are smaller than the new exemplar,
-	// then the exemplar should be inserted at the end.
-	if nIdx == -1 {
-		nIdx = len(n.exemplars)
-	}
-	// Here, we have the following relationships:
-	// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
-	// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
-
-	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
-		// If the oldest exemplar has expired, then replace it with the new exemplar.
-		rIdx = otIdx
-	} else {
-		// In the previous for loop, when calculating the closest pair of exemplars,
-		// we did not take into account the newly inserted exemplar.
-		// So we need to calculate with the newly inserted exemplar again.
-		elog := math.Log(e.GetValue())
-		if nIdx > 0 {
-			diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
-			if diff < md {
-				// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
-				//                                          v--rIdx
-				// |-----------x-n-----------x----------------x----x-----|
-				//     nIdx-1--^ ^--new exemplar value
-				// Do not make the spread worse, replace nIdx-1 and not rIdx.
-				md = diff
-				rIdx = nIdx - 1
-			}
-		}
-		if nIdx < len(n.exemplars) {
-			diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
-			if diff < md {
-				// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
-				//                                          v--rIdx
-				// |-----------x-----------n-x----------------x----x-----|
-				//     new exemplar value--^ ^--nIdx
-				// Do not make the spread worse, replace nIdx and not rIdx.
-				rIdx = nIdx
-			}
-		}
-	}
-
-	// Adjust the slice according to rIdx and nIdx.
-	switch {
-	case rIdx == nIdx:
-		n.exemplars[nIdx] = e
-	case rIdx < nIdx:
-		n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
-	case rIdx > nIdx:
-		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
-	}
-}
-
-type constNativeHistogram struct {
-	desc *Desc
-	dto.Histogram
-	labelPairs []*dto.LabelPair
-}
-
-func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
-	var bucketPopulationSum int64
-	for _, v := range positiveBuckets {
-		bucketPopulationSum += v
-	}
-	for _, v := range negativeBuckets {
-		bucketPopulationSum += v
-	}
-	bucketPopulationSum += int64(zeroBucket)
-
-	// If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts.
-	// Otherwise, the number of observations must be equal to the sum of all bucket counts.
-
-	if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
-		!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
-		return errors.New("the sum of all bucket populations exceeds the count of observations")
-	}
-	return nil
-}
-
-// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
-// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
-// cannot be changed, the returned value does not implement the Histogram
-// interface (but only the Metric interface). Users of this package will not
-// have much use for it in regular operations. However, when implementing custom
-// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
-// to send it to Prometheus in the Collect method.
-// -// zeroBucket counts all (positive and negative) -// observations in the zero bucket (with an absolute value less or equal -// the current threshold). -// positiveBuckets and negativeBuckets are separate maps for negative and positive -// observations. The map's value is an int64, counting observations in -// that bucket. The map's key is the -// index of the bucket according to the used -// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets. -// NewConstNativeHistogram returns an error if -// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid. -// - the schema passed is not between 8 and -4 -// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN) -// -// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus. -func NewConstNativeHistogram( - desc *Desc, - count uint64, - sum float64, - positiveBuckets, negativeBuckets map[int]int64, - zeroBucket uint64, - schema int32, - zeroThreshold float64, - createdTimestamp time.Time, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { - return nil, errors.New("invalid native histogram schema") - } - if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { - return nil, err - } - - NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) - PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) - ret := &constNativeHistogram{ - desc: desc, - Histogram: dto.Histogram{ - CreatedTimestamp: timestamppb.New(createdTimestamp), - Schema: &schema, - ZeroThreshold: &zeroThreshold, - SampleCount: &count, - SampleSum: &sum, - - NegativeSpan: NegativeSpan, - NegativeDelta: NegativeDelta, - - PositiveSpan: PositiveSpan, - PositiveDelta: PositiveDelta, - - ZeroCount: proto.Uint64(zeroBucket), - }, - labelPairs: MakeLabelPairs(desc, labelValues), - } - if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { - ret.PositiveSpan = []*dto.BucketSpan{{ - Offset: proto.Int32(0), - Length: proto.Uint32(0), - }} - } - return ret, nil -} - -// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where -// NewConstNativeHistogram would have returned an error. -func MustNewConstNativeHistogram( - desc *Desc, - count uint64, - sum float64, - positiveBuckets, negativeBuckets map[int]int64, - zeroBucket uint64, - nativeHistogramSchema int32, - nativeHistogramZeroThreshold float64, - createdTimestamp time.Time, - labelValues ...string, -) Metric { - nativehistogram, err := NewConstNativeHistogram(desc, - count, - sum, - positiveBuckets, - negativeBuckets, - zeroBucket, - nativeHistogramSchema, - nativeHistogramZeroThreshold, - createdTimestamp, - labelValues...) 
- if err != nil { - panic(err) - } - return nativehistogram -} - -func (h *constNativeHistogram) Desc() *Desc { - return h.desc -} - -func (h *constNativeHistogram) Write(out *dto.Metric) error { - out.Histogram = &h.Histogram - out.Label = h.labelPairs - return nil -} - -func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { - if len(buckets) == 0 { - return nil, nil - } - var ii []int - for k := range buckets { - ii = append(ii, k) - } - sort.Ints(ii) - - var ( - spans []*dto.BucketSpan - deltas []int64 - prevCount int64 - nextI int - ) - - appendDelta := func(count int64) { - *spans[len(spans)-1].Length++ - deltas = append(deltas, count-prevCount) - prevCount = count - } - - for n, i := range ii { - count := buckets[i] - // Multiple spans with only small gaps in between are probably - // encoded more efficiently as one larger span with a few empty - // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one or two buckets should not create - // a new span. - iDelta := int32(i - nextI) - if n == 0 || iDelta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap - // of more than two buckets. - spans = append(spans, &dto.BucketSpan{ - Offset: proto.Int32(iDelta), - Length: proto.Uint32(0), - }) - } else { - // We have found a small gap (or no gap at all). - // Insert empty buckets as needed. - for j := int32(0); j < iDelta; j++ { - appendDelta(0) - } - } - appendDelta(count) - nextI = i + 1 - } - return spans, deltas -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go deleted file mode 100644 index 1ed5abe74c..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2015 Björn Rabenstein -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -// -// The code in this package is copy/paste to avoid a dependency. Hence this file -// carries the copyright of the original repo. -// https://github.com/beorn7/floats -package internal - -import ( - "math" -) - -// minNormalFloat64 is the smallest positive normal value of type float64. -var minNormalFloat64 = math.Float64frombits(0x0010000000000000) - -// AlmostEqualFloat64 returns true if a and b are equal within a relative error -// of epsilon. 
See http://floating-point-gui.de/errors/comparison/ for the -// details of the applied method. -func AlmostEqualFloat64(a, b, epsilon float64) bool { - if a == b { - return true - } - absA := math.Abs(a) - absB := math.Abs(b) - diff := math.Abs(a - b) - if a == 0 || b == 0 || absA+absB < minNormalFloat64 { - return diff < epsilon*minNormalFloat64 - } - return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon -} - -// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. -func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !AlmostEqualFloat64(a[i], b[i], epsilon) { - return false - } - } - return true -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go deleted file mode 100644 index 8b016355ad..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ /dev/null @@ -1,655 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no longer maintained. -package internal - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" -) - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. 
This method appears to be the least vulnerable
-// to synching up on blocks of "junk lines", though (like blank lines in
-// ordinary text files, or maybe "<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool, -) *SequenceMatcher { - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// -// and for all (i',j',k') meeting those conditions, -// -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize++ - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize++ - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
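A small illustration of the triples described above (note that this vendored copy lives under internal/ and is therefore not importable from outside client_golang; the import path is shown only to make the sketch self-contained):

```go
package main

import (
	"fmt"

	// Not importable externally; illustrative only.
	internal "github.com/prometheus/client_golang/prometheus/internal"
)

func main() {
	m := internal.NewMatcher(
		[]string{"one\n", "two\n", "three\n"},
		[]string{"one\n", "2\n", "three\n"},
	)
	for _, blk := range m.GetMatchingBlocks() {
		// Prints (0,0,1), (2,2,1) and the trailing (3,3,0) sentinel.
		fmt.Printf("a[%d:%d] == b[%d:%d] (size %d)\n",
			blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size, blk.Size)
	}
}
```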
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
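Continuing the same illustrative setup, the opcode tags can be seen on a tiny input; these opcodes are also what GetGroupedOpCodes chops into context-limited groups:

```go
package main

import (
	"fmt"

	internal "github.com/prometheus/client_golang/prometheus/internal" // illustrative import
)

func main() {
	m := internal.NewMatcher(
		[]string{"one\n", "two\n", "three\n"},
		[]string{"one\n", "2\n", "three\n", "four\n"},
	)
	for _, op := range m.GetOpCodes() {
		// 'e' equal, 'r' replace, 'd' delete, 'i' insert.
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
	// Output:
	//   e a[0:1] b[0:1]
	//   r a[1:2] b[1:2]
	//   e a[2:3] b[2:3]
	//   i a[3:3] b[3:4]
}
```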
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
-	if n < 0 {
-		n = 3
-	}
-	codes := m.GetOpCodes()
-	if len(codes) == 0 {
-		codes = []OpCode{{'e', 0, 1, 0, 1}}
-	}
-	// Fixup leading and trailing groups if they show no changes.
-	if codes[0].Tag == 'e' {
-		c := codes[0]
-		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-		codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
-	}
-	if codes[len(codes)-1].Tag == 'e' {
-		c := codes[len(codes)-1]
-		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-		codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
-	}
-	nn := n + n
-	groups := [][]OpCode{}
-	group := []OpCode{}
-	for _, c := range codes {
-		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-		// End the current group and start a new one whenever
-		// there is a large range with no changes.
-		if c.Tag == 'e' && i2-i1 > nn {
-			group = append(group, OpCode{
-				c.Tag, i1, minInt(i2, i1+n),
-				j1, minInt(j2, j1+n),
-			})
-			groups = append(groups, group)
-			group = []OpCode{}
-			i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
-		}
-		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
-	}
-	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
-		groups = append(groups, group)
-	}
-	return groups
-}
-
-// Return a measure of the sequences' similarity (float in [0,1]).
-//
-// Where T is the total number of elements in both sequences, and
-// M is the number of matches, this is 2.0*M / T.
-// Note that this is 1 if the sequences are identical, and 0 if
-// they have nothing in common.
-//
-// .Ratio() is expensive to compute if you haven't already computed
-// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
-// want to try .QuickRatio() or .RealQuickRatio() first to get an
-// upper bound.
-func (m *SequenceMatcher) Ratio() float64 {
-	matches := 0
-	for _, m := range m.GetMatchingBlocks() {
-		matches += m.Size
-	}
-	return calculateRatio(matches, len(m.a)+len(m.b))
-}
-
-// Return an upper bound on ratio() relatively quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute.
-func (m *SequenceMatcher) QuickRatio() float64 {
-	// viewing a and b as multisets, set matches to the cardinality
-	// of their intersection; this counts the number of matches
-	// without regard to order, so is clearly an upper bound
-	if m.fullBCount == nil {
-		m.fullBCount = map[string]int{}
-		for _, s := range m.b {
-			m.fullBCount[s]++
-		}
-	}
-
-	// avail[x] is the number of times x appears in 'b' less the
-	// number of times we've seen it in 'a' so far ... kinda
-	avail := map[string]int{}
-	matches := 0
-	for _, s := range m.a {
-		n, ok := avail[s]
-		if !ok {
-			n = m.fullBCount[s]
-		}
-		avail[s] = n - 1
-		if n > 0 {
-			matches++
-		}
-	}
-	return calculateRatio(matches, len(m.a)+len(m.b))
-}
-
-// Return an upper bound on ratio() very quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute than either .Ratio() or .QuickRatio().
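Because QuickRatio and RealQuickRatio are upper bounds on Ratio, they support the cheap-to-expensive filtering pattern familiar from Python's difflib (same illustrative internal import as the earlier sketches):

```go
package main

import (
	"fmt"

	internal "github.com/prometheus/client_golang/prometheus/internal" // illustrative import
)

func main() {
	m := internal.NewMatcher(
		[]string{"a\n", "b\n", "c\n"},
		[]string{"a\n", "x\n", "c\n"},
	)
	const cutoff = 0.5
	// Check the cheapest bound first; only fall through to the expensive
	// Ratio computation when the bounds cannot rule the pair out.
	if m.RealQuickRatio() >= cutoff && m.QuickRatio() >= cutoff {
		fmt.Printf("%.3f\n", m.Ratio()) // 0.667
	}
}
```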
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(minInt(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return strconv.Itoa(beginning) - } - if length == 0 { - beginning-- // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
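Putting the pieces together, a unified diff can be produced via GetUnifiedDiffString (again, the internal import is illustrative; inside client_golang the call is direct):

```go
package main

import (
	"fmt"

	internal "github.com/prometheus/client_golang/prometheus/internal" // illustrative import
)

func main() {
	text, err := internal.GetUnifiedDiffString(internal.UnifiedDiff{
		A:        []string{"one\n", "two\n", "three\n"},
		B:        []string{"one\n", "2\n", "three\n"},
		FromFile: "want",
		ToFile:   "got",
		Context:  3,
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
	// --- want
	// +++ got
	// @@ -1,3 +1,3 @@
	//  one
	// -two
	// +2
	//  three
}
```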
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
-	buf := bufio.NewWriter(writer)
-	defer buf.Flush()
-	wf := func(format string, args ...interface{}) error {
-		_, err := buf.WriteString(fmt.Sprintf(format, args...))
-		return err
-	}
-	ws := func(s string) error {
-		_, err := buf.WriteString(s)
-		return err
-	}
-
-	if len(diff.Eol) == 0 {
-		diff.Eol = "\n"
-	}
-
-	started := false
-	m := NewMatcher(diff.A, diff.B)
-	for _, g := range m.GetGroupedOpCodes(diff.Context) {
-		if !started {
-			started = true
-			fromDate := ""
-			if len(diff.FromDate) > 0 {
-				fromDate = "\t" + diff.FromDate
-			}
-			toDate := ""
-			if len(diff.ToDate) > 0 {
-				toDate = "\t" + diff.ToDate
-			}
-			if diff.FromFile != "" || diff.ToFile != "" {
-				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
-				if err != nil {
-					return err
-				}
-				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
-				if err != nil {
-					return err
-				}
-			}
-		}
-		first, last := g[0], g[len(g)-1]
-		range1 := formatRangeUnified(first.I1, last.I2)
-		range2 := formatRangeUnified(first.J1, last.J2)
-		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
-			return err
-		}
-		for _, c := range g {
-			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-			if c.Tag == 'e' {
-				for _, line := range diff.A[i1:i2] {
-					if err := ws(" " + line); err != nil {
-						return err
-					}
-				}
-				continue
-			}
-			if c.Tag == 'r' || c.Tag == 'd' {
-				for _, line := range diff.A[i1:i2] {
-					if err := ws("-" + line); err != nil {
-						return err
-					}
-				}
-			}
-			if c.Tag == 'r' || c.Tag == 'i' {
-				for _, line := range diff.B[j1:j2] {
-					if err := ws("+" + line); err != nil {
-						return err
-					}
-				}
-			}
-		}
-	}
-	return nil
-}
-
-// Like WriteUnifiedDiff but returns the diff as a string.
-func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
-	w := &bytes.Buffer{}
-	err := WriteUnifiedDiff(w, diff)
-	return w.String(), err
-}
-
-// Split a string on "\n" while preserving them. The output can be used
-// as input for UnifiedDiff and ContextDiff structures.
-func SplitLines(s string) []string {
-	lines := strings.SplitAfter(s, "\n")
-	lines[len(lines)-1] += "\n"
-	return lines
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
deleted file mode 100644
index a4fa6eabd7..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import "regexp"
-
-type GoCollectorRule struct {
-	Matcher *regexp.Regexp
-	Deny    bool
-}
-
-// GoCollectorOptions should not be used directly by anything, except `collectors` package.
-// Use it via collectors package instead. See issue
-// https://github.com/prometheus/client_golang/issues/1030.
-// -// This is internal, so external users only can use it via `collector.WithGoCollector*` methods -type GoCollectorOptions struct { - DisableMemStatsLikeMetrics bool - RuntimeMetricSumForHist map[string]string - RuntimeMetricRules []GoCollectorRule -} - -var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go deleted file mode 100644 index f7f97ef926..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package internal - -import ( - "math" - "path" - "runtime/metrics" - "strings" - - "github.com/prometheus/common/model" -) - -// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics -// metric description and validates whether the metric is suitable for integration -// with Prometheus. -// -// Returns false if a name could not be produced, or if Prometheus does not understand -// the runtime/metrics Kind. -// -// Note that the main reason a name couldn't be produced is if the runtime/metrics -// package exports a name with characters outside the valid Prometheus metric name -// character set. This is theoretically possible, but should never happen in practice. -// Still, don't rely on it. -func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) { - namespace := "go" - - comp := strings.SplitN(d.Name, ":", 2) - key := comp[0] - unit := comp[1] - - // The last path element in the key is the name, - // the rest is the subsystem. - subsystem := path.Dir(key[1:] /* remove leading / */) - name := path.Base(key) - - // subsystem is translated by replacing all / and - with _. - subsystem = strings.ReplaceAll(subsystem, "/", "_") - subsystem = strings.ReplaceAll(subsystem, "-", "_") - - // unit is translated assuming that the unit contains no - // non-ASCII characters. - unit = strings.ReplaceAll(unit, "-", "_") - unit = strings.ReplaceAll(unit, "*", "_") - unit = strings.ReplaceAll(unit, "/", "_per_") - - // name has - replaced with _ and is concatenated with the unit and - // other data. - name = strings.ReplaceAll(name, "-", "_") - name += "_" + unit - if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { - name += "_total" - } - - // Our current conversion moves to legacy naming, so use legacy validation. 
- valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) - switch d.Kind { - case metrics.KindUint64: - case metrics.KindFloat64: - case metrics.KindFloat64Histogram: - default: - valid = false - } - return namespace, subsystem, name, valid -} - -// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram -// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces -// a reduced set of buckets. This function always removes any -Inf bucket as it's represented -// as the bottom-most upper-bound inclusive bucket in Prometheus. -func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { - switch unit { - case "bytes": - // Re-bucket as powers of 2. - return reBucketExp(buckets, 2) - case "seconds": - // Re-bucket as powers of 10 and then merge all buckets greater - // than 1 second into the +Inf bucket. - b := reBucketExp(buckets, 10) - for i := range b { - if b[i] <= 1 { - continue - } - b[i] = math.Inf(1) - b = b[:i+1] - break - } - return b - } - return buckets -} - -// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and -// downsamples the buckets to those a multiple of base apart. The end result -// is a roughly exponential (in many cases, perfectly exponential) bucketing -// scheme. -func reBucketExp(buckets []float64, base float64) []float64 { - bucket := buckets[0] - var newBuckets []float64 - // We may see a -Inf here, in which case, add it and skip it - // since we risk producing NaNs otherwise. - // - // We need to preserve -Inf values to maintain runtime/metrics - // conventions. We'll strip it out later. - if bucket == math.Inf(-1) { - newBuckets = append(newBuckets, bucket) - buckets = buckets[1:] - bucket = buckets[0] - } - // From now on, bucket should always have a non-Inf value because - // Infs are only ever at the ends of the bucket lists, so - // arithmetic operations on it are non-NaN. - for i := 1; i < len(buckets); i++ { - if bucket >= 0 && buckets[i] < bucket*base { - // The next bucket we want to include is at least bucket*base. - continue - } else if bucket < 0 && buckets[i] < bucket/base { - // In this case the bucket we're targeting is negative, and since - // we're ascending through buckets here, we need to divide to get - // closer to zero exponentially. - continue - } - // The +Inf bucket will always be the last one, and we'll always - // end up including it here because bucket - newBuckets = append(newBuckets, bucket) - bucket = buckets[i] - } - return append(newBuckets, bucket) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 6515c11480..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type LabelPairSorter []*dto.LabelPair - -func (s LabelPairSorter) Len() int { - return len(s) -} - -func (s LabelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s LabelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -// MetricSorter is a sortable slice of *dto.Metric. -type MetricSorter []*dto.Metric - -func (s MetricSorter) Len() int { - return len(s) -} - -func (s MetricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s MetricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(MetricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index c21911f292..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// LabelConstraint normalizes label values. -type LabelConstraint func(string) string - -// ConstrainedLabels represents a label name and its constrain function -// to normalize label values. This type is commonly used when constructing -// metric vector Collectors. -type ConstrainedLabel struct { - Name string - Constraint LabelConstraint -} - -// ConstrainableLabels is an interface that allows creating of labels that can -// be optionally constrained. -// -// prometheus.V2().NewCounterVec(CounterVecOpts{ -// CounterOpts: {...}, // Usual CounterOpts fields -// VariableLabels: []ConstrainedLabels{ -// {Name: "A"}, -// {Name: "B", Constraint: func(v string) string { ... }}, -// }, -// }) -type ConstrainableLabels interface { - compile() *compiledLabels - labelNames() []string -} - -// ConstrainedLabels represents a collection of label name -> constrain function -// to normalize label values. This type is commonly used when constructing -// metric vector Collectors. -type ConstrainedLabels []ConstrainedLabel - -func (cls ConstrainedLabels) compile() *compiledLabels { - compiled := &compiledLabels{ - names: make([]string, len(cls)), - labelConstraints: map[string]LabelConstraint{}, - } - - for i, label := range cls { - compiled.names[i] = label.Name - if label.Constraint != nil { - compiled.labelConstraints[label.Name] = label.Constraint - } - } - - return compiled -} - -func (cls ConstrainedLabels) labelNames() []string { - names := make([]string, len(cls)) - for i, label := range cls { - names[i] = label.Name - } - return names -} - -// UnconstrainedLabels represents collection of label without any constraint on -// their value. Thus, it is simply a collection of label names. -// -// UnconstrainedLabels([]string{ "A", "B" }) -// -// is equivalent to -// -// ConstrainedLabels { -// { Name: "A" }, -// { Name: "B" }, -// } -type UnconstrainedLabels []string - -func (uls UnconstrainedLabels) compile() *compiledLabels { - return &compiledLabels{ - names: uls, - } -} - -func (uls UnconstrainedLabels) labelNames() []string { - return uls -} - -type compiledLabels struct { - names []string - labelConstraints map[string]LabelConstraint -} - -func (cls *compiledLabels) compile() *compiledLabels { - return cls -} - -func (cls *compiledLabels) labelNames() []string { - return cls.names -} - -func (cls *compiledLabels) constrain(labelName, value string) string { - if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { - return fn(value) - } - return value -} - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. 
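// Editor's note: a short sketch of the Labels usage documented above, built on
// the public client_golang API. The metric and label names are illustrative only.
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(requests)

	// Labels maps label name -> value, exactly as the doc comment above shows.
	requests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
}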
-const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%w: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%w: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - // The call below makes vals escape, copy them to avoid that. - vals := append([]string(nil), vals...) - return fmt.Errorf( - "%w: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index 592eec3e24..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sort" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" - "google.golang.org/protobuf/proto" -) - -var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. 
- // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. - // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. - Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just -// an alias of this type (which might change when the requirement arises.) -// -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // now is for testing purposes, by default it's time.Now. - now func() time.Time -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. 
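// Editor's note: a tiny sketch of the BuildFQName contract documented above:
// empty components are skipped, the rest are joined with "_", and an empty
// name yields an empty string. Expected output is shown in the comments.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("go", "gc", "duration_seconds")) // "go_gc_duration_seconds"
	fmt.Println(prometheus.BuildFQName("", "gc", "duration_seconds"))   // "gc_duration_seconds"
	fmt.Println(prometheus.BuildFQName("go", "", "duration_seconds"))   // "go_duration_seconds"
	fmt.Println(prometheus.BuildFQName("go", "gc", ""))                 // ""
}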
-func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - - sb := strings.Builder{} - sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) - - if namespace != "" { - sb.WriteString(namespace) - sb.WriteString("_") - } - - if subsystem != "" { - sb.WriteString(subsystem) - sb.WriteString("_") - } - - sb.WriteString(name) - - return sb.String() -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. -// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. -func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} - -type withExemplarsMetric struct { - Metric - - exemplars []*dto.Exemplar -} - -func (m *withExemplarsMetric) Write(pb *dto.Metric) error { - if err := m.Metric.Write(pb); err != nil { - return err - } - - switch { - case pb.Counter != nil: - pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] - case pb.Histogram != nil: - for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. - i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() - }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e - } else { - // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e, - } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) - } - } - default: - // TODO(bwplotka): Implement Gauge? - return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") - } - - return nil -} - -// Exemplar is easier to use, user-facing representation of *dto.Exemplar. -type Exemplar struct { - Value float64 - Labels Labels - // Optional. - // Default value (time.Time{}) indicates its empty, which should be - // understood as time.Now() time at the moment of creation of metric. 
- Timestamp time.Time -} - -// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given -// exemplars. Exemplars are validated. -// -// Only last applicable exemplar is injected from the list. -// For example for Counter it means last exemplar is injected. -// For Histogram, it means last applicable exemplar for each bucket is injected. -// -// NewMetricWithExemplars works best with MustNewConstMetric and -// MustNewConstHistogram, see example. -func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { - if len(exemplars) == 0 { - return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") - } - - var ( - now = time.Now() - exs = make([]*dto.Exemplar, len(exemplars)) - err error - ) - for i, e := range exemplars { - ts := e.Timestamp - if ts.IsZero() { - ts = now - } - exs[i], err = newExemplar(e.Value, ts, e.Labels) - if err != nil { - return nil, err - } - } - - return &withExemplarsMetric{Metric: m, exemplars: exs}, nil -} - -// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where -// NewMetricWithExemplars would have returned an error. -func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { - ret, err := NewMetricWithExemplars(m, exemplars...) - if err != nil { - panic(err) - } - return ret -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go deleted file mode 100644 index 7c12b21087..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !js || wasm -// +build !js wasm - -package prometheus - -import "runtime" - -// getRuntimeNumThreads returns the number of open OS threads. -func getRuntimeNumThreads() float64 { - n, _ := runtime.ThreadCreateProfile(nil) - return float64(n) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go deleted file mode 100644 index 7348df01df..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build js && !wasm -// +build js,!wasm - -package prometheus - -// getRuntimeNumThreads returns the number of open OS threads. -func getRuntimeNumThreads() float64 { - return 1 -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 03773b21f7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. -type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. -type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} - -// ExemplarObserver is implemented by Observers that offer the option of -// observing a value together with an exemplar. Its ObserveWithExemplar method -// works like the Observe method of an Observer but also replaces the currently -// saved exemplar (if any) with a new one, created from the provided value, the -// current time as timestamp, and the provided Labels. Empty Labels will lead to -// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is -// left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 128 runes in total. 
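// Editor's note: a hedged sketch of the ExemplarObserver contract described
// above. In client_golang, histograms implement it, so the type assertion
// below is expected to succeed; the trace_id value is an invented example.
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "request_duration_seconds",
		Help: "Request latency in seconds.",
	})

	if eo, ok := latency.(prometheus.ExemplarObserver); ok {
		// Observe 0.42s and attach an exemplar carrying a trace ID,
		// replacing any previously saved exemplar for that bucket.
		eo.ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
	}
}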
-type ExemplarObserver interface { - ObserveWithExemplar(value float64, exemplar Labels) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index e7bce8b58e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "os" - "strconv" - "strings" -) - -type processCollector struct { - collectFn func(chan<- Metric) - describeFn func(chan<- *Desc) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc - inBytes, outBytes *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector is the obsolete version of collectors.NewProcessCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewProcessCollector instead. 
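// Editor's note: the deprecation notice above points at the collectors
// package; a minimal sketch of the replacement call (default options,
// current process) looks like this.
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
}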
-func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - inBytes: NewDesc( - ns+"process_network_receive_bytes_total", - "Number of bytes received by the process over the network.", - nil, nil, - ), - outBytes: NewDesc( - ns+"process_network_transmit_bytes_total", - "Number of bytes sent by the process over the network.", - nil, nil, - ), - } - - if opts.PidFn == nil { - c.pidFn = getPIDFn() - } else { - c.pidFn = opts.PidFn - } - - // Set up process metric collection if supported by the runtime. - if canCollectProcess() { - c.collectFn = c.processCollect - c.describeFn = c.describe - } else { - c.collectFn = c.errorCollectFn - c.describeFn = c.errorDescribeFn - } - - return c -} - -func (c *processCollector) errorCollectFn(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) -} - -func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { - if c.reportErrors { - ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - c.describeFn(ch) -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) - } - ch <- NewInvalidMetric(desc, err) -} - -// NewPidFileFn returns a function that retrieves a pid from the specified file. -// It is meant to be used for the PidFn field in ProcessCollectorOpts. 
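// Editor's note: a hedged sketch of wiring NewPidFileFn (documented above)
// into ProcessCollectorOpts so the collector reports on another process,
// identified by a pid file. The file path is illustrative.
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{
		PidFn:        prometheus.NewPidFileFn("/var/run/myapp.pid"), // re-read on every scrape
		ReportErrors: true,                                          // surface read/parse failures as invalid metrics
	}))
}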
-func NewPidFileFn(pidFilePath string) func() (int, error) { - return func() (int, error) { - content, err := os.ReadFile(pidFilePath) - if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) - } - pid, err := strconv.Atoi(strings.TrimSpace(string(content))) - if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) - } - - return pid, nil - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go deleted file mode 100644 index 0a61b98461..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin && !ios - -package prometheus - -import ( - "errors" - "fmt" - "os" - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo -// isn't available. -var notImplementedErr = errors.New("not implemented") - -type memoryInfo struct { - vsize uint64 // Virtual memory size in bytes - rss uint64 // Resident memory size in bytes -} - -func canCollectProcess() bool { - return true -} - -func getSoftLimit(which int) (uint64, error) { - rlimit := syscall.Rlimit{} - - if err := syscall.Getrlimit(which, &rlimit); err != nil { - return 0, err - } - - return rlimit.Cur, nil -} - -func getOpenFileCount() (float64, error) { - // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to - // return a list of open fds, but that requires a way to call C APIs. The - // benefits, however, include fewer system calls and not failing when at the - // open file soft limit. - - if dir, err := os.Open("/dev/fd"); err != nil { - return 0.0, err - } else { - defer dir.Close() - - // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is - // that info not used, but KQUEUE descriptors fail stat(2), which causes - // the whole method to fail. - if names, err := dir.Readdirnames(0); err != nil { - return 0.0, err - } else { - // Subtract 1 to ignore the open /dev/fd descriptor above. - return float64(len(names) - 1), nil - } - } -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { - if len(procs) == 1 { - startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, c.startTime, err) - } - - // The proc structure returned by kern.proc.pid above has an Rusage member, - // but it is not filled in, so it needs to be fetched by getrusage(2). 
For - // that call, the UTime, STime, and Maxrss members are filled out, but not - // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require - // access to the C API to call task_info(TASK_BASIC_INFO). - rusage := unix.Rusage{} - - if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil { - cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds() - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime) - } else { - c.reportError(ch, c.cpuTotal, err) - } - - if memInfo, err := getMemory(); err == nil { - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { - // Don't report an error when support is not compiled in. - c.reportError(ch, c.rss, err) - c.reportError(ch, c.vsize, err) - } - - if fds, err := getOpenFileCount(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds) - } else { - c.reportError(ch, c.openFDs, err) - } - - if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles)) - } else { - c.reportError(ch, c.maxFDs, err) - } - - if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil { - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace)) - } else { - c.reportError(ch, c.maxVsize, err) - } - - // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might - // be able to get the per-process network send/receive counts. -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c deleted file mode 100644 index d00a24315d..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin && !ios && cgo - -#include -#include -#include - -// The compiler warns that mach/shared_memory_server.h is deprecated, and to use -// mach/shared_region.h instead. But that doesn't define -// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and -// avoid a warning message when running tests. -#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U -#define SHARED_DATA_REGION_SIZE 0x10000000 -#define SHARED_TEXT_REGION_SIZE 0x10000000 - - -int get_memory_info(unsigned long long *rss, unsigned long long *vsize) -{ - // This is lightly adapted from how ps(1) obtains its memory info. 
- // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 - - kern_return_t error; - task_t task = MACH_PORT_NULL; - mach_task_basic_info_data_t info; - mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; - - error = task_info( - mach_task_self(), - MACH_TASK_BASIC_INFO, - (task_info_t) &info, - &info_count ); - - if( error != KERN_SUCCESS ) - { - return error; - } - - *rss = info.resident_size; - *vsize = info.virtual_size; - - { - vm_region_basic_info_data_64_t b_info; - mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; - mach_vm_size_t size; - mach_port_t object_name; - - /* - * try to determine if this task has the split libraries - * mapped in... if so, adjust its virtual size down by - * the 2 segments that are used for split libraries - */ - info_count = VM_REGION_BASIC_INFO_COUNT_64; - - error = mach_vm_region( - mach_task_self(), - &address, - &size, - VM_REGION_BASIC_INFO_64, - (vm_region_info_t) &b_info, - &info_count, - &object_name); - - if (error == KERN_SUCCESS) { - if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && - *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { - *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); - } - } - } - - return 0; -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go deleted file mode 100644 index 9ac53f9992..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin && !ios && cgo - -package prometheus - -/* -int get_memory_info(unsigned long long *rss, unsigned long long *vs); -*/ -import "C" -import "fmt" - -func getMemory() (*memoryInfo, error) { - var rss, vsize C.ulonglong - - if err := C.get_memory_info(&rss, &vsize); err != 0 { - return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) - } - - return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil -} - -// describe returns all descriptions of the collector for Darwin. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. 
-func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.maxVsize - ch <- c.startTime - ch <- c.rss - ch <- c.vsize - - /* the process could be collected but not implemented yet - ch <- c.inBytes - ch <- c.outBytes - */ -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go deleted file mode 100644 index 8ddb0995d6..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin && !ios && !cgo - -package prometheus - -func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr -} - -// describe returns all descriptions of the collector for Darwin. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.maxVsize - ch <- c.startTime - - /* the process could be collected but not implemented yet - ch <- c.rss - ch <- c.vsize - ch <- c.inBytes - ch <- c.outBytes - */ -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go deleted file mode 100644 index 7732b7f376..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build wasip1 || js || ios -// +build wasip1 js ios - -package prometheus - -func canCollectProcess() bool { - return false -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - c.errorCollectFn(ch) -} - -// describe returns all descriptions of the collector for wasip1 and js. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. 
Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - c.errorDescribeFn(ch) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go deleted file mode 100644 index 9f4b130bef..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows && !js && !wasip1 && !darwin -// +build !windows,!js,!wasip1,!darwin - -package prometheus - -import ( - "github.com/prometheus/procfs" -) - -func canCollectProcess() bool { - _, err := procfs.NewDefaultFS() - return err == nil -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.Stat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.Limits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } - - if netstat, err := p.Netstat(); err == nil { - var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets - } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets - } - ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) - ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) - } else { - c.reportError(ch, nil, err) - } -} - -// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. 
-func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime - ch <- c.inBytes - ch <- c.outBytes -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go deleted file mode 100644 index fa474289ef..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -func canCollectProcess() bool { - return true -} - -var ( - modpsapi = syscall.NewLazyDLL("psapi.dll") - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") - procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") -) - -type processMemoryCounters struct { - // System interface description - // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex - - // Refer to the Golang internal implementation - // https://golang.org/src/internal/syscall/windows/psapi_windows.go - _ uint32 - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr -} - -func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { - mem := processMemoryCounters{} - r1, _, err := procGetProcessMemoryInfo.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&mem)), - uintptr(unsafe.Sizeof(mem)), - ) - if r1 != 1 { - return mem, err - } else { - return mem, nil - } -} - -func getProcessHandleCount(handle windows.Handle) (uint32, error) { - var count uint32 - r1, _, err := procGetProcessHandleCount.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&count)), - ) - if r1 != 1 { - return 0, err - } else { - return count, nil - } -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - h := windows.CurrentProcess() - - var startTime, exitTime, kernelTime, userTime windows.Filetime - err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) - - mem, err := getProcessMemoryInfo(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) - ch <- MustNewConstMetric(c.rss, GaugeValue, 
float64(mem.WorkingSetSize)) - - handles, err := getProcessHandleCount(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. -} - -// describe returns all descriptions of the collector for windows. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.rss - ch <- c.startTime -} - -func fileTimeToSeconds(ft windows.Filetime) float64 { - return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go deleted file mode 100644 index 315eab5f17..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "bufio" - "io" - "net" - "net/http" -) - -const ( - closeNotifier = 1 << iota - flusher - hijacker - readerFrom - pusher -) - -type delegator interface { - http.ResponseWriter - - Status() int - Written() int64 -} - -type responseWriterDelegator struct { - http.ResponseWriter - - status int - written int64 - wroteHeader bool - observeWriteHeader func(int) -} - -func (r *responseWriterDelegator) Status() int { - return r.status -} - -func (r *responseWriterDelegator) Written() int64 { - return r.written -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - if r.observeWriteHeader != nil && !r.wroteHeader { - // Only call observeWriteHeader for the 1st time. It's a bug if - // WriteHeader is called more than once, but we want to protect - // against it here. Note that we still delegate the WriteHeader - // to the original ResponseWriter to not mask the bug from it. - r.observeWriteHeader(code) - } - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, -// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. 
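// Editor's note: a small sketch of why the Unwrap method above matters. It
// lets http.ResponseController (Go 1.20+) reach optional interfaces such as
// http.Flusher on the writer wrapped by the delegator; the handler below is
// invented for illustration.
package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	_, _ = w.Write([]byte("partial response\n"))
	// Flush works through the delegator because Unwrap exposes the
	// underlying ResponseWriter to the controller.
	if err := rc.Flush(); err != nil {
		log.Printf("flush not supported: %v", err)
	}
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}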
-func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { - return r.ResponseWriter -} - -type ( - closeNotifierDelegator struct{ *responseWriterDelegator } - flusherDelegator struct{ *responseWriterDelegator } - hijackerDelegator struct{ *responseWriterDelegator } - readerFromDelegator struct{ *responseWriterDelegator } - pusherDelegator struct{ *responseWriterDelegator } -) - -func (d closeNotifierDelegator) CloseNotify() <-chan bool { - //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. - return d.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (d flusherDelegator) Flush() { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - d.ResponseWriter.(http.Flusher).Flush() -} - -func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return d.ResponseWriter.(http.Hijacker).Hijack() -} - -func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) - d.written += n - return n, err -} - -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) - -func init() { - // TODO(beorn7): Code generation would help here. - pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 - return d - } - pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return closeNotifierDelegator{d} - } - pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return flusherDelegator{d} - } - pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 - return struct { - *responseWriterDelegator - http.Flusher - http.CloseNotifier - }{d, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 - return hijackerDelegator{d} - } - pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 - return struct { - *responseWriterDelegator - http.Hijacker - http.CloseNotifier - }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - }{d, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 - return readerFromDelegator{d} - } - pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.CloseNotifier - }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - }{d, 
readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - }{d, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 - return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - 
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go deleted file mode 100644 index 763d99e362..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promhttp provides tooling around HTTP servers and clients. -// -// First, the package allows the creation of http.Handler instances to expose -// Prometheus metrics via HTTP. promhttp.Handler acts on the -// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a -// custom registry or anything that implements the Gatherer interface. It also -// allows the creation of handlers that act differently on errors or allow to -// log errors. -// -// Second, the package provides tooling to instrument instances of http.Handler -// via middleware. Middleware wrappers follow the naming scheme -// InstrumentHandlerX, where X describes the intended use of the middleware. -// See each function's doc comment for specific details. -// -// Finally, the package allows for an http.RoundTripper to be instrumented via -// middleware. Middleware wrappers follow the naming scheme -// InstrumentRoundTripperX, where X describes the intended use of the -// middleware. See each function's doc comment for specific details. -package promhttp - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "net/http" - "strconv" - "sync" - "time" - - "github.com/prometheus/common/expfmt" - - "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp/internal" -) - -const ( - contentTypeHeader = "Content-Type" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" - processStartTimeHeader = "Process-Start-Time-Unix" -) - -// Compression represents the content encodings handlers support for the HTTP -// responses. -type Compression string - -const ( - Identity Compression = "identity" - Gzip Compression = "gzip" - Zstd Compression = "zstd" -) - -func defaultCompressionFormats() []Compression { - if internal.NewZstdWriter != nil { - return []Compression{Identity, Gzip, Zstd} - } else { - return []Compression{Identity, Gzip} - } -} - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an http.Handler for the prometheus.DefaultGatherer, using -// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has -// no error logging, and it applies compression if requested by the client. -// -// The returned http.Handler is already instrumented using the -// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you -// create multiple http.Handlers by separate calls of the Handler function, the -// metrics used for instrumentation will be shared between them, providing -// global scrape counts. -// -// This function is meant to cover the bulk of basic use cases. If you are doing -// anything that requires more customization (including using a non-default -// Gatherer, different instrumentation, and non-default HandlerOpts), use the -// HandlerFor function. See there for details. 
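// Aside: the canonical use of the Handler function described above, as a
// minimal sketch; the /metrics path and port are arbitrary choices.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serves prometheus.DefaultGatherer with default HandlerOpts; the
	// returned handler comes pre-instrumented via InstrumentMetricHandler.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}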
-func Handler() http.Handler { - return InstrumentMetricHandler( - prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), - ) -} - -// HandlerFor returns an uninstrumented http.Handler for the provided -// Gatherer. The behavior of the Handler is defined by the provided -// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom -// Gatherers, with non-default HandlerOpts, and/or with custom (or no) -// instrumentation. Use the InstrumentMetricHandler function to apply the same -// kind of instrumentation as it is used by the Handler function. -func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts) -} - -// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which -// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after -// call to `done` of that `Gather`. -func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { - var ( - inFlightSem chan struct{} - errCnt = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_errors_total", - Help: "Total number of internal errors encountered by the promhttp metric handler.", - }, - []string{"cause"}, - ) - ) - - if opts.MaxRequestsInFlight > 0 { - inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) - } - if opts.Registry != nil { - // Initialize all possibilities that can occur below. - errCnt.WithLabelValues("gathering") - errCnt.WithLabelValues("encoding") - if err := opts.Registry.Register(errCnt); err != nil { - are := &prometheus.AlreadyRegisteredError{} - if errors.As(err, are) { - errCnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - } - - // Select compression formats to offer based on default or user choice. - var compressions []string - if !opts.DisableCompression { - offers := defaultCompressionFormats() - if len(opts.OfferedCompressions) > 0 { - offers = opts.OfferedCompressions - } - for _, comp := range offers { - compressions = append(compressions, string(comp)) - } - } - - h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - if !opts.ProcessStartTime.IsZero() { - rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) - } - if inFlightSem != nil { - select { - case inFlightSem <- struct{}{}: // All good, carry on. - defer func() { <-inFlightSem }() - default: - http.Error(rsp, fmt.Sprintf( - "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, - ), http.StatusServiceUnavailable) - return - } - } - mfs, done, err := reg.Gather() - defer done() - if err != nil { - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error gathering metrics:", err) - } - errCnt.WithLabelValues("gathering").Inc() - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - if len(mfs) == 0 { - // Still report the error if no metrics have been gathered. 
- httpError(rsp, err) - return - } - case HTTPErrorOnError: - httpError(rsp, err) - return - } - } - - var contentType expfmt.Format - if opts.EnableOpenMetrics { - contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) - } else { - contentType = expfmt.Negotiate(req.Header) - } - rsp.Header().Set(contentTypeHeader, string(contentType)) - - w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) - if err != nil { - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error getting writer", err) - } - w = io.Writer(rsp) - encodingHeader = string(Identity) - } - - defer closeWriter() - - // Set Content-Encoding only when data is compressed - if encodingHeader != string(Identity) { - rsp.Header().Set(contentEncodingHeader, encodingHeader) - } - - var enc expfmt.Encoder - if opts.EnableOpenMetricsTextCreatedSamples { - enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines()) - } else { - enc = expfmt.NewEncoder(w, contentType) - } - - // handleError handles the error according to opts.ErrorHandling - // and returns true if we have to abort after the handling. - handleError := func(err error) bool { - if err == nil { - return false - } - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding and sending metric family:", err) - } - errCnt.WithLabelValues("encoding").Inc() - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case HTTPErrorOnError: - // We cannot really send an HTTP error at this - // point because we most likely have written - // something to rsp already. But at least we can - // stop sending. - return true - } - // Do nothing in all other cases, including ContinueOnError. - return false - } - - for _, mf := range mfs { - if handleError(enc.Encode(mf)) { - return - } - } - if closer, ok := enc.(expfmt.Closer); ok { - // This in particular takes care of the final "# EOF\n" line for OpenMetrics. - if handleError(closer.Close()) { - return - } - } - }) - - if opts.Timeout <= 0 { - return h - } - return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( - "Exceeded configured timeout of %v.\n", - opts.Timeout, - )) -} - -// InstrumentMetricHandler is usually used with an http.Handler returned by the -// HandlerFor function. It instruments the provided http.Handler with two -// metrics: A counter vector "promhttp_metric_handler_requests_total" to count -// scrapes partitioned by HTTP status code, and a gauge -// "promhttp_metric_handler_requests_in_flight" to track the number of -// simultaneous scrapes. This function idempotently registers collectors for -// both metrics with the provided Registerer. It panics if the registration -// fails. The provided metrics are useful to see how many scrapes hit the -// monitored target (which could be from different Prometheus servers or other -// scrapers), and how often they overlap (which would result in more than one -// scrape in flight at the same time). Note that the scrapes-in-flight gauge -// will contain the scrape by which it is exposed, while the scrape counter will -// only get incremented after the scrape is complete (as only then the status -// code is known). For tracking scrape durations, use the -// "scrape_duration_seconds" gauge created by the Prometheus server upon each -// scrape. 
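// Aside: a sketch of the custom-registry path, combining HandlerFor with
// InstrumentMetricHandler as described above. The registered collectors,
// logger, and error-handling choices are assumptions.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())

	h := promhttp.InstrumentMetricHandler(reg,
		promhttp.HandlerFor(reg, promhttp.HandlerOpts{
			ErrorLog:      log.Default(),
			ErrorHandling: promhttp.ContinueOnError,
			Registry:      reg, // also exposes promhttp_metric_handler_errors_total
		}),
	)
	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}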
-func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { - cnt := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_requests_total", - Help: "Total number of scrapes by HTTP status code.", - }, - []string{"code"}, - ) - // Initialize the most likely HTTP status codes. - cnt.WithLabelValues("200") - cnt.WithLabelValues("500") - cnt.WithLabelValues("503") - if err := reg.Register(cnt); err != nil { - are := &prometheus.AlreadyRegisteredError{} - if errors.As(err, are) { - cnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - - gge := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "promhttp_metric_handler_requests_in_flight", - Help: "Current number of scrapes being served.", - }) - if err := reg.Register(gge); err != nil { - are := &prometheus.AlreadyRegisteredError{} - if errors.As(err, are) { - gge = are.ExistingCollector.(prometheus.Gauge) - } else { - panic(err) - } - } - - return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) -} - -// HandlerErrorHandling defines how a Handler serving metrics will handle -// errors. -type HandlerErrorHandling int - -// These constants cause handlers serving metrics to behave as described if -// errors are encountered. -const ( - // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. Note that HTTP - // errors cannot be served anymore once the beginning of a regular - // payload has been sent. Thus, in the (unlikely) case that encoding the - // payload into the negotiated wire format fails, serving the response - // will simply be aborted. Set an ErrorLog in HandlerOpts to detect - // those errors. - HTTPErrorOnError HandlerErrorHandling = iota - // Ignore errors and try to serve as many metrics as possible. However, - // if no metrics can be served, serve an HTTP status code 500 and the - // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. In this case, it is highly - // recommended to provide other means of detecting errors: By setting an - // ErrorLog in HandlerOpts, the errors are logged. By providing a - // Registry in HandlerOpts, the exposed metrics include an error counter - // "promhttp_metric_handler_errors_total", which can be used for - // alerts. - ContinueOnError - // Panic upon the first error encountered (useful for "crash only" apps). - PanicOnError -) - -// Logger is the minimal interface HandlerOpts needs for logging. Note that -// log.Logger from the standard library implements this interface, and it is -// easy to implement by custom loggers, if they don't do so already anyway. -type Logger interface { - Println(v ...interface{}) -} - -// HandlerOpts specifies options how to serve metrics via an http.Handler. The -// zero value of HandlerOpts is a reasonable default. -type HandlerOpts struct { - // ErrorLog specifies an optional Logger for errors collecting and - // serving metrics. If nil, errors are not logged at all. Note that the - // type of a reported error is often prometheus.MultiError, which - // formats into a multi-line error string. If you want to avoid the - // latter, create a Logger implementation that detects a - // prometheus.MultiError and formats the contained errors into one line. - ErrorLog Logger - // ErrorHandling defines how errors are handled. Note that errors are - // logged regardless of the configured ErrorHandling provided ErrorLog - // is not nil. 
- ErrorHandling HandlerErrorHandling - // If Registry is not nil, it is used to register a metric - // "promhttp_metric_handler_errors_total", partitioned by "cause". A - // failed registration causes a panic. Note that this error counter is - // different from the instrumentation you get from the various - // InstrumentHandler... helpers. It counts errors that don't necessarily - // result in a non-2xx HTTP status code. There are two typical cases: - // (1) Encoding errors that only happen after streaming of the HTTP body - // has already started (and the status code 200 has been sent). This - // should only happen with custom collectors. (2) Collection errors with - // no effect on the HTTP status code because ErrorHandling is set to - // ContinueOnError. - Registry prometheus.Registerer - // DisableCompression disables the response encoding (compression) and - // encoding negotiation. If true, the handler will - // never compress the response, even if requested - // by the client and the OfferedCompressions field is set. - DisableCompression bool - // OfferedCompressions is a set of encodings (compressions) handler will - // try to offer when negotiating with the client. This defaults to identity, gzip - // and zstd. - // NOTE: If handler can't agree with the client on the encodings or - // unsupported or empty encodings are set in OfferedCompressions, - // handler always fallbacks to no compression (identity), for - // compatibility reasons. In such cases ErrorLog will be used if set. - OfferedCompressions []Compression - // The number of concurrent HTTP requests is limited to - // MaxRequestsInFlight. Additional requests are responded to with 503 - // Service Unavailable and a suitable message in the body. If - // MaxRequestsInFlight is 0 or negative, no limit is applied. - MaxRequestsInFlight int - // If handling a request takes longer than Timeout, it is responded to - // with 503 ServiceUnavailable and a suitable Message. No timeout is - // applied if Timeout is 0 or negative. Note that with the current - // implementation, reaching the timeout simply ends the HTTP requests as - // described above (and even that only if sending of the body hasn't - // started yet), while the bulk work of gathering all the metrics keeps - // running in the background (with the eventual result to be thrown - // away). Until the implementation is improved, it is recommended to - // implement a separate timeout in potentially slow Collectors. - Timeout time.Duration - // If true, the experimental OpenMetrics encoding is added to the - // possible options during content negotiation. Note that Prometheus - // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is - // the only way to transmit exemplars. However, the move to OpenMetrics - // is not completely transparent. Most notably, the values of "quantile" - // labels of Summaries and "le" labels of Histograms are formatted with - // a trailing ".0" if they would otherwise look like integer numbers - // (which changes the identity of the resulting series on the Prometheus - // server). - EnableOpenMetrics bool - // EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic - // Created Timestamps for counters, histograms and summaries, which for the current - // version of OpenMetrics are defined as extra series with the same name and "_created" - // suffix. 
See also the OpenMetrics specification for more details - // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1 - // - // Created timestamps are used to improve the accuracy of reset detection, - // but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality - // if the scraper does not handle those metrics correctly (converting to created timestamp - // instead of leaving those series as-is). New OpenMetrics versions might improve - // this situation. - // - // Prometheus introduced the feature flag 'created-timestamp-zero-ingestion' - // in version 2.50.0 to handle this situation. - EnableOpenMetricsTextCreatedSamples bool - // ProcessStartTime allows setting process start timevalue that will be exposed - // with "Process-Start-Time-Unix" response header along with the metrics - // payload. This allow callers to have efficient transformations to cumulative - // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per - // scrape target. - // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus - // exposition format. - ProcessStartTime time.Time -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerError. Error contents is -// supposed to be uncompressed plain text. Same as with a plain http.Error, this -// must not be called if the header or any payload has already been sent. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} - -// negotiateEncodingWriter reads the Accept-Encoding header from a request and -// selects the right compression based on an allow-list of supported -// compressions. It returns a writer implementing the compression and an the -// correct value that the caller can set in the response header. -func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { - if len(compressions) == 0 { - return rw, string(Identity), func() {}, nil - } - - // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. - selected := httputil.NegotiateContentEncoding(r, compressions) - - switch selected { - case "zstd": - if internal.NewZstdWriter == nil { - // The content encoding was not implemented yet. - return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) - } - writer, closeWriter, err := internal.NewZstdWriter(rw) - return writer, selected, closeWriter, err - case "gzip": - gz := gzipPool.Get().(*gzip.Writer) - gz.Reset(rw) - return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil - case "identity": - // This means the content is not compressed. - return rw, selected, func() {}, nil - default: - // The content encoding was not implemented yet. - return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. 
Valid formats are: %s", selected, defaultCompressionFormats()) - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go deleted file mode 100644 index d3482c40ca..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// The RoundTripperFunc type is an adapter to allow the use of ordinary -// functions as RoundTrippers. If f is a function with the appropriate -// signature, RountTripperFunc(f) is a RoundTripper that calls f. -type RoundTripperFunc func(req *http.Request) (*http.Response, error) - -// RoundTrip implements the RoundTripper interface. -func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return rt(r) -} - -// InstrumentRoundTripperInFlight is a middleware that wraps the provided -// http.RoundTripper. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.RoundTripper. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return func(r *http.Request) (*http.Response, error) { - gauge.Inc() - defer gauge.Dec() - return next.RoundTrip(r) - } -} - -// InstrumentRoundTripperCounter is a middleware that wraps the provided -// http.RoundTripper to observe the request result with the provided CounterVec. -// The CounterVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. For the "method" label a predefined default label value set -// is used to filter given values. Values besides predefined values will count -// as `unknown` method.`WithExtraMethods` can be used to add more -// methods to the set. Partitioning of the CounterVec happens by HTTP status code -// and/or HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped RoundTripper panics or returns a non-nil error, the Counter -// is not incremented. -// -// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
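// Aside: a sketch of a client-side middleware chain built from the functions
// in this file. Metric names, buckets, and the target URL are illustrative.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outgoing HTTP requests currently in flight.",
	})
	apiRequests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "Outgoing API requests by status code and method.",
		},
		[]string{"code", "method"}, // only "code"/"method" are allowed here
	)
	latency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "client_request_duration_seconds",
			Help:    "Outgoing request latency.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(inFlight, apiRequests, latency)

	client := &http.Client{
		// Outermost to innermost: in-flight gauge, result counter, duration.
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(apiRequests,
				promhttp.InstrumentRoundTripperDuration(latency, http.DefaultTransport),
			),
		),
	}

	resp, err := client.Get("https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}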
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := defaultOptions() - for _, o := range opts { - o.apply(rtOpts) - } - - // Curry the counter with dynamic labels before checking the remaining labels. - code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels())) - - return func(r *http.Request) (*http.Response, error) { - resp, err := next.RoundTrip(r) - if err == nil { - l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) - for label, resolve := range rtOpts.extraLabelsFromCtx { - l[label] = resolve(resp.Request.Context()) - } - addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context())) - } - return resp, err - } -} - -// InstrumentRoundTripperDuration is a middleware that wraps the provided -// http.RoundTripper to observe the request duration with the provided -// ObserverVec. The ObserverVec must have zero, one, or two non-const -// non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. For the "method" label a predefined -// default label value set is used to filter given values. Values besides -// predefined values will count as `unknown` method. `WithExtraMethods` -// can be used to add more methods to the set. The Observe method of the Observer -// in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped RoundTripper panics or returns a non-nil error, no values are -// reported. -// -// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := defaultOptions() - for _, o := range opts { - o.apply(rtOpts) - } - - // Curry the observer with dynamic labels before checking the remaining labels. - code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels())) - - return func(r *http.Request) (*http.Response, error) { - start := time.Now() - resp, err := next.RoundTrip(r) - if err == nil { - l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) - for label, resolve := range rtOpts.extraLabelsFromCtx { - l[label] = resolve(resp.Request.Context()) - } - observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context())) - } - return resp, err - } -} - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. 
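// Aside: a sketch wiring two of these hooks. Each hook receives seconds
// since the request started; histogram names and buckets are assumptions.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func newTracedClient() *http.Client {
	dnsDone := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "client_dns_done_seconds",
		Help:    "Time from request start until DNS resolution completed.",
		Buckets: []float64{0.001, 0.005, 0.025, 0.1, 0.5},
	})
	tlsDone := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "client_tls_handshake_done_seconds",
		Help:    "Time from request start until the TLS handshake completed.",
		Buckets: []float64{0.01, 0.05, 0.25, 1},
	})
	prometheus.MustRegister(dnsDone, tlsDone)

	trace := &promhttp.InstrumentTrace{
		DNSDone:          dnsDone.Observe, // Observe matches the func(float64) field type
		TLSHandshakeDone: tlsDone.Observe,
	}
	return &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
}

func main() {
	client := newTracedClient()
	if resp, err := client.Get("https://example.com"); err == nil {
		resp.Body.Close()
	}
}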
-type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) - - return next.RoundTrip(r) - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go deleted file mode 100644 index 356edb7868..0000000000 --- 
a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "errors" - "net/http" - "strconv" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -// magicString is used for the hacky label test in checkLabels. Remove once fixed. -const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" - -// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], -// which falls back to [prometheus.Observer.Observe] if no labels are provided. -func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { - if labels == nil { - obs.Observe(val) - return - } - obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) -} - -// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], -// which falls back to [prometheus.Counter.Add] if no labels are provided. -func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { - if labels == nil { - obs.Add(val) - return - } - obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) -} - -// InstrumentHandlerInFlight is a middleware that wraps the provided -// http.Handler. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.Handler. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - g.Inc() - defer g.Dec() - next.ServeHTTP(w, r) - }) -} - -// InstrumentHandlerDuration is a middleware that wraps the provided -// http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have valid metric and label names and must have zero, -// one, or two non-const non-curried labels. For those, the only allowed label -// names are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the request duration -// in seconds. Partitioning happens by HTTP status code and/or HTTP method if -// the respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. 
-// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - // Curry the observer with dynamic labels before checking the remaining labels. - code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) - - if code { - return func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) - } - } - - return func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - next.ServeHTTP(w, r) - l := labels(code, method, r.Method, 0, hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) - } -} - -// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler -// to observe the request result with the provided CounterVec. The CounterVec -// must have valid metric and label names and must have zero, one, or two -// non-const non-curried labels. For those, the only allowed label names are -// "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the -// CounterVec happens by HTTP status code and/or HTTP method if the respective -// instance label names are present in the CounterVec. For unpartitioned -// counting, use a CounterVec with zero labels. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, the Counter is not incremented. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - // Curry the counter with dynamic labels before checking the remaining labels. - code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels())) - - if code { - return func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) - } - } - - return func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - - l := labels(code, method, r.Method, 0, hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) - } -} - -// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided -// http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. 
The ObserverVec must have valid -// metric and label names and must have zero, one, or two non-const non-curried -// labels. For those, the only allowed label names are "code" and "method". The -// function panics otherwise. For the "method" label a predefined default label -// value set is used to filter given values. Values besides predefined values -// will count as `unknown` method.`WithExtraMethods` can be used to add more -// methods to the set. The Observe method of the Observer in the -// ObserverVec is called with the request duration in seconds. Partitioning -// happens by HTTP status code and/or HTTP method if the respective instance -// label names are present in the ObserverVec. For unpartitioned observations, -// use an ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler panics before calling WriteHeader, no value is -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - // Curry the observer with dynamic labels before checking the remaining labels. - code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) - - return func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, func(status int) { - l := labels(code, method, r.Method, status, hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) - }) - next.ServeHTTP(d, r) - } -} - -// InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. The -// ObserverVec must have valid metric and label names and must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the request size in -// bytes. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - // Curry the observer with dynamic labels before checking the remaining labels. 
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) - - if code { - return func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - size := computeApproximateRequestSize(r) - - l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) - } - } - - return func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - size := computeApproximateRequestSize(r) - - l := labels(code, method, r.Method, 0, hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) - } -} - -// InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. The -// ObserverVec must have valid metric and label names and must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the response size in -// bytes. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - // Curry the observer with dynamic labels before checking the remaining labels. - code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) - for label, resolve := range hOpts.extraLabelsFromCtx { - l[label] = resolve(r.Context()) - } - observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context())) - }) -} - -// checkLabels returns whether the provided Collector has a non-const, -// non-curried label named "code" and/or "method". It panics if the provided -// Collector does not have a Desc or has more than one Desc or its Desc is -// invalid. It also panics if the Collector has any non-const, non-curried -// labels that are not named "code" or "method". -func checkLabels(c prometheus.Collector) (code, method bool) { - // TODO(beorn7): Remove this hacky way to check for instance labels - // once Descriptors can have their dimensionality queried. - var ( - desc *prometheus.Desc - m prometheus.Metric - pm dto.Metric - lvs []string - ) - - // Get the Desc from the Collector. 
- descc := make(chan *prometheus.Desc, 1) - c.Describe(descc) - - select { - case desc = <-descc: - default: - panic("no description provided by collector") - } - select { - case <-descc: - panic("more than one description provided by collector") - default: - } - - close(descc) - - // Make sure the Collector has a valid Desc by registering it with a - // temporary registry. - prometheus.NewRegistry().MustRegister(c) - - // Create a ConstMetric with the Desc. Since we don't know how many - // variable labels there are, try for as long as it needs. - for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { - m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) - } - - // Write out the metric into a proto message and look at the labels. - // If the value is not the magicString, it is a constLabel, which doesn't interest us. - // If the label is curried, it doesn't interest us. - // In all other cases, only "code" or "method" is allowed. - if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString || isLabelCurried(c, name) { - continue - } - switch name { - case "code": - code = true - case "method": - method = true - default: - panic("metric partitioned with non-supported labels") - } - } - return -} - -func isLabelCurried(c prometheus.Collector, label string) bool { - // This is even hackier than the label test above. - // We essentially try to curry again and see if it works. - // But for that, we need to type-convert to the two - // types we use here, ObserverVec or *CounterVec. - switch v := c.(type) { - case *prometheus.CounterVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - case prometheus.ObserverVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - default: - panic("unsupported metric vec type") - } - return true -} - -func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { - labels := prometheus.Labels{} - - if !(code || method) { - return labels - } - - if code { - labels["code"] = sanitizeCode(status) - } - if method { - labels["method"] = sanitizeMethod(reqMethod, extraMethods...) - } - - return labels -} - -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} - -// If the wrapped http.Handler has a known method, it will be sanitized and returned. -// Otherwise, "unknown" will be returned. The known method list can be extended -// as needed by using extraMethods parameter. -func sanitizeMethod(m string, extraMethods ...string) string { - // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for - // the methods chosen as default. 
- switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - case "TRACE", "trace": - return "trace" - case "PATCH", "patch": - return "patch" - default: - for _, method := range extraMethods { - if strings.EqualFold(m, method) { - return strings.ToLower(m) - } - } - return "unknown" - } -} - -// If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, sanitizeCode will return 200, for consistency with behavior in -// the stdlib. -func sanitizeCode(s int) string { - // See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200, 0: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - if s >= 100 && s <= 599 { - return strconv.Itoa(s) - } - return "unknown" - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go deleted file mode 100644 index c5039590f7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "io" -) - -// NewZstdWriter enables zstd write support if non-nil. 
-var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error) diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go deleted file mode 100644 index 5d4383aa14..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "context" - - "github.com/prometheus/client_golang/prometheus" -) - -// Option are used to configure both handler (middleware) or round tripper. -type Option interface { - apply(*options) -} - -// LabelValueFromCtx are used to compute the label value from request context. -// Context can be filled with values from request through middleware. -type LabelValueFromCtx func(ctx context.Context) string - -// options store options for both a handler or round tripper. -type options struct { - extraMethods []string - getExemplarFn func(requestCtx context.Context) prometheus.Labels - extraLabelsFromCtx map[string]LabelValueFromCtx -} - -func defaultOptions() *options { - return &options{ - getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }, - extraLabelsFromCtx: map[string]LabelValueFromCtx{}, - } -} - -func (o *options) emptyDynamicLabels() prometheus.Labels { - labels := prometheus.Labels{} - - for label := range o.extraLabelsFromCtx { - labels[label] = "" - } - - return labels -} - -type optionApplyFunc func(*options) - -func (o optionApplyFunc) apply(opt *options) { o(opt) } - -// WithExtraMethods adds additional HTTP methods to the list of allowed methods. -// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. -// -// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. -func WithExtraMethods(methods ...string) Option { - return optionApplyFunc(func(o *options) { - o.extraMethods = methods - }) -} - -// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics. -// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but -// metric will continue to observe/increment. -func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { - return optionApplyFunc(func(o *options) { - o.getExemplarFn = getExemplarFn - }) -} - -// WithLabelFromCtx registers a label for dynamic resolution with access to context. 
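Taken together, these options are passed variadically to the promhttp middleware constructors. A minimal sketch of a handler wired up with them; the tenantKey context key and the /api handler are placeholders, and note that a label resolved via WithLabelFromCtx ("tenant" here) must also appear in the vector's variable labels:

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantKey struct{} // hypothetical context key set by auth middleware

func main() {
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "api_requests_total", Help: "API requests."},
		[]string{"code", "method", "tenant"}, // "tenant" is resolved from the context below
	)
	prometheus.MustRegister(reqs)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	http.Handle("/api", promhttp.InstrumentHandlerCounter(
		reqs, api,
		// Count a non-standard verb instead of bucketing it as "unknown".
		promhttp.WithExtraMethods("SEARCH"),
		// Fill the "tenant" label from the request context on every request.
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if t, ok := ctx.Value(tenantKey{}).(string); ok {
				return t
			}
			return "none"
		}),
	))
	_ = http.ListenAndServe(":8080", nil)
}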
-// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage -func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option { - return optionApplyFunc(func(o *options) { - o.extraLabelsFromCtx[name] = valueFn - }) -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index c6fd2f58b7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,1076 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bytes" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode/utf8" - - "github.com/prometheus/client_golang/prometheus/internal" - - "github.com/cespare/xxhash/v2" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - "google.golang.org/protobuf/proto" -) - -const ( - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 -) - -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. -var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry -) - -func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) - MustRegister(NewGoCollector()) -} - -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } -} - -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe method does not yield any descriptors) are excluded from the check. 
-// -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). -type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. - // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. 
As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. -func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -// Error formats the contained errors as a bullet point list, preceded by the -// total number of errors. Note that this results in a multi-line string. -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// Append appends the provided error if it is not nil. 
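The switch-over idiom that the AlreadyRegisteredError documentation alludes to looks roughly like this (a sketch; the metric name is invented, and the standard errors package is assumed to be imported alongside prometheus):

requests := prometheus.NewCounter(prometheus.CounterOpts{
	Name: "worker_requests_total", // hypothetical metric
	Help: "Requests handled by the worker.",
})
if err := prometheus.Register(requests); err != nil {
	are := prometheus.AlreadyRegisteredError{}
	if errors.As(err, &are) {
		// An equal Collector was registered before; switch over to it.
		requests = are.ExistingCollector.(prometheus.Counter)
	} else {
		panic(err)
	}
}
requests.Inc()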
-func (errs *MultiError) Append(err error) {
-	if err != nil {
-		*errs = append(*errs, err)
-	}
-}
-
-// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
-// contained error as error if len(errs) is 1. In all other cases, it returns
-// the MultiError directly. This is helpful for returning a MultiError in a way
-// that only uses the MultiError if needed.
-func (errs MultiError) MaybeUnwrap() error {
-	switch len(errs) {
-	case 0:
-		return nil
-	case 1:
-		return errs[0]
-	default:
-		return errs
-	}
-}
-
-// Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
-// and Collector. The zero value is not usable. Create instances with
-// NewRegistry or NewPedanticRegistry.
-//
-// Registry implements Collector to allow it to be used for creating groups of
-// metrics. See the Grouping example for how this can be done.
-type Registry struct {
-	mtx                   sync.RWMutex
-	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
-	descIDs               map[uint64]struct{}
-	dimHashesByName       map[string]uint64
-	uncheckedCollectors   []Collector
-	pedanticChecksEnabled bool
-}
-
-// Register implements Registerer.
-func (r *Registry) Register(c Collector) error {
-	var (
-		descChan           = make(chan *Desc, capDescChan)
-		newDescIDs         = map[uint64]struct{}{}
-		newDimHashesByName = map[string]uint64{}
-		collectorID        uint64 // All desc IDs XOR'd together.
-		duplicateDescErr   error
-	)
-	go func() {
-		c.Describe(descChan)
-		close(descChan)
-	}()
-	r.mtx.Lock()
-	defer func() {
-		// Drain channel in case of premature return to not leak a goroutine.
-		for range descChan {
-		}
-		r.mtx.Unlock()
-	}()
-	// Conduct various tests...
-	for desc := range descChan {
-
-		// Is the descriptor valid at all?
-		if desc.err != nil {
-			return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
-		}
-
-		// Is the descID unique?
-		// (In other words: Is the fqName + constLabel combination unique?)
-		if _, exists := r.descIDs[desc.id]; exists {
-			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
-		}
-		// If it is not a duplicate desc in this collector, XOR it to
-		// the collectorID. (We allow duplicate descs within the same
-		// collector, but their existence must be a no-op.)
-		if _, exists := newDescIDs[desc.id]; !exists {
-			newDescIDs[desc.id] = struct{}{}
-			collectorID ^= desc.id
-		}
-
-		// Are all the label names and the help string consistent with
-		// previous descriptors of the same name?
-		// First check existing descriptors...
-		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
-			if dimHash != desc.dimHash {
-				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
-			}
-			continue
-		}
-
-		// ...then check the new descriptors already seen.
-		if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
-			if dimHash != desc.dimHash {
-				return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
-			}
-			continue
-		}
-		newDimHashesByName[desc.fqName] = desc.dimHash
-	}
-	// A Collector yielding no Desc at all is considered unchecked.
- if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil - } - if existing, exists := r.collectorsByID[collectorID]; exists { - switch e := existing.(type) { - case *wrappingCollector: - return AlreadyRegisteredError{ - ExistingCollector: e.unwrapRecursively(), - NewCollector: c, - } - default: - return AlreadyRegisteredError{ - ExistingCollector: e, - NewCollector: c, - } - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // All desc IDs XOR'd together. - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID ^= desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - r.mtx.RLock() - - if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { - // Fast path. - r.mtx.RUnlock() - return nil, nil - } - - var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) - for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector - } - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. - if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. 
- go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() - - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. - defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } - } - }() - - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. - cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. Do the same as above, - // just without the default. - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - } - break - } - // Start more workers. - go collectWorker() - goroutineBudget-- - runtime.Gosched() - } - // Once both checkedMetricChan and uncheckedMetricChan are closed - // and drained, the contraption above will nil out cmc and umc, - // and then we can leave the collect loop here. - if cmc == nil && umc == nil { - break - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// Describe implements Collector. -func (r *Registry) Describe(ch chan<- *Desc) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - // Only report the checked Collectors; unchecked collectors don't report any - // Desc. - for _, c := range r.collectorsByID { - c.Describe(ch) - } -} - -// Collect implements Collector. -func (r *Registry) Collect(ch chan<- Metric) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - for _, c := range r.collectorsByID { - c.Collect(ch) - } - for _, c := range r.uncheckedCollectors { - c.Collect(ch) - } -} - -// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the -// Prometheus text format, and writes it to a temporary file. Upon success, the -// temporary file is renamed to the provided filename. -// -// This is intended for use with the textfile collector of the node exporter. -// Note that the node exporter expects the filename to be suffixed with ".prom". 
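A typical call matching the doc comment above, as it might appear at the end of a batch job; the directory is an assumption and has to match whatever node_exporter's --collector.textfile.directory flag points at:

// Gather everything registered with the default registry and atomically
// replace the .prom file the textfile collector scrapes (log is assumed imported).
if err := prometheus.WriteToTextfile(
	"/var/lib/node_exporter/textfile_collector/batch_job.prom",
	prometheus.DefaultGatherer,
); err != nil {
	log.Fatalf("writing metrics textfile: %v", err)
}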
-func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - - mfs, err := g.Gather() - if err != nil { - return err - } - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - - if err := os.Chmod(tmp.Name(), 0o644); err != nil { - return err - } - return os.Rename(tmp.Name(), filename) -} - -// processMetric is an internal helper method only used by the Gather method. -func processMetric( - metric Metric, - metricFamiliesByName map[string]*dto.MetricFamily, - metricHashes map[uint64]struct{}, - registeredDescIDs map[uint64]struct{}, -) error { - desc := metric.Desc() - // Wrapped metrics collected by an unchecked Collector can have an - // invalid Desc. - if desc.err != nil { - return desc.err - } - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %w", desc, err) - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { // Existing name. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - ) - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? 
- if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calls are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. -func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - errs MultiError // The collected errors to return in the end. - ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - multiErr := MultiError{} - if errors.As(err, &multiErr) { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { - errs = append(errs, err) - continue - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms. 
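To make the check below concrete, a sketch that trips it (metric names invented): both collectors register fine, since their fully-qualified names differ, but gathering fails because the counter's name collides with the histogram's implicit _count series.

reg := prometheus.NewRegistry()
reg.MustRegister(
	prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "job_duration_seconds",
		Help: "Job duration.",
	}),
	prometheus.NewCounter(prometheus.CounterOpts{
		Name: "job_duration_seconds_count",
		Help: "Collides with the histogram's _count series.",
	}),
)
if _, err := reg.Gather(); err != nil {
	fmt.Println(err) // reports the suffix collision instead of exposing broken output
}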
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } - } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } - } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q collides with previously collected metric named %q", - newName, newName+"_bucket", - ) - } - } - return nil -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, -) error { - name := metricFamily.GetName() - - // Type consistency with metric family. 
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), - ) - } - - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := xxhash.New() - h.WriteString(name) - h.Write(separatorByteSlice) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { - // We cannot sort dtoMetric.Label in place as it is immutable by contract. - copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) - copy(copiedLabels, dtoMetric.Label) - sort.Sort(internal.LabelPairSorter(copiedLabels)) - dtoMetric.Label = copiedLabels - } - for _, lp := range dtoMetric.Label { - h.WriteString(lp.GetName()) - h.Write(separatorByteSlice) - h.WriteString(lp.GetValue()) - h.Write(separatorByteSlice) - } - if dtoMetric.TimestampMs != nil { - h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) - h.Write(separatorByteSlice) - } - hSum := h.Sum64() - if _, exists := metricHashes[hSum]; exists { - return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, - ) - } - metricHashes[hSum] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? 
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) - copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels.names { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(internal.LabelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} - -var _ TransactionalGatherer = &MultiTRegistry{} - -// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple -// transactional gatherers. -// -// It is caller responsibility to ensure two registries have mutually exclusive metric families, -// no deduplication will happen. -type MultiTRegistry struct { - tGatherers []TransactionalGatherer -} - -// NewMultiTRegistry creates MultiTRegistry. -func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { - return &MultiTRegistry{ - tGatherers: tGatherers, - } -} - -// Gather implements TransactionalGatherer interface. -func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { - errs := MultiError{} - - dFns := make([]func(), 0, len(r.tGatherers)) - // TODO(bwplotka): Implement concurrency for those? - for _, g := range r.tGatherers { - // TODO(bwplotka): Check for duplicates? - m, d, err := g.Gather() - errs.Append(err) - - mfs = append(mfs, m...) - dFns = append(dFns, d) - } - - // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. - sort.Slice(mfs, func(i, j int) bool { - return *mfs[i].Name < *mfs[j].Name - }) - return mfs, func() { - for _, d := range dFns { - d() - } - }, errs.MaybeUnwrap() -} - -// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory -// used by metric family is no longer used by a caller. This allows implementations with cache. -type TransactionalGatherer interface { - // Gather returns metrics in a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. 
If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - // - // Important: done is expected to be triggered (even if the error occurs!) - // once caller does not need returned slice of dto.MetricFamily. - Gather() (_ []*dto.MetricFamily, done func(), err error) -} - -// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. -func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { - return &noTransactionGatherer{g: g} -} - -type noTransactionGatherer struct { - g Gatherer -} - -// Gather implements TransactionalGatherer interface. -func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { - mfs, err := g.g.Gather() - return mfs, func() {}, err -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index ac5203c6fa..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/beorn7/perks/quantile" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v1.0.0 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. Observations are - // usually positive or zero. 
Negative observations are accepted but - // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See - // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations - // for details. - Observe(float64) -} - -var errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name to a non-empty string. While all other fields are -// optional and can safely be left at their zero value, it is recommended to set -// a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v1.0.0 of the library. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Due to the way a Summary is represented in the Prometheus text format - // and how it is handled by the Prometheus server internally, “quantile” - // is an illegal label name. Construction of a Summary or SummaryVec - // will panic if this label name is used in ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is an empty map, resulting in a summary without - // quantiles. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Only applies to pre-calculated quantiles, does not - // apply to _sum and _count. Must be positive. The default value is - // DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. 
For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 - - // now is for testing purposes, by default it's time.Now. - now func() time.Time -} - -// SummaryVecOpts bundles the options to create a SummaryVec metric. -// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels -// is optional and can safely be left to its default value. -type SummaryVecOpts struct { - SummaryOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// Problem with the sliding-window decay algorithm... The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels.names) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) - } - - for _, n := range desc.variableLabels.names { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = map[float64]float64{} - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - if opts.now == nil { - opts.now = time.Now - } - if len(opts.Objectives) == 0 { - // Use the lock-free implementation of a Summary without objectives. 
-		s := &noObjectivesSummary{
-			desc:       desc,
-			labelPairs: MakeLabelPairs(desc, labelValues),
-			counts:     [2]*summaryCounts{{}, {}},
-		}
-		s.init(s) // Init self-collection.
-		s.createdTs = timestamppb.New(opts.now())
-		return s
-	}
-
-	s := &summary{
-		desc: desc,
-		now:  opts.now,
-
-		objectives:       opts.Objectives,
-		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
-
-		labelPairs: MakeLabelPairs(desc, labelValues),
-
-		hotBuf:         make([]float64, 0, opts.BufCap),
-		coldBuf:        make([]float64, 0, opts.BufCap),
-		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
-	}
-	s.headStreamExpTime = opts.now().Add(s.streamDuration)
-	s.hotBufExpTime = s.headStreamExpTime
-
-	for i := uint32(0); i < opts.AgeBuckets; i++ {
-		s.streams = append(s.streams, s.newStream())
-	}
-	s.headStream = s.streams[0]
-
-	for qu := range s.objectives {
-		s.sortedObjectives = append(s.sortedObjectives, qu)
-	}
-	sort.Float64s(s.sortedObjectives)
-
-	s.init(s) // Init self-collection.
-	s.createdTs = timestamppb.New(opts.now())
-	return s
-}
-
-type summary struct {
-	selfCollector
-
-	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
-	mtx    sync.Mutex // Protects every other moving part.
-	// Lock bufMtx before mtx if both are needed.
-
-	desc *Desc
-
-	now func() time.Time
-
-	objectives       map[float64]float64
-	sortedObjectives []float64
-
-	labelPairs []*dto.LabelPair
-
-	sum float64
-	cnt uint64
-
-	hotBuf, coldBuf []float64
-
-	streams                          []*quantile.Stream
-	streamDuration                   time.Duration
-	headStream                       *quantile.Stream
-	headStreamIdx                    int
-	headStreamExpTime, hotBufExpTime time.Time
-
-	createdTs *timestamppb.Timestamp
-}
-
-func (s *summary) Desc() *Desc {
-	return s.desc
-}
-
-func (s *summary) Observe(v float64) {
-	s.bufMtx.Lock()
-	defer s.bufMtx.Unlock()
-
-	now := s.now()
-	if now.After(s.hotBufExpTime) {
-		s.asyncFlush(now)
-	}
-	s.hotBuf = append(s.hotBuf, v)
-	if len(s.hotBuf) == cap(s.hotBuf) {
-		s.asyncFlush(now)
-	}
-}
-
-func (s *summary) Write(out *dto.Metric) error {
-	sum := &dto.Summary{
-		CreatedTimestamp: s.createdTs,
-	}
-	qs := make([]*dto.Quantile, 0, len(s.objectives))
-
-	s.bufMtx.Lock()
-	s.mtx.Lock()
-	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
-	s.swapBufs(s.now())
-	s.bufMtx.Unlock()
-
-	s.flushColdBuf()
-	sum.SampleCount = proto.Uint64(s.cnt)
-	sum.SampleSum = proto.Float64(s.sum)
-
-	for _, rank := range s.sortedObjectives {
-		var q float64
-		if s.headStream.Count() == 0 {
-			q = math.NaN()
-		} else {
-			q = s.headStream.Query(rank)
-		}
-		qs = append(qs, &dto.Quantile{
-			Quantile: proto.Float64(rank),
-			Value:    proto.Float64(q),
-		})
-	}
-
-	s.mtx.Unlock()
-
-	if len(qs) > 0 {
-		sort.Sort(quantSort(qs))
-	}
-	sum.Quantile = qs
-
-	out.Summary = sum
-	out.Label = s.labelPairs
-	return nil
-}
-
-func (s *summary) newStream() *quantile.Stream {
-	return quantile.NewTargeted(s.objectives)
-}
-
-// asyncFlush needs bufMtx locked.
-func (s *summary) asyncFlush(now time.Time) {
-	s.mtx.Lock()
-	s.swapBufs(now)
-
-	// Unblock the original goroutine that was responsible for the mutation
-	// that triggered the compaction. But hold onto the global non-buffer
-	// state mutex until the operation finishes.
-	go func() {
-		s.flushColdBuf()
-		s.mtx.Unlock()
-	}()
-}
-
-// maybeRotateStreams needs mtx AND bufMtx locked.
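For orientation, a sketch of the configuration this stream rotation implements (metric name invented, time assumed imported): with a MaxAge of 10 minutes and 5 age buckets, the head stream rotates every 2 minutes, so quantile estimates roughly cover only the last MaxAge worth of observations.

lat := prometheus.NewSummary(prometheus.SummaryOpts{
	Name:       "rpc_duration_seconds",
	Help:       "RPC latency.",
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	MaxAge:     10 * time.Minute, // sliding window length
	AgeBuckets: 5,                // window advances in MaxAge/AgeBuckets steps
})
prometheus.MustRegister(lat)
lat.Observe(0.137)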
-func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type summaryCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 -} - -type noObjectivesSummary struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // summaryCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the summary) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*summaryCounts - - labelPairs []*dto.LabelPair - - createdTs *timestamppb.Timestamp -} - -func (s *noObjectivesSummary) Desc() *Desc { - return s.desc -} - -func (s *noObjectivesSummary) Observe(v float64) { - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&s.countAndHotIdx, 1) - hotCounts := s.counts[n>>63] - - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. 
- atomic.AddUint64(&hotCounts.count, 1) -} - -func (s *noObjectivesSummary) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - s.writeMtx.Lock() - defer s.writeMtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := s.counts[n>>63] - coldCounts := s.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - - sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - CreatedTimestamp: s.createdTs, - } - - out.Summary = sum - out.Label = s.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - return nil -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. -type SummaryVec struct { - *MetricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. -// -// Due to the way a Summary is represented in the Prometheus text format and how -// it is handled by the Prometheus server internally, “quantile” is an illegal -// label name. NewSummaryVec will panic if this label name is used. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - return V2.NewSummaryVec(SummaryVecOpts{ - SummaryOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. -func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { - for _, ln := range opts.VariableLabels.labelNames() { - if ln == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - return &SummaryVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts.SummaryOpts, lvs...) 
-		}),
-	}
-}
-
-// GetMetricWithLabelValues returns the Summary for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Summary is created.
-//
-// It is possible to call this method without using the returned Summary to only
-// create the new Summary but leave it at its starting value, a Summary without
-// any observations.
-//
-// Keeping the Summary for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Summary from the SummaryVec. In that case,
-// the Summary will still exist, but it will not be exported anymore, even if a
-// Summary with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
-	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
-	if metric != nil {
-		return metric.(Observer), err
-	}
-	return nil, err
-}
-
-// GetMetricWith returns the Summary for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Summary is created. Implications of
-// creating a Summary without using it and keeping the Summary for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
-	metric, err := v.MetricVec.GetMetricWith(labels)
-	if metric != nil {
-		return metric.(Observer), err
-	}
-	return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-//	myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
-	s, err := v.GetMetricWithLabelValues(lvs...)
-	if err != nil {
-		panic(err)
-	}
-	return s
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *SummaryVec) With(labels Labels) Observer {
-	s, err := v.GetMetricWith(labels)
-	if err != nil {
-		panic(err)
-	}
-	return s
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the SummaryVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
-	vec, err := v.MetricVec.CurryWith(labels)
-	if vec != nil {
-		return &SummaryVec{vec}, err
-	}
-	return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
-	vec, err := v.CurryWith(labels)
-	if err != nil {
-		panic(err)
-	}
-	return vec
-}
-
-type constSummary struct {
-	desc       *Desc
-	count      uint64
-	sum        float64
-	quantiles  map[float64]float64
-	labelPairs []*dto.LabelPair
-	createdTs  *timestamppb.Timestamp
-}
-
-func (s *constSummary) Desc() *Desc {
-	return s.desc
-}
-
-func (s *constSummary) Write(out *dto.Metric) error {
-	sum := &dto.Summary{
-		CreatedTimestamp: s.createdTs,
-	}
-	qs := make([]*dto.Quantile, 0, len(s.quantiles))
-
-	sum.SampleCount = proto.Uint64(s.count)
-	sum.SampleSum = proto.Float64(s.sum)
-
-	for rank, q := range s.quantiles {
-		qs = append(qs, &dto.Quantile{
-			Quantile: proto.Float64(rank),
-			Value:    proto.Float64(q),
-		})
-	}
-
-	if len(qs) > 0 {
-		sort.Sort(quantSort(qs))
-	}
-	sum.Quantile = qs
-
-	out.Summary = sum
-	out.Label = s.labelPairs
-
-	return nil
-}
-
-// NewConstSummary returns a metric representing a Prometheus summary with fixed
-// values for the count, sum, and quantiles. As those parameters cannot be
-// changed, the returned value does not implement the Summary interface (but
-// only the Metric interface). Users of this package will not have much use for
-// it in regular operations. However, when implementing custom Collectors, it is
-// useful as a throw-away metric that is generated on the fly to send it to
-// Prometheus in the Collect method.
-//
-// quantiles maps ranks to quantile values. For example, a median latency of
-// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-//
-//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
-//
-// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstSummary(
-	desc *Desc,
-	count uint64,
-	sum float64,
-	quantiles map[float64]float64,
-	labelValues ...string,
-) (Metric, error) {
-	if desc.err != nil {
-		return nil, desc.err
-	}
-	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
-		return nil, err
-	}
-	return &constSummary{
-		desc:       desc,
-		count:      count,
-		sum:        sum,
-		quantiles:  quantiles,
-		labelPairs: MakeLabelPairs(desc, labelValues),
-	}, nil
-}
-
-// MustNewConstSummary is a version of NewConstSummary that panics where
-// NewConstSummary would have returned an error.
-func MustNewConstSummary(
-	desc *Desc,
-	count uint64,
-	sum float64,
-	quantiles map[float64]float64,
-	labelValues ...string,
-) Metric {
-	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
- if err != nil { - panic(err) - } - return m -} - -// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. -func NewConstSummaryWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - ct time.Time, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: MakeLabelPairs(desc, labelValues), - createdTs: timestamppb.New(ct), - }, nil -} - -// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where -// NewConstSummaryWithCreatedTimestamp would have returned an error. -func MustNewConstSummaryWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - ct time.Time, - labelValues ...string, -) Metric { - m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index 52344fef53..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar -// later on will be also supported. -// Timer is usually used to time a function call in the -// following way: -// -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -// -// or -// -// func TimeMeWithExemplar() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDurationWithExemplar(exemplar) -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. The observed -// duration is also returned. ObserveDuration is usually called with a defer -// statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. 
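// A minimal usage sketch for Timer, assuming it sits in this package; the
// metric and function names below are illustrative, not part of the library.
var exampleOpLatency = NewSummary(SummaryOpts{
	Name: "example_operation_duration_seconds",
	Help: "Duration of an example operation.",
})

func exampleTimedOperation() {
	// ObserveDuration runs on return and records the elapsed seconds.
	timer := NewTimer(exampleOpLatency)
	defer timer.ObserveDuration()
	// ... actual work ...
}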
-func (t *Timer) ObserveDuration() time.Duration {
-	d := time.Since(t.begin)
-	if t.observer != nil {
-		t.observer.Observe(d.Seconds())
-	}
-	return d
-}
-
-// ObserveDurationWithExemplar is like ObserveDuration, but it will also
-// observe exemplar with the duration unless exemplar is nil or the provided
-// Observer can't be cast to ExemplarObserver.
-func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
-	d := time.Since(t.begin)
-	eo, ok := t.observer.(ExemplarObserver)
-	if ok && exemplar != nil {
-		eo.ObserveWithExemplar(d.Seconds(), exemplar)
-		return d
-	}
-	if t.observer != nil {
-		t.observer.Observe(d.Seconds())
-	}
-	return d
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
deleted file mode 100644
index 0f9ce63f40..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// UntypedOpts is an alias for Opts. See there for doc comments.
-type UntypedOpts Opts
-
-// UntypedFunc works like GaugeFunc but the collected metric is of type
-// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
-// type.
-//
-// To create UntypedFunc instances, use NewUntypedFunc.
-type UntypedFunc interface {
-	Metric
-	Collector
-}
-
-// NewUntypedFunc creates a new UntypedFunc based on the provided
-// UntypedOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where an UntypedFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
-	return newValueFunc(NewDesc(
-		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
-		opts.Help,
-		nil,
-		opts.ConstLabels,
-	), UntypedValue, function)
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/value.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/value.go
deleted file mode 100644
index cc23011fad..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "sort" - "time" - "unicode/utf8" - - "github.com/prometheus/client_golang/prometheus/internal" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. Use UntypedValue to mark a metric -// with an unknown type. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -var ( - CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() - GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() - UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() -) - -func (v ValueType) ToDTO() *dto.MetricType { - switch v { - case CounterValue: - return CounterMetricTypePtr - case GaugeValue: - return GaugeMetricTypePtr - default: - return UntypedMetricTypePtr - } -} - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: MakeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. 
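// A minimal sketch of the custom-Collector use case described above, assuming
// it sits in this package; exampleCollector and its fixed reading are
// illustrative, not part of the library.
type exampleCollector struct {
	capacity *Desc
}

func newExampleCollector() *exampleCollector {
	return &exampleCollector{
		capacity: NewDesc("example_capacity_bytes", "Capacity reported by an external system.", nil, nil),
	}
}

func (c *exampleCollector) Describe(ch chan<- *Desc) {
	ch <- c.capacity
}

func (c *exampleCollector) Collect(ch chan<- Metric) {
	// A fresh throw-away metric is emitted on every scrape; 42 stands in
	// for a value read from the external system.
	ch <- MustNewConstMetric(c.capacity, GaugeValue, 42)
}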
-func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - - metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { - return nil, err - } - - return &constMetric{ - desc: desc, - metric: metric, - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters -// with created timestamp set and returns an error for other metric types. -func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - switch valueType { - case CounterValue: - break - default: - return nil, errors.New("created timestamps are only supported for counters") - } - - metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { - return nil, err - } - - return &constMetric{ - desc: desc, - metric: metric, - }, nil -} - -// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where -// NewConstMetricWithCreatedTimestamp would have returned an error. -func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { - m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - metric *dto.Metric -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - out.Label = m.metric.Label - out.Counter = m.metric.Counter - out.Gauge = m.metric.Gauge - out.Untyped = m.metric.Untyped - return nil -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - e *dto.Exemplar, - m *dto.Metric, - ct *timestamppb.Timestamp, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -// MakeLabelPairs is a helper function to create protobuf LabelPairs from the -// variable and constant labels in the provided Desc. The values for the -// variable labels are defined by the labelValues slice, which must be in the -// same order as the corresponding variable labels in the Desc. -// -// This function is only needed for custom Metric implementations. See MetricVec -// example. 
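// A small illustrative helper showing MakeLabelPairs with a hypothetical Desc;
// the values passed must be in the same order as the variable labels.
func exampleLabelPairs() []*dto.LabelPair {
	desc := NewDesc("example_metric_total", "Help text.", []string{"shard"}, nil)
	// Returns {shard="shard-0"} plus any const labels from desc, sorted for
	// the exposition format.
	return MakeLabelPairs(desc, []string{"shard-0"})
}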
-func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels.names) == 0 { - // Moderately fast path. - return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, l := range desc.variableLabels.names { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(l), - Value: proto.String(labelValues[i]), - }) - } - labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(internal.LabelPairSorter(labelPairs)) - return labelPairs -} - -// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 128 - -// newExemplar creates a new dto.Exemplar from the provided values. An error is -// returned if any of the label names or values are invalid or if the total -// number of runes in the label names and values exceeds ExemplarMaxRunes. -func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { - e := &dto.Exemplar{} - e.Value = proto.Float64(value) - tsProto := timestamppb.New(ts) - if err := tsProto.CheckValid(); err != nil { - return nil, err - } - e.Timestamp = tsProto - labelPairs := make([]*dto.LabelPair, 0, len(l)) - var runes int - for name, value := range l { - if !checkLabelName(name) { - return nil, fmt.Errorf("exemplar label name %q is invalid", name) - } - runes += utf8.RuneCountInString(name) - if !utf8.ValidString(value) { - return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) - } - runes += utf8.RuneCountInString(value) - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(name), - Value: proto.String(value), - }) - } - if runes > ExemplarMaxRunes { - return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) - } - e.Label = labelPairs - return e, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 2c808eece0..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// MetricVec is a Collector to bundle metrics of the same name that differ in -// their label values. MetricVec is not used directly but as a building block -// for implementations of vectors of a given metric type, like GaugeVec, -// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be -// used for custom Metric implementations. -// -// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in -// FooVec and initialize it with NewMetricVec. 
Implement wrappers for
-// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
-// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
-// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
-// add the convenience methods WithLabelValues, With, and MustCurryWith, which
-// panic instead of returning errors. See also the MetricVec example.
-type MetricVec struct {
-	*metricMap
-
-	curry []curriedLabelValue
-
-	// hashAdd and hashAddByte can be replaced for testing collision handling.
-	hashAdd     func(h uint64, s string) uint64
-	hashAddByte func(h uint64, b byte) uint64
-}
-
-// NewMetricVec returns an initialized MetricVec.
-func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
-	return &MetricVec{
-		metricMap: &metricMap{
-			metrics:   map[uint64][]metricWithLabelValues{},
-			desc:      desc,
-			newMetric: newMetric,
-		},
-		hashAdd:     hashAdd,
-		hashAddByte: hashAddByte,
-	}
-}
-
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
-	lvs = constrainLabelValues(m.desc, lvs, m.curry)
-
-	h, err := m.hashLabelValues(lvs)
-	if err != nil {
-		return false
-	}
-
-	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
-}
-
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc. However, such inconsistent Labels
-// can never match an actual metric, so the method will always return false in
-// that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
-	labels, closer := constrainLabels(m.desc, labels)
-	defer closer()
-
-	h, err := m.hashLabels(labels)
-	if err != nil {
-		return false
-	}
-
-	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
-}
-
-// DeletePartialMatch deletes all metrics where the variable labels contain all of those
-// passed in as labels. The order of the labels does not matter.
-// It returns the number of metrics deleted.
-//
-// Note that curried labels will never be matched if deleting from the curried vector.
-// To match curried labels with DeletePartialMatch, it must be called on the base vector.
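// An illustrative sketch of DeletePartialMatch (the vector and labels are
// hypothetical): both series share code="404", so a partial match on that
// single label deletes two metrics.
func exampleDeletePartialMatch() int {
	requests := NewCounterVec(CounterOpts{
		Name: "example_requests_total",
		Help: "Requests by status code and method.",
	}, []string{"code", "method"})
	requests.WithLabelValues("404", "GET").Inc()
	requests.WithLabelValues("404", "POST").Inc()
	// Only the code label has to match; both series above are removed.
	return requests.DeletePartialMatch(Labels{"code": "404"}) // Returns 2.
}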
-func (m *MetricVec) DeletePartialMatch(labels Labels) int { - labels, closer := constrainLabels(m.desc, labels) - defer closer() - - return m.metricMap.deleteByLabels(labels, m.curry) -} - -// Without explicit forwarding of Describe, Collect, Reset, those methods won't -// show up in GoDoc. - -// Describe implements Collector. -func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } - -// Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } - -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { m.metricMap.Reset() } - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the MetricVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -// -// Note that CurryWith is usually not called directly but through a wrapper -// around MetricVec, implementing a vector for a specific Metric -// implementation, for example GaugeVec. -func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, labelName := range m.desc.variableLabels.names { - val, ok := labels[labelName] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", labelName) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{ - i, - m.desc.variableLabels.constrain(labelName, val), - }) - } - } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &MetricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created (by -// calling the newMetric function provided during construction of the -// MetricVec). -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it in its initial state. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. 
-// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// -// Note that GetMetricWithLabelValues is usually not called directly but through -// a wrapper around MetricVec, implementing a vector for a specific Metric -// implementation, for example GaugeVec. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - lvs = constrainLabelValues(m.desc, lvs, m.curry) - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -// -// Note that GetMetricWith is usually not called directly but through a wrapper -// around MetricVec, implementing a vector for a specific Metric implementation, -// for example GaugeVec. 
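// An illustrative sketch of the wrapper-level GetMetricWith (the GaugeVec and
// label names are hypothetical). The Labels form is harder to get wrong than
// positional values when several labels are involved, at the cost of building
// a map.
func exampleGetMetricWith(v *GaugeVec) error {
	g, err := v.GetMetricWith(Labels{"region": "us-south", "zone": "a"})
	if err != nil {
		return err
	}
	g.Set(1)
	return nil
}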
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - labels, closer := constrainLabels(m.desc, labels) - defer closer() - - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels.names); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, labelName := range m.desc.variableLabels.names { - val, ok := labels[labelName] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", labelName) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", labelName) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. -type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// Reset deletes all metrics in this vector. -func (m *metricMap) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.metrics { - delete(m.metrics, h) - } -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - - i := findMetricWithLabelValues(metrics, lvs, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - old := metrics - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - old[len(old)-1] = metricWithLabelValues{} - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. 
If there
-// are multiple matches in the bucket, use labels to select a metric and
-// remove only that metric.
-func (m *metricMap) deleteByHashWithLabels(
-	h uint64, labels Labels, curry []curriedLabelValue,
-) bool {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	metrics, ok := m.metrics[h]
-	if !ok {
-		return false
-	}
-	i := findMetricWithLabels(m.desc, metrics, labels, curry)
-	if i >= len(metrics) {
-		return false
-	}
-
-	if len(metrics) > 1 {
-		old := metrics
-		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
-		old[len(old)-1] = metricWithLabelValues{}
-	} else {
-		delete(m.metrics, h)
-	}
-	return true
-}
-
-// deleteByLabels deletes a metric if the given labels are present in the metric.
-func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	var numDeleted int
-
-	for h, metrics := range m.metrics {
-		i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
-		if i >= len(metrics) {
-			// Didn't find matching labels in this metric slice.
-			continue
-		}
-		delete(m.metrics, h)
-		numDeleted++
-	}
-
-	return numDeleted
-}
-
-// findMetricWithPartialLabels returns the index of the matching metric or
-// len(metrics) if not found.
-func findMetricWithPartialLabels(
-	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
-) int {
-	for i, metric := range metrics {
-		if matchPartialLabels(desc, metric.values, labels, curry) {
-			return i
-		}
-	}
-	return len(metrics)
-}
-
-// indexOf searches the given slice of strings for the target string and returns
-// the index or len(items) as well as a boolean whether the search succeeded.
-func indexOf(target string, items []string) (int, bool) {
-	for i, l := range items {
-		if l == target {
-			return i, true
-		}
-	}
-	return len(items), false
-}
-
-// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
-// and returns whether it matches either the "base" value or the curried value accordingly.
-// It also indicates whether the match is against a curried or uncurried value.
-func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
-	for _, curriedValue := range curry {
-		if curriedValue.index == index {
-			// This label was curried. See if the curried value matches our target.
-			return curriedValue.value == targetValue, true
-		}
-	}
-	// This label was not curried. See if the current value matches our target label.
-	return values[index] == targetValue, false
-}
-
-// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
-func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
-	for l, v := range labels {
-		// Check if the target label exists in our metrics and get the index.
-		varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
-		if validLabel {
-			// Check the value of that label against the target value.
-			// We don't consider curried values in partial matches.
-			matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
-			if matches && !curried {
-				continue
-			}
-		}
-		return false
-	}
-	return true
-}
-
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
-// or creates it and returns the new one.
-//
-// This function holds the mutex.
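// Note the double-checked locking below: the first lookup runs under the
// cheap read lock to serve the common case of an existing metric; only on a
// miss is the write lock taken, and the lookup is repeated there because
// another goroutine may have created the metric in the meantime.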
-func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) - if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabels retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) - if !ok { - lvs := extractLabelValues(m.desc, labels, curry) - metric = m.newMetric(lvs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. -func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. -func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. 
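// The metrics argument is a single hash bucket; returning len(metrics) as the
// "not found" sentinel lets callers use a plain index comparison instead of a
// second boolean result.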
-func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { - return false - } - var iLVs, iCurry int - for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { - return false - } - iLVs++ - } - return true -} - -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { - return false - } - iCurry := 0 - for i, k := range desc.variableLabels.names { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if values[i] != labels[k] { - return false - } - } - return true -} - -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels.names { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = labels[k] - } - return labelValues -} - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} - -var labelsPool = &sync.Pool{ - New: func() interface{} { - return make(Labels) - }, -} - -func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { - if len(desc.variableLabels.labelConstraints) == 0 { - // Fast path when there's no constraints - return labels, func() {} - } - - constrainedLabels := labelsPool.Get().(Labels) - for l, v := range labels { - constrainedLabels[l] = desc.variableLabels.constrain(l, v) - } - - return constrainedLabels, func() { - for k := range constrainedLabels { - delete(constrainedLabels, k) - } - labelsPool.Put(constrainedLabels) - } -} - -func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { - if len(desc.variableLabels.labelConstraints) == 0 { - // Fast path when there's no constraints - return lvs - } - - constrainedValues := make([]string, len(lvs)) - var iCurry, iLVs int - for i := 0; i < len(lvs)+len(curry); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - iCurry++ - continue - } - - if i < len(desc.variableLabels.names) { - constrainedValues[iLVs] = desc.variableLabels.constrain( - desc.variableLabels.names[i], - lvs[iLVs], - ) - } else { - constrainedValues[iLVs] = lvs[iLVs] - } - iLVs++ - } - return constrainedValues -} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/vnext.go deleted file mode 100644 index 42bc3a8f06..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/vnext.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may 
not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -type v2 struct{} - -// V2 is a struct that can be referenced to access experimental API that might -// be present in v2 of client golang someday. It offers extended functionality -// of v1 with slightly changed API. It is acceptable to use some pieces from v1 -// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc` -// in the same codebase. -var V2 = v2{} diff --git a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index 25da157f15..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - "github.com/prometheus/client_golang/prometheus/internal" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. Wrapping a nil value is valid, resulting -// in a no-op Registerer. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics -// exposed. See also -// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels -// -// Conflicts between Collectors registered through the original Registerer with -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry. -// -// The Collector example demonstrates a use of WrapRegistererWith. -func WrapRegistererWith(labels Labels, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - labels: labels, - } -} - -// WrapRegistererWithPrefix returns a Registerer wrapping the provided -// Registerer. 
Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided prefix to the name of all Metrics it collects. -// Wrapping a nil value is valid, resulting in a no-op Registerer. -// -// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of -// a sub-system. To make this work, register metrics of the sub-system with the -// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful -// to use the same prefix for all metrics exposed. In particular, do not prefix -// metric names that are standardized across applications, as that would break -// horizontal monitoring, for example the metrics provided by the Go collector -// (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, -// respectively.) -// -// Conflicts between Collectors registered through the original Registerer with -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry. -func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - prefix: prefix, - } -} - -type wrappingRegisterer struct { - wrappedRegisterer Registerer - prefix string - labels Labels -} - -func (r *wrappingRegisterer) Register(c Collector) error { - if r.wrappedRegisterer == nil { - return nil - } - return r.wrappedRegisterer.Register(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - if r.wrappedRegisterer == nil { - return - } - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -func (r *wrappingRegisterer) Unregister(c Collector) bool { - if r.wrappedRegisterer == nil { - return false - } - return r.wrappedRegisterer.Unregister(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -type wrappingCollector struct { - wrappedCollector Collector - prefix string - labels Labels -} - -func (c *wrappingCollector) Collect(ch chan<- Metric) { - wrappedCh := make(chan Metric) - go func() { - c.wrappedCollector.Collect(wrappedCh) - close(wrappedCh) - }() - for m := range wrappedCh { - ch <- &wrappingMetric{ - wrappedMetric: m, - prefix: c.prefix, - labels: c.labels, - } - } -} - -func (c *wrappingCollector) Describe(ch chan<- *Desc) { - wrappedCh := make(chan *Desc) - go func() { - c.wrappedCollector.Describe(wrappedCh) - close(wrappedCh) - }() - for desc := range wrappedCh { - ch <- wrapDesc(desc, c.prefix, c.labels) - } -} - -func (c *wrappingCollector) unwrapRecursively() Collector { - switch wc := c.wrappedCollector.(type) { - case *wrappingCollector: - return wc.unwrapRecursively() - default: - return wc - } -} - -type wrappingMetric struct { - wrappedMetric Metric - prefix string - labels Labels -} - -func (m *wrappingMetric) Desc() *Desc { - return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) -} - -func (m *wrappingMetric) Write(out *dto.Metric) error { - if err := m.wrappedMetric.Write(out); err != nil { - return err - } - if len(m.labels) == 0 { - // No wrapping labels. 
-		return nil
-	}
-	for ln, lv := range m.labels {
-		out.Label = append(out.Label, &dto.LabelPair{
-			Name:  proto.String(ln),
-			Value: proto.String(lv),
-		})
-	}
-	sort.Sort(internal.LabelPairSorter(out.Label))
-	return nil
-}
-
-func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
-	constLabels := Labels{}
-	for _, lp := range desc.constLabelPairs {
-		constLabels[*lp.Name] = *lp.Value
-	}
-	for ln, lv := range labels {
-		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
-			return &Desc{
-				fqName:          desc.fqName,
-				help:            desc.help,
-				variableLabels:  desc.variableLabels,
-				constLabelPairs: desc.constLabelPairs,
-				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
-			}
-		}
-		constLabels[ln] = lv
-	}
-	// NewDesc will do remaining validations.
-	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
-	// Propagate errors if there were any. This will override any error
-	// created by NewDesc above, i.e. earlier errors get precedence.
-	if desc.err != nil {
-		newDesc.err = desc.err
-	}
-	return newDesc
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/client_model/LICENSE b/openshift/tools/vendor/github.com/prometheus/client_model/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/client_model/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/openshift/tools/vendor/github.com/prometheus/client_model/NOTICE b/openshift/tools/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e410e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/openshift/tools/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/openshift/tools/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index 2f15490758..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,1399 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.20.3 -// source: io/prometheus/client/metrics.proto - -package io_prometheus_client - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type MetricType int32 - -const ( - // COUNTER must use the Metric field "counter". - MetricType_COUNTER MetricType = 0 - // GAUGE must use the Metric field "gauge". - MetricType_GAUGE MetricType = 1 - // SUMMARY must use the Metric field "summary". - MetricType_SUMMARY MetricType = 2 - // UNTYPED must use the Metric field "untyped". - MetricType_UNTYPED MetricType = 3 - // HISTOGRAM must use the Metric field "histogram". - MetricType_HISTOGRAM MetricType = 4 - // GAUGE_HISTOGRAM must use the Metric field "histogram". - MetricType_GAUGE_HISTOGRAM MetricType = 5 -) - -// Enum value maps for MetricType. -var ( - MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", - } - MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, - } -) - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} - -func (x MetricType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (MetricType) Descriptor() protoreflect.EnumDescriptor { - return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() -} - -func (MetricType) Type() protoreflect.EnumType { - return &file_io_prometheus_client_metrics_proto_enumTypes[0] -} - -func (x MetricType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *MetricType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = MetricType(num) - return nil -} - -// Deprecated: Use MetricType.Descriptor instead. 
-func (MetricType) EnumDescriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} -} - -type LabelPair struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (x *LabelPair) Reset() { - *x = LabelPair{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelPair) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelPair) ProtoMessage() {} - -func (x *LabelPair) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. -func (*LabelPair) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} -} - -func (x *LabelPair) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *LabelPair) GetValue() string { - if x != nil && x.Value != nil { - return *x.Value - } - return "" -} - -type Gauge struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` -} - -func (x *Gauge) Reset() { - *x = Gauge{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Gauge) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Gauge) ProtoMessage() {} - -func (x *Gauge) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
-func (*Gauge) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} -} - -func (x *Gauge) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -type Counter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` -} - -func (x *Counter) Reset() { - *x = Counter{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Counter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Counter) ProtoMessage() {} - -func (x *Counter) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Counter.ProtoReflect.Descriptor instead. -func (*Counter) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} -} - -func (x *Counter) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -func (x *Counter) GetExemplar() *Exemplar { - if x != nil { - return x.Exemplar - } - return nil -} - -func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.CreatedTimestamp - } - return nil -} - -type Quantile struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` -} - -func (x *Quantile) Reset() { - *x = Quantile{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Quantile) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Quantile) ProtoMessage() {} - -func (x *Quantile) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
-func (*Quantile) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} -} - -func (x *Quantile) GetQuantile() float64 { - if x != nil && x.Quantile != nil { - return *x.Quantile - } - return 0 -} - -func (x *Quantile) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -type Summary struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` -} - -func (x *Summary) Reset() { - *x = Summary{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Summary) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Summary) ProtoMessage() {} - -func (x *Summary) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Summary.ProtoReflect.Descriptor instead. -func (*Summary) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} -} - -func (x *Summary) GetSampleCount() uint64 { - if x != nil && x.SampleCount != nil { - return *x.SampleCount - } - return 0 -} - -func (x *Summary) GetSampleSum() float64 { - if x != nil && x.SampleSum != nil { - return *x.SampleSum - } - return 0 -} - -func (x *Summary) GetQuantile() []*Quantile { - if x != nil { - return x.Quantile - } - return nil -} - -func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.CreatedTimestamp - } - return nil -} - -type Untyped struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` -} - -func (x *Untyped) Reset() { - *x = Untyped{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Untyped) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Untyped) ProtoMessage() {} - -func (x *Untyped) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
-func (*Untyped) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} -} - -func (x *Untyped) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -type Histogram struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. - CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` - // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. - // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. - // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). - // In the future, more bucket schemas may be added using numbers < -4 or > 8. - Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. - // Negative buckets for the native histogram. - NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` - // Use either "negative_delta" or "negative_count", the former for - // regular histograms with integer counts, the latter for float - // histograms. - NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. - // Positive buckets for the native histogram. - // Use a no-op span (offset 0, length 0) for a native histogram without any - // observations yet and with a zero_threshold of 0. Otherwise, it would be - // indistinguishable from a classic histogram. - PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` - // Use either "positive_delta" or "positive_count", the former for - // regular histograms with integer counts, the latter for float - // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). 
- PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. - // Only used for native histograms. These exemplars MUST have a timestamp. - Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` -} - -func (x *Histogram) Reset() { - *x = Histogram{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Histogram) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Histogram) ProtoMessage() {} - -func (x *Histogram) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. -func (*Histogram) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} -} - -func (x *Histogram) GetSampleCount() uint64 { - if x != nil && x.SampleCount != nil { - return *x.SampleCount - } - return 0 -} - -func (x *Histogram) GetSampleCountFloat() float64 { - if x != nil && x.SampleCountFloat != nil { - return *x.SampleCountFloat - } - return 0 -} - -func (x *Histogram) GetSampleSum() float64 { - if x != nil && x.SampleSum != nil { - return *x.SampleSum - } - return 0 -} - -func (x *Histogram) GetBucket() []*Bucket { - if x != nil { - return x.Bucket - } - return nil -} - -func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.CreatedTimestamp - } - return nil -} - -func (x *Histogram) GetSchema() int32 { - if x != nil && x.Schema != nil { - return *x.Schema - } - return 0 -} - -func (x *Histogram) GetZeroThreshold() float64 { - if x != nil && x.ZeroThreshold != nil { - return *x.ZeroThreshold - } - return 0 -} - -func (x *Histogram) GetZeroCount() uint64 { - if x != nil && x.ZeroCount != nil { - return *x.ZeroCount - } - return 0 -} - -func (x *Histogram) GetZeroCountFloat() float64 { - if x != nil && x.ZeroCountFloat != nil { - return *x.ZeroCountFloat - } - return 0 -} - -func (x *Histogram) GetNegativeSpan() []*BucketSpan { - if x != nil { - return x.NegativeSpan - } - return nil -} - -func (x *Histogram) GetNegativeDelta() []int64 { - if x != nil { - return x.NegativeDelta - } - return nil -} - -func (x *Histogram) GetNegativeCount() []float64 { - if x != nil { - return x.NegativeCount - } - return nil -} - -func (x *Histogram) GetPositiveSpan() []*BucketSpan { - if x != nil { - return x.PositiveSpan - } - return nil -} - -func (x *Histogram) GetPositiveDelta() []int64 { - if x != nil { - return x.PositiveDelta - } - return nil -} - -func (x *Histogram) GetPositiveCount() []float64 { - if x != nil { - return x.PositiveCount - } - return nil -} - -func (x *Histogram) GetExemplars() []*Exemplar { - if x != nil { - return x.Exemplars - } - return nil -} - -// A Bucket of a conventional histogram, each of which is treated as -// an individual counter-like time series by Prometheus. 
-type Bucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. - Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` -} - -func (x *Bucket) Reset() { - *x = Bucket{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket) ProtoMessage() {} - -func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. -func (*Bucket) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} -} - -func (x *Bucket) GetCumulativeCount() uint64 { - if x != nil && x.CumulativeCount != nil { - return *x.CumulativeCount - } - return 0 -} - -func (x *Bucket) GetCumulativeCountFloat() float64 { - if x != nil && x.CumulativeCountFloat != nil { - return *x.CumulativeCountFloat - } - return 0 -} - -func (x *Bucket) GetUpperBound() float64 { - if x != nil && x.UpperBound != nil { - return *x.UpperBound - } - return 0 -} - -func (x *Bucket) GetExemplar() *Exemplar { - if x != nil { - return x.Exemplar - } - return nil -} - -// A BucketSpan defines a number of consecutive buckets in a native -// histogram with their offset. Logically, it would be more -// straightforward to include the bucket counts in the Span. However, -// the protobuf representation is more compact in the way the data is -// structured here (with all the buckets in a single array separate -// from the Spans). -type BucketSpan struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. 
-} - -func (x *BucketSpan) Reset() { - *x = BucketSpan{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BucketSpan) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BucketSpan) ProtoMessage() {} - -func (x *BucketSpan) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. -func (*BucketSpan) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} -} - -func (x *BucketSpan) GetOffset() int32 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -func (x *BucketSpan) GetLength() uint32 { - if x != nil && x.Length != nil { - return *x.Length - } - return 0 -} - -type Exemplar struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. -} - -func (x *Exemplar) Reset() { - *x = Exemplar{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Exemplar) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Exemplar) ProtoMessage() {} - -func (x *Exemplar) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
-func (*Exemplar) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} -} - -func (x *Exemplar) GetLabel() []*LabelPair { - if x != nil { - return x.Label - } - return nil -} - -func (x *Exemplar) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -type Metric struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` -} - -func (x *Metric) Reset() { - *x = Metric{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metric) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metric) ProtoMessage() {} - -func (x *Metric) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metric.ProtoReflect.Descriptor instead. 
-func (*Metric) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} -} - -func (x *Metric) GetLabel() []*LabelPair { - if x != nil { - return x.Label - } - return nil -} - -func (x *Metric) GetGauge() *Gauge { - if x != nil { - return x.Gauge - } - return nil -} - -func (x *Metric) GetCounter() *Counter { - if x != nil { - return x.Counter - } - return nil -} - -func (x *Metric) GetSummary() *Summary { - if x != nil { - return x.Summary - } - return nil -} - -func (x *Metric) GetUntyped() *Untyped { - if x != nil { - return x.Untyped - } - return nil -} - -func (x *Metric) GetHistogram() *Histogram { - if x != nil { - return x.Histogram - } - return nil -} - -func (x *Metric) GetTimestampMs() int64 { - if x != nil && x.TimestampMs != nil { - return *x.TimestampMs - } - return 0 -} - -type MetricFamily struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` -} - -func (x *MetricFamily) Reset() { - *x = MetricFamily{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricFamily) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetricFamily) ProtoMessage() {} - -func (x *MetricFamily) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. 
-func (*MetricFamily) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} -} - -func (x *MetricFamily) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *MetricFamily) GetHelp() string { - if x != nil && x.Help != nil { - return *x.Help - } - return "" -} - -func (x *MetricFamily) GetType() MetricType { - if x != nil && x.Type != nil { - return *x.Type - } - return MetricType_COUNTER -} - -func (x *MetricFamily) GetMetric() []*Metric { - if x != nil { - return x.Metric - } - return nil -} - -func (x *MetricFamily) GetUnit() string { - if x != nil && x.Unit != nil { - return *x.Unit - } - return "" -} - -var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor - -var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, - 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, - 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 
0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, - 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47, - 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, - 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x01, 0x52, - 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, - 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, - 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, - 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, - 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, - 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, - 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, - 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, - 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, - 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, - 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, - 0x22, 
0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, - 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, - 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, - 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, - 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, - 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, - 0x73, 0x18, 0x06, 0x20, 0x01, 
0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, - 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, - 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, - 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, - 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, - 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, - 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, - 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, - 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, - 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, - 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, - 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, -} - -var ( - file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once - file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc -) - -func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { - file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { - file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) - }) - return file_io_prometheus_client_metrics_proto_rawDescData -} - -var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ - (MetricType)(0), // 0: io.prometheus.client.MetricType - (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair - (*Gauge)(nil), // 2: io.prometheus.client.Gauge - (*Counter)(nil), // 3: io.prometheus.client.Counter - (*Quantile)(nil), // 4: io.prometheus.client.Quantile - (*Summary)(nil), // 5: io.prometheus.client.Summary - (*Untyped)(nil), // 6: io.prometheus.client.Untyped - (*Histogram)(nil), // 7: 
io.prometheus.client.Histogram - (*Bucket)(nil), // 8: io.prometheus.client.Bucket - (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan - (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar - (*Metric)(nil), // 11: io.prometheus.client.Metric - (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily - (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp -} -var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ - 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar - 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp - 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile - 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp - 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket - 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp - 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan - 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar - 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 20, // [20:20] is the sub-list for method output_type - 20, // [20:20] is the sub-list for method input_type - 20, // [20:20] is the sub-list for extension type_name - 20, // [20:20] is the sub-list for extension extendee - 0, // [0:20] is the sub-list for field type_name -} - -func init() { file_io_prometheus_client_metrics_proto_init() } -func file_io_prometheus_client_metrics_proto_init() { - if File_io_prometheus_client_metrics_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LabelPair); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Gauge); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Counter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache 
- case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Quantile); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Summary); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Untyped); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Histogram); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BucketSpan); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Exemplar); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metric); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricFamily); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, - NumEnums: 1, - NumMessages: 12, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_io_prometheus_client_metrics_proto_goTypes, - DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, - EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, - MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, - }.Build() - File_io_prometheus_client_metrics_proto = out.File - file_io_prometheus_client_metrics_proto_rawDesc = nil - file_io_prometheus_client_metrics_proto_goTypes = nil - file_io_prometheus_client_metrics_proto_depIdxs = nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/LICENSE b/openshift/tools/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - 
Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/openshift/tools/vendor/github.com/prometheus/common/NOTICE b/openshift/tools/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1a5e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/openshift/tools/vendor/github.com/prometheus/common/expfmt/decode.go b/openshift/tools/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index 1448439b7f..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/encoding/protodelim" - - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format.FormatType() { - case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. 
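Editorial aside on the decode.go removal above: the Decoder / ResponseFormat / NewDecoder trio is easiest to judge with a usage sketch. Assuming a plain-text /metrics endpoint (the URL below is illustrative, not from this repo), scraping and decoding would look roughly like:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Fetch an exposition; the URL is a placeholder for any Prometheus target.
	resp, err := http.Get("http://localhost:9090/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// ResponseFormat picks the wire format from the Content-Type header;
	// NewDecoder falls back to the text parser for unknown formats.
	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break // the decoder signals a clean end of stream with io.EOF
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}

Per the code being deleted, the text decoder parses the whole stream on the first Decode and then hands back one MetricFamily per call, which is why io.EOF is the normal termination.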
-type protoDecoder struct { - r protodelim.Reader -} - -// Decode implements the Decoder interface. -func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - opts := protodelim.UnmarshalOptions{ - MaxSize: -1, - } - if err := opts.UnmarshalFrom(d.r, v); err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - fams map[string]*dto.MetricFamily - err error -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - if d.err == nil { - // Read all metrics in one shot. - var p TextParser - d.fams, d.err = p.TextToMetricFamilies(d.r) - // If we don't get an error, store io.EOF for the end. - if d.err == nil { - d.err = io.EOF - } - } - // Pick off one MetricFamily per Decode until there's nothing left. - for key, fam := range d.fams { - v.Name = fam.Name - v.Help = fam.Help - v.Type = fam.Type - v.Metric = fam.Metric - delete(d.fams, key) - return nil - } - return d.err -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occurred. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) 
- } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". 
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. 
- lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/expfmt/encode.go b/openshift/tools/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index d7f3d76f55..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "google.golang.org/protobuf/encoding/protodelim" - "google.golang.org/protobuf/encoding/prototext" - - "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -// Closer is implemented by Encoders that need to be closed to finalize -// encoding. (For example, OpenMetrics needs a final `# EOF` line.) -// -// Note that all Encoder implementations returned from this package implement -// Closer, too, even if the Close call is a no-op. This happens in preparation -// for adding a Close method to the Encoder interface directly in a (mildly -// breaking) release in the future. -type Closer interface { - Close() error -} - -type encoderCloser struct { - encode func(*dto.MetricFamily) error - close func() error -} - -func (ec encoderCloser) Encode(v *dto.MetricFamily) error { - return ec.encode(v) -} - -func (ec encoderCloser) Close() error { - return ec.close() -} - -// Negotiate returns the Content-Type based on the given Accept header. If no -// appropriate accepted type is found, FmtText is returned (which is the -// Prometheus text format). This function will never negotiate FmtOpenMetrics, -// as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. -func Negotiate(h http.Header) Format { - escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { - switch Format(escapeParam) { - case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format("; escaping=" + escapeParam) - default: - // If the escaping parameter is unknown, ignore it. 
- } - } - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim + escapingScheme - case "text": - return FmtProtoText + escapingScheme - case "compact-text": - return FmtProtoCompact + escapingScheme - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + escapingScheme - } - } - return FmtText + escapingScheme -} - -// NegotiateIncludingOpenMetrics works like Negotiate but includes -// FmtOpenMetrics as an option for the result. Note that this function is -// temporary and will disappear once FmtOpenMetrics is fully supported and as -// such may be negotiated by the normal Negotiate function. -func NegotiateIncludingOpenMetrics(h http.Header) Format { - escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { - switch Format(escapeParam) { - case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format("; escaping=" + escapeParam) - default: - // If the escaping parameter is unknown, ignore it. - } - } - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim + escapingScheme - case "text": - return FmtProtoText + escapingScheme - case "compact-text": - return FmtProtoCompact + escapingScheme - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + escapingScheme - } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - switch ver { - case OpenMetricsVersion_1_0_0: - return FmtOpenMetrics_1_0_0 + escapingScheme - default: - return FmtOpenMetrics_0_0_1 + escapingScheme - } - } - } - return FmtText + escapingScheme -} - -// NewEncoder returns a new encoder based on content type negotiation. All -// Encoder implementations returned by NewEncoder also implement Closer, and -// callers should always call the Close method. It is currently only required -// for FmtOpenMetrics, but a future (breaking) release will add the Close method -// to the Encoder interface directly. The current version of the Encoder -// interface is kept for backwards compatibility. -// In cases where the Format does not allow for UTF-8 names, the global -// NameEscapingScheme will be applied. -// -// NewEncoder can be called with additional options to customize the OpenMetrics text output. -// For example: -// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) -// -// Extra options are ignored for all other formats. 
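For reviewers unfamiliar with this API, a minimal serving sketch of the negotiation and encoding path (the metric name, value, and port are illustrative; the MetricFamily is hand-built with protobuf helpers):

package main

import (
	"log"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("demo_requests_total"), // illustrative name
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// Negotiate inspects the Accept header and falls back to the
		// Prometheus text format when nothing better matches.
		format := expfmt.Negotiate(r.Header)
		w.Header().Set("Content-Type", string(format))

		enc := expfmt.NewEncoder(w, format)
		if err := enc.Encode(mf); err != nil {
			log.Println(err)
			return
		}
		// All encoders returned here implement Closer; Close is a no-op
		// except for OpenMetrics, where it appends the required "# EOF".
		if closer, ok := enc.(expfmt.Closer); ok {
			closer.Close()
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}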
-func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { - escapingScheme := format.ToEscapingScheme() - - switch format.FormatType() { - case TypeProtoDelim: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) - return err - }, - close: func() error { return nil }, - } - case TypeProtoCompact: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) - return err - }, - close: func() error { return nil }, - } - case TypeProtoText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) - return err - }, - close: func() error { return nil }, - } - case TypeTextPlain: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) - return err - }, - close: func() error { return nil }, - } - case TypeOpenMetrics: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) - return err - }, - close: func() error { - _, err := FinalizeOpenMetrics(w) - return err - }, - } - } - panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go b/openshift/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index b26886560d..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -import ( - "errors" - "strings" - - "github.com/prometheus/common/model" -) - -// Format specifies the HTTP content type of the different wire protocols. -type Format string - -// Constants to assemble the Content-Type values for the different wire -// protocols. The Content-Type strings here are all for the legacy exposition -// formats, where valid characters for metric names and label names are limited. -// Support for arbitrary UTF-8 characters in those names is already partially -// implemented in this module (see model.ValidationScheme), but to actually use -// it on the wire, new content-type strings will have to be agreed upon and -// added here. -const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion_0_0_1 = "0.0.1" - OpenMetricsVersion_1_0_0 = "1.0.0" - - // The Content-Type values for the different wire protocols. 
Do not do direct - // comparisons to these constants, instead use the comparison functions. - // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. - FmtUnknown Format = `` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. - FmtProtoText Format = ProtoFmt + ` encoding=text` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. - FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. - FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) - -// FormatType is a Go enum representing the overall category for the given -// Format. As the number of Format permutations increases, doing basic string -// comparisons are not feasible, so this enum captures the most useful -// high-level attribute of the Format string. -type FormatType int - -const ( - TypeUnknown FormatType = iota - TypeProtoCompact - TypeProtoDelim - TypeProtoText - TypeTextPlain - TypeOpenMetrics -) - -// NewFormat generates a new Format from the type provided. Mostly used for -// tests, most Formats should be generated as part of content negotiation in -// encode.go. If a type has more than one version, the latest version will be -// returned. -func NewFormat(t FormatType) Format { - switch t { - case TypeProtoCompact: - return FmtProtoCompact - case TypeProtoDelim: - return FmtProtoDelim - case TypeProtoText: - return FmtProtoText - case TypeTextPlain: - return FmtText - case TypeOpenMetrics: - return FmtOpenMetrics_1_0_0 - default: - return FmtUnknown - } -} - -// NewOpenMetricsFormat generates a new OpenMetrics format matching the -// specified version number. -func NewOpenMetricsFormat(version string) (Format, error) { - if version == OpenMetricsVersion_0_0_1 { - return FmtOpenMetrics_0_0_1, nil - } - if version == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0, nil - } - return FmtUnknown, errors.New("unknown open metrics version string") -} - -// WithEscapingScheme returns a copy of Format with the specified escaping -// scheme appended to the end. If an escaping scheme already exists it is -// removed. -func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { - var terms []string - for _, p := range strings.Split(string(f), ";") { - toks := strings.Split(p, "=") - if len(toks) != 2 { - trimmed := strings.TrimSpace(p) - if len(trimmed) > 0 { - terms = append(terms, trimmed) - } - continue - } - key := strings.TrimSpace(toks[0]) - if key != model.EscapingKey { - terms = append(terms, strings.TrimSpace(p)) - } - } - terms = append(terms, model.EscapingKey+"="+s.String()) - return Format(strings.Join(terms, "; ")) -} - -// FormatType deduces an overall FormatType for the given format. 
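A quick sanity check of the Format / FormatType round trip defined above; a small sketch using only the constants and functions shown in this file (output comments reflect those definitions):

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// NewFormat returns the latest version for a type; FormatType parses a
	// full Content-Type string back into the coarse enum.
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics)
	fmt.Println(f) // application/openmetrics-text; version=1.0.0; charset=utf-8
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics) // true

	// An unrecognized Content-Type maps to TypeUnknown.
	fmt.Println(expfmt.Format("application/json").FormatType() == expfmt.TypeUnknown) // true
}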
-func (f Format) FormatType() FormatType { - toks := strings.Split(string(f), ";") - params := make(map[string]string) - for i, t := range toks { - if i == 0 { - continue - } - args := strings.Split(t, "=") - if len(args) != 2 { - continue - } - params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) - } - - switch strings.TrimSpace(toks[0]) { - case ProtoType: - if params["proto"] != ProtoProtocol { - return TypeUnknown - } - switch params["encoding"] { - case "delimited": - return TypeProtoDelim - case "text": - return TypeProtoText - case "compact-text": - return TypeProtoCompact - default: - return TypeUnknown - } - case OpenMetricsType: - if params["charset"] != "utf-8" { - return TypeUnknown - } - return TypeOpenMetrics - case "text/plain": - v, ok := params["version"] - if !ok { - return TypeTextPlain - } - if v == TextVersion { - return TypeTextPlain - } - return TypeUnknown - default: - return TypeUnknown - } -} - -// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the -// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid -// "escaping" term exists, that will be used. Otherwise, the global default will -// be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { - toks := strings.Split(p, "=") - if len(toks) != 2 { - continue - } - key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) - if key == model.EscapingKey { - scheme, err := model.ToEscapingScheme(value) - if err != nil { - return model.NameEscapingScheme - } - return scheme - } - } - return model.NameEscapingScheme -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go b/openshift/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index dfac962a4e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Build only when actually fuzzing -//go:build gofuzz -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. 
-func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/openshift/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go deleted file mode 100644 index a21ed4ec1f..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ /dev/null @@ -1,696 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - "google.golang.org/protobuf/types/known/timestamppb" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -type encoderOption struct { - withCreatedLines bool - withUnit bool -} - -type EncoderOption func(*encoderOption) - -// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder -// to include _created lines (See -// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). -// Created timestamps can improve the accuracy of series reset detection, but -// come with a bandwidth cost. -// -// At the time of writing, created timestamp ingestion is still experimental in -// Prometheus and need to be enabled with the feature-flag -// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are -// still possible. Therefore, it is recommended to use this feature with caution. -func WithCreatedLines() EncoderOption { - return func(t *encoderOption) { - t.withCreatedLines = true - } -} - -// WithUnit is an EncoderOption enabling a set unit to be written to the output -// and to be added to the metric name, if it's not there already, as a suffix. -// Without opting in this way, the unit will not be added to the metric name and, -// on top of that, the unit will not be passed onto the output, even if it -// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil. -func WithUnit() EncoderOption { - return func(t *encoderOption) { - t.withUnit = true - } -} - -// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the -// OpenMetrics text format and writes the resulting lines to 'out'. It returns -// the number of bytes written and any error encountered. The output will have -// the same order as the input, no further sorting is performed. Furthermore, -// this function assumes the input is already sanitized and does not perform any -// sanity checks. If the input contains duplicate metrics or invalid metric or -// label names, the conversion will result in invalid text format output. -// -// If metric names conform to the legacy validation pattern, they will be placed -// outside the brackets in the traditional way, like `foo{}`. If the metric name -// fails the legacy validation check, it will be placed quoted inside the -// brackets: `{"foo"}`. 
As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// Similar to metric names, if label names conform to the legacy validation -// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label -// name fails the legacy validation check, it will be quoted: -// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// This function fulfills the type 'expfmt.encoder'. -// -// Note that OpenMetrics requires a final `# EOF` line. Since this function acts -// on individual metric families, it is the responsibility of the caller to -// append this line to 'out' once all metric families have been written. -// Conveniently, this can be done by calling FinalizeOpenMetrics. -// -// The output should be fully OpenMetrics compliant. However, there are a few -// missing features and peculiarities to avoid complications when switching from -// Prometheus to OpenMetrics or vice versa: -// -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT` -// lines. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. -// -// - According to the OM specs, the `# UNIT` line is optional, but if populated, -// the unit has to be present in the metric name as its suffix: -// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). -// However, in order to accommodate any potential scenario where such a change in the -// metric name is not desirable, the users are here given the choice of either explicitly -// opt in, in case they wish for the unit to be included in the output AND in the metric name -// as a suffix (see the description of the WithUnit function above), -// or not to opt in, in case they don't want for any of that to happen. -// -// - No support for the following (optional) features: info type, -// stateset type, gaugehistogram type. -// -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). -// -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) { - toOM := encoderOption{} - for _, option := range options { - option(&toOM) - } - - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var ( - n int - metricType = in.GetType() - compliantName = name - ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { - compliantName = name[:len(name)-6] - } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { - compliantName = compliantName + "_" + *in.Unit - } - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = writeName(w, compliantName) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, true) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = writeName(w, compliantName) - written += n - if err != nil { - return - } - switch metricType { - case dto.MetricType_COUNTER: - if strings.HasSuffix(name, "_total") { - n, err = w.WriteString(" counter\n") - } else { - n, err = w.WriteString(" unknown\n") - } - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" unknown\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - if toOM.withUnit && in.Unit != nil { - n, err = w.WriteString("# UNIT ") - written += n - if err != nil { - return - } - n, err = writeName(w, compliantName) - written += n - if err != nil { - return - } - - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Unit, true) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - - var createdTsBytesWritten int - - // Finally the samples, one line for each. - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" - } - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", compliantName, metric, - ) - } - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, "", 0, - metric.Counter.GetValue(), 0, false, - metric.Counter.Exemplar, - ) - if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { - createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) - n += createdTsBytesWritten - } - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", compliantName, metric, - ) - } - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, "", 0, - metric.Gauge.GetValue(), 0, false, - nil, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", compliantName, metric, - ) - } - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, "", 0, - metric.Untyped.GetValue(), 0, false, - nil, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", compliantName, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, compliantName, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), 0, false, - nil, - ) - written += n 
- if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, compliantName, "_count", metric, "", 0, - 0, metric.Summary.GetSampleCount(), true, - nil, - ) - if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { - createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) - n += createdTsBytesWritten - } - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", compliantName, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeOpenMetricsSample( - w, compliantName, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - 0, b.GetCumulativeCount(), true, - b.Exemplar, - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeOpenMetricsSample( - w, compliantName, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, compliantName, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, compliantName, "_count", metric, "", 0, - 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { - createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) - n += createdTsBytesWritten - } - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", compliantName, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. -func FinalizeOpenMetrics(w io.Writer) (written int, err error) { - return w.Write([]byte("# EOF\n")) -} - -// writeOpenMetricsSample writes a single sample in OpenMetrics text format to -// w, given the metric name, the metric proto message itself, optionally an -// additional label name with a float64 value (use empty string as label name if -// not required), the value (optionally as float64 or uint64, determined by -// useIntValue), and optionally an exemplar (use nil if not required). The -// function returns the number of bytes written and any error encountered. -func writeOpenMetricsSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - floatValue float64, intValue uint64, useIntValue bool, - exemplar *dto.Exemplar, -) (int, error) { - written := 0 - n, err := writeOpenMetricsNameAndLabelPairs( - w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - if useIntValue { - n, err = writeUint(w, intValue) - } else { - n, err = writeOpenMetricsFloat(w, floatValue) - } - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - // TODO(beorn7): Format this directly without converting to a float first. 
- n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) - written += n - if err != nil { - return written, err - } - } - if exemplar != nil && len(exemplar.Label) > 0 { - n, err = writeExemplar(w, exemplar) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but -// formats the float in OpenMetrics style. -func writeOpenMetricsNameAndLabelPairs( - w enhancedWriter, - name string, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - var ( - written int - separator byte = '{' - metricInsideBraces = false - ) - - if name != "" { - // If the name does not pass the legacy validity check, we must put the - // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { - metricInsideBraces = true - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - separator = ',' - } - - n, err := writeName(w, name) - written += n - if err != nil { - return written, err - } - } - - if len(in) == 0 && additionalLabelName == "" { - if metricInsideBraces { - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - } - return written, nil - } - - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := writeName(w, lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeOpenMetricsCreated writes the created timestamp for a single time series -// following OpenMetrics text format to w, given the metric name, the metric proto -// message itself, optionally a suffix to be removed, e.g. '_total' for counters, -// an additional label name with a float64 value (use empty string as label name if -// not required) and the timestamp that represents the created timestamp. -// The function returns the number of bytes written and any error encountered. 
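To make the created-timestamp path concrete, a sketch that opts in via WithCreatedLines (the metric name and timestamp are illustrative; the counter's CreatedTimestamp field comes from the client_model protobuf):

package main

import (
	"log"
	"os"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("demo_requests_total"), // illustrative
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(7),
				CreatedTimestamp: timestamppb.New(time.Unix(1700000000, 0)),
			},
		}},
	}

	// With the option enabled, a demo_requests_created line follows the
	// demo_requests_total sample; the _total suffix is trimmed first.
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf, expfmt.WithCreatedLines()); err != nil {
		log.Fatal(err)
	}
	expfmt.FinalizeOpenMetrics(os.Stdout) // appends the mandatory "# EOF"
}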
-func writeOpenMetricsCreated(w enhancedWriter, - name, suffixToTrim string, metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - createdTimestamp *timestamppb.Timestamp, -) (int, error) { - written := 0 - n, err := writeOpenMetricsNameAndLabelPairs( - w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - - // TODO(beorn7): Format this directly from components of ts to - // avoid overflow/underflow and precision issues of the float - // conversion. - n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9) - written += n - if err != nil { - return written, err - } - - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeExemplar writes the provided exemplar in OpenMetrics format to w. The -// function returns the number of bytes written and any error encountered. -func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { - written := 0 - n, err := w.WriteString(" # ") - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, e.GetValue()) - written += n - if err != nil { - return written, err - } - if e.Timestamp != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - err = (*e).Timestamp.CheckValid() - if err != nil { - return written, err - } - ts := (*e).Timestamp.AsTime() - // TODO(beorn7): Format this directly from components of ts to - // avoid overflow/underflow and precision issues of the float - // conversion. - n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting -// number would otherwise contain neither a "." nor an "e". -func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return w.WriteString("1.0") - case f == 0: - return w.WriteString("0.0") - case f == -1: - return w.WriteString("-1.0") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - if !bytes.ContainsAny(*bp, "e.") { - *bp = append(*bp, '.', '0') - } - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeUint is like writeInt just for uint64. 
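The float normalization performed by writeOpenMetricsFloat above, minus its NaN/Inf special cases, can be mirrored with plain strconv; a stdlib-only sketch:

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// omFloat mirrors the normalization above: shortest 'g' formatting, with a
// trailing ".0" appended when the result contains neither '.' nor 'e', so
// OpenMetrics consumers always see a float-shaped value.
func omFloat(f float64) string {
	b := strconv.AppendFloat(nil, f, 'g', -1, 64)
	if !bytes.ContainsAny(b, "e.") {
		b = append(b, '.', '0')
	}
	return string(b)
}

func main() {
	fmt.Println(omFloat(1))    // 1.0
	fmt.Println(omFloat(0.25)) // 0.25
	fmt.Println(omFloat(1e21)) // 1e+21
}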
-func writeUint(w enhancedWriter, u uint64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendUint((*bp)[:0], u, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/expfmt/text_create.go b/openshift/tools/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index 4b86434b33..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "fmt" - "io" - "math" - "strconv" - "strings" - "sync" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// enhancedWriter has all the enhanced write functions needed here. bufio.Writer -// implements it. -type enhancedWriter interface { - io.Writer - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) - WriteByte(c byte) error -} - -const ( - initialNumBufSize = 24 -) - -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriter(io.Discard) - }, - } - numBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, initialNumBufSize) - return &b - }, - } -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// If metric names conform to the legacy validation pattern, they will be placed -// outside the brackets in the traditional way, like `foo{}`. If the metric name -// fails the legacy validation check, it will be placed quoted inside the -// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// Similar to metric names, if label names conform to the legacy validation -// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label -// name fails the legacy validation check, it will be quoted: -// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { - // Fail-fast checks. - if len(in.Metric) == 0 { - return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. 
If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var n int - - // Comments, first HELP, then TYPE. - if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = writeName(w, name) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, false) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = writeName(w, name) - written += n - if err != nil { - return - } - metricType := in.GetType() - switch metricType { - case dto.MetricType_COUNTER: - n, err = w.WriteString(" counter\n") - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Summary.GetSampleCount()), - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, 
name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// writeSample writes a single sample in text format to w, given the metric -// name, the metric proto message itself, optionally an additional label name -// with a float64 value (use empty string as label name if not required), and -// the value. The function returns the number of bytes written and any error -// encountered. -func writeSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - value float64, -) (int, error) { - written := 0 - n, err := writeNameAndLabelPairs( - w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeFloat(w, value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeInt(w, *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given metric name and additional label pair into text formatted as -// required by the text format and writes it to 'w'. An empty slice in -// combination with an empty string 'additionalLabelName' results in nothing -// being written. Otherwise, the label pairs are written, escaped as required by -// the text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. If the metric name is not -// legacy-valid, it will be put inside the brackets as well. Legacy-invalid -// label names will also be quoted. -func writeNameAndLabelPairs( - w enhancedWriter, - name string, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - var ( - written int - separator byte = '{' - metricInsideBraces = false - ) - - if name != "" { - // If the name does not pass the legacy validity check, we must put the - // metric name inside the braces. 
- if !model.IsValidLegacyMetricName(name) { - metricInsideBraces = true - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - separator = ',' - } - n, err := writeName(w, name) - written += n - if err != nil { - return written, err - } - } - - if len(in) == 0 && additionalLabelName == "" { - if metricInsideBraces { - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - } - return written, nil - } - - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := writeName(w, lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -var ( - escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) - quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { - if includeDoubleQuote { - return quotedEscaper.WriteString(w, v) - } - return escaper.WriteString(w, v) -} - -// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes -// a few common cases for increased efficiency. For non-hardcoded cases, it uses -// strconv.AppendFloat to avoid allocations, similar to writeInt. -func writeFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return 1, w.WriteByte('1') - case f == 0: - return 1, w.WriteByte('0') - case f == -1: - return w.WriteString("-1") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeInt is equivalent to fmt.Fprint with an int64 argument but uses -// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid -// allocations. -func writeInt(w enhancedWriter, i int64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendInt((*bp)[:0], i, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} - -// writeName writes a string as-is if it complies with the legacy naming -// scheme, or escapes it in double quotes if not. 
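
The two escapers above are plain strings.Replacers; only the quoted variant also escapes double quotes, since label values are emitted inside "...". They can be exercised directly with the same replacement tables:

package main

import (
	"fmt"
	"strings"
)

// Same replacement tables as the vendored escaper/quotedEscaper.
var (
	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)

func main() {
	fmt.Println(escaper.Replace("help text\nwith newline")) // help text\nwith newline
	fmt.Println(quotedEscaper.Replace(`a "quoted" value`))  // a \"quoted\" value
}
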
-func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { - return w.WriteString(name) - } - var written int - var err error - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - var n int - n, err = writeEscapedString(w, name, true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - return written, err -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go b/openshift/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index 4067978a17..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,901 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "math" - "strconv" - "strings" - "unicode/utf8" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" - - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. 
- currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool - // These indicate if the metric name from the current line being parsed is inside - // braces and if that metric name was found respectively. - currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) - if p.err != nil && errors.Is(p.err, io.EOF) { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - p.currentMF = nil -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - p.currentMetricIsInsideBraces = false - p.currentMetricInsideBracesIsPresent = false - if p.skipBlankTab(); p.err != nil { - // This is the only place that we expect to see io.EOF, - // which is not an error but the signal that we are done. 
- // Any other error that happens to align with the start of - // a line is still an error. - if errors.Is(p.err, io.EOF) { - p.err = nil - } - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - case '{': - p.currentMetricIsInsideBraces = true - return p.readingLabels - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. 
- if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) - p.currentLabelPairs = nil - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - if p.currentMetricIsInsideBraces { - if p.currentMetricInsideBracesIsPresent { - p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) - return nil - } - switch p.currentByte { - case ',': - p.setOrCreateCurrentMF() - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - p.currentMetricInsideBracesIsPresent = true - return p.startLabelName - case '}': - p.setOrCreateCurrentMF() - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) - p.currentLabelPairs = nil - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) - return nil - } - } - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - p.currentLabelPairs = nil - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && - (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { - p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) - } - // Check for duplicate label names. - labels := make(map[string]struct{}) - for _, l := range p.currentLabelPairs { - lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { - p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) - p.currentLabelPairs = nil - return nil - } - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). 
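
All of these reading/start states are driven by the TextToMetricFamilies loop shown earlier (for nextState := p.startOfLine; nextState != nil; nextState = nextState()). A minimal end-to-end usage sketch, assuming github.com/prometheus/common is available on the module path:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := "# HELP requests_total Total requests.\n" +
		"# TYPE requests_total counter\n" +
		"requests_total{code=\"200\"} 7\n"
	var parser expfmt.TextParser // zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Println(name, mf.GetType(), len(mf.GetMetric())) // requests_total COUNTER 1
	}
}
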
-func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - p.currentLabelPairs = nil - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.currentMF == nil { - p.parseError("invalid metric name") - return nil - } - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) - p.currentLabelPairs = nil - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) - p.currentLabelPairs = nil - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. 
- if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := parseFloat(p.currentToken.String()) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. -func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. - if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'HELP'. -func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. -func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. 
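
For HELP docstrings, the escape handling described above recognizes exactly two sequences: '\\' for a backslash and '\n' for a line feed. A toy decoder with the same effect on valid input (unlike the parser, this sketch silently passes through invalid sequences instead of erroring):

package main

import (
	"fmt"
	"strings"
)

func decodeHelp(s string) string {
	// `\\` is tried before `\n`, matching the byte-at-a-time state machine.
	return strings.NewReplacer(`\\`, "\\", `\n`, "\n").Replace(s)
}

func main() {
	fmt.Printf("%q\n", decodeHelp(`first line\nsecond \\ line`))
	// "first line\nsecond \\ line"
}
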
-func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - case '"': - p.currentToken.WriteByte('"') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - // A UTF-8 metric name must be quoted and may have escaped characters. - quoted := false - escaped := false - if !isValidMetricNameStart(p.currentByte) { - return - } - for p.err == nil { - if escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - case '"': - p.currentToken.WriteByte('"') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '"': - quoted = !quoted - if !quoted { - p.currentByte, p.err = p.buf.ReadByte() - return - } - case '\n': - p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - // A UTF-8 label name must be quoted and may have escaped characters. - quoted := false - escaped := false - if !isValidLabelNameStart(p.currentByte) { - return - } - for p.err == nil { - if escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - case '"': - p.currentToken.WriteByte('"') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '"': - quoted = !quoted - if !quoted { - p.currentByte, p.err = p.buf.ReadByte() - return - } - case '\n': - p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' 
functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - p.currentLabelPairs = nil - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. - summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' -} - -func isValidLabelNameContinuation(b byte, quoted bool) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte, quoted bool) bool { - return isValidLabelNameContinuation(b, quoted) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} - -func parseFloat(s string) (float64, error) { - if strings.ContainsAny(s, "pP_") { - return 0, errors.New("unsupported character in float") - 
} - return strconv.ParseFloat(s, 64) -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/alert.go b/openshift/tools/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 460f554f29..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "errors" - "fmt" - "time" -) - -type AlertStatus string - -const ( - AlertFiring AlertStatus = "firing" - AlertResolved AlertStatus = "resolved" -) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels LabelSet `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations LabelSet `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return string(a.Labels[AlertNameLabel]) -} - -// Fingerprint returns a unique hash for the alert. It is equivalent to -// the fingerprint of the alert's label set. -func (a *Alert) Fingerprint() Fingerprint { - return a.Labels.Fingerprint() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true iff the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Status returns the status of the alert. -func (a *Alert) Status() AlertStatus { - return a.StatusAt(time.Now()) -} - -// StatusAt returns the status of the alert at the given timestamp. -func (a *Alert) StatusAt(ts time.Time) AlertStatus { - if a.ResolvedAt(ts) { - return AlertResolved - } - return AlertFiring -} - -// Validate checks whether the alert data is inconsistent. 
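
A short usage sketch for the Alert type being removed from this vendor tree, assuming the upstream module is importable; an alert with a zero EndsAt is still firing:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{model.AlertNameLabel: "HighLatency", "severity": "page"},
		StartsAt: time.Now().Add(-time.Hour),
	}
	if err := a.Validate(); err != nil {
		panic(err)
	}
	fmt.Println(a.Name(), a.Status()) // HighLatency firing
}
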
-func (a *Alert) Validate() error { - if a.StartsAt.IsZero() { - return errors.New("start time missing") - } - if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return errors.New("start time must be before end time") - } - if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %w", err) - } - if len(a.Labels) == 0 { - return errors.New("at least one label pair required") - } - if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %w", err) - } - return nil -} - -// Alert is a list of alerts that can be sorted in chronological order. -type Alerts []*Alert - -func (as Alerts) Len() int { return len(as) } -func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - -func (as Alerts) Less(i, j int) bool { - if as[i].StartsAt.Before(as[j].StartsAt) { - return true - } - if as[i].EndsAt.Before(as[j].EndsAt) { - return true - } - return as[i].Fingerprint() < as[j].Fingerprint() -} - -// HasFiring returns true iff one of the alerts is not resolved. -func (as Alerts) HasFiring() bool { - for _, a := range as { - if !a.Resolved() { - return true - } - } - return false -} - -// HasFiringAt returns true iff one of the alerts is not resolved -// at the time ts. -func (as Alerts) HasFiringAt(ts time.Time) bool { - for _, a := range as { - if !a.ResolvedAt(ts) { - return true - } - } - return false -} - -// Status returns StatusFiring iff at least one of the alerts is firing. -func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} - -// StatusAt returns StatusFiring iff at least one of the alerts is firing -// at the time ts. -func (as Alerts) StatusAt(ts time.Time) AlertStatus { - if as.HasFiringAt(ts) { - return AlertFiring - } - return AlertResolved -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/fingerprinting.go b/openshift/tools/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de4106e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. 
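
Fingerprints round-trip through their 16-digit hex form; a small sketch, assuming the upstream module is importable (the printed hash value depends on the label set):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fp := model.LabelSet{"job": "api", "instance": "a:9090"}.Fingerprint()
	s := fp.String() // always 16 hex digits, zero-padded
	back, err := model.ParseFingerprint(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(s, back == fp) // <hash> true
}
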
-func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. -func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/fnv.go b/openshift/tools/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 367afecd30..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializes a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
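
The inlined FNV-1a above matches the standard library's hash/fnv byte for byte; a quick equivalence check (hashAdd is reimplemented here because the vendored copy is unexported):

package main

import (
	"fmt"
	"hash/fnv"
)

// Same constants as the inlined variant above.
const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	h := fnv.New64a()
	h.Write([]byte("__name__"))
	fmt.Println(h.Sum64() == hashAdd(offset64, "__name__")) // true
}
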
-func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/labels.go b/openshift/tools/vendor/github.com/prometheus/common/model/labels.go deleted file mode 100644 index de83afe93e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - // AlertNameLabel is the name of the label containing the alert's name. - AlertNameLabel = "alertname" - - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ScrapeIntervalLabel is the name of the label that holds the scrape interval - // used to scrape a target. - ScrapeIntervalLabel = "__scrape_interval__" - - // ScrapeTimeoutLabel is the name of the label that holds the scrape - // timeout used to scrape a target. - ScrapeTimeoutLabel = "__scrape_timeout__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users. - TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. 
- QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. Note that the -// IsValid method of LabelName performs the same check but faster than a match -// with this regular expression. -var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// IsValid returns true iff the name matches the pattern of LabelNameRE when -// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if -// NameValidationScheme is set to UTF8Validation. -func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } -} - -// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for -// legacy names. It does not use LabelNameRE for the check but a much faster -// hardcoded implementation. -func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - // TODO: Apply De Morgan's law. Make sure there are tests for this. - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck - return false - } - } - return true -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. In implements sort.Interface. -type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is a valid UTF-8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. -type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. 
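
IsValidLegacy is the fast, scheme-independent check most callers hit; a quick sketch of what passes and fails it, assuming the upstream module is importable:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []model.LabelName{"http_code", "_private", "0bad", "host.name"} {
		fmt.Println(name, name.IsValidLegacy())
	}
	// http_code true, _private true, 0bad false, host.name false
}
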
-type LabelPairs []*LabelPair
-
-func (l LabelPairs) Len() int {
- return len(l)
-}
-
-func (l LabelPairs) Less(i, j int) bool {
- switch {
- case l[i].Name > l[j].Name:
- return false
- case l[i].Name < l[j].Name:
- return true
- case l[i].Value > l[j].Value:
- return false
- case l[i].Value < l[j].Value:
- return true
- default:
- return false
- }
-}
-
-func (l LabelPairs) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/labelset.go b/openshift/tools/vendor/github.com/prometheus/common/model/labelset.go
deleted file mode 100644
index d0ad88da33..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/common/model/labelset.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "sort"
-)
-
-// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
-// may be fully-qualified down to the point where it may resolve to a single
-// Metric in the data store or not. All operations that occur within the realm
-// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
-// match.
-type LabelSet map[LabelName]LabelValue
-
-// Validate checks whether all names and values in the label set
-// are valid.
-func (ls LabelSet) Validate() error {
- for ln, lv := range ls {
- if !ln.IsValid() {
- return fmt.Errorf("invalid name %q", ln)
- }
- if !lv.IsValid() {
- return fmt.Errorf("invalid value %q", lv)
- }
- }
- return nil
-}
-
-// Equal returns true iff both label sets have exactly the same key/value pairs.
-func (ls LabelSet) Equal(o LabelSet) bool {
- if len(ls) != len(o) {
- return false
- }
- for ln, lv := range ls {
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if olv != lv {
- return false
- }
- }
- return true
-}
-
-// Before compares the label sets, using the following criteria:
-//
-// If ls has fewer labels than o, it is before o. If it has more, it is not.
-//
-// If the number of labels is the same, the superset of all label names is
-// sorted alphanumerically. The first differing label pair found in that order
-// determines the outcome: If the label does not exist at all in ls, then ls is
-// before o, and vice versa. Otherwise the label value is compared
-// alphanumerically.
-//
-// If ls and o are equal, the method returns false.
-func (ls LabelSet) Before(o LabelSet) bool {
- if len(ls) < len(o) {
- return true
- }
- if len(ls) > len(o) {
- return false
- }
-
- lns := make(LabelNames, 0, len(ls)+len(o))
- for ln := range ls {
- lns = append(lns, ln)
- }
- for ln := range o {
- lns = append(lns, ln)
- }
- // It's probably not worth it to de-dup lns.
- sort.Sort(lns)
- for _, ln := range lns {
- mlv, ok := ls[ln]
- if !ok {
- return true
- }
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if mlv < olv {
- return true
- }
- if mlv > olv {
- return false
- }
- }
- return false
-}
-
-// Clone returns a copy of the label set.
-func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/labelset_string.go b/openshift/tools/vendor/github.com/prometheus/common/model/labelset_string.go deleted file mode 100644 index abb2c90018..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/labelset_string.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "bytes" - "slices" - "strconv" -) - -// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. -func (l LabelSet) String() string { - var lna [32]string // On stack to avoid memory allocation for sorting names. - labelNames := lna[:0] - for name := range l { - labelNames = append(labelNames, string(name)) - } - slices.Sort(labelNames) - var bytea [1024]byte // On stack to avoid memory allocation while building the output. 
- b := bytes.NewBuffer(bytea[:0]) - b.WriteByte('{') - for i, name := range labelNames { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(name) - b.WriteByte('=') - b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)]))) - } - b.WriteByte('}') - return b.String() -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/metadata.go b/openshift/tools/vendor/github.com/prometheus/common/model/metadata.go deleted file mode 100644 index 447ab8ad63..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/metadata.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// MetricType represents metric type values. -type MetricType string - -const ( - MetricTypeCounter = MetricType("counter") - MetricTypeGauge = MetricType("gauge") - MetricTypeHistogram = MetricType("histogram") - MetricTypeGaugeHistogram = MetricType("gaugehistogram") - MetricTypeSummary = MetricType("summary") - MetricTypeInfo = MetricType("info") - MetricTypeStateset = MetricType("stateset") - MetricTypeUnknown = MetricType("unknown") -) diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/metric.go b/openshift/tools/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index a6b01755bd..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "errors" - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -var ( - // NameValidationScheme determines the global default method of the name - // validation to be used by all calls to IsValidMetricName() and LabelName - // IsValid(). - // - // Deprecated: This variable should not be used and might be removed in the - // far future. If you wish to stick to the legacy name validation use - // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods - // instead. This variable is here as an escape hatch for emergency cases, - // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., - // to delay UTF-8 migrations in time or aid in debugging unforeseen results of - // the change. 
In such a case, a temporary assignment to `LegacyValidation`
- // value in the `init()` function in your main.go or so, could be considered.
- //
- // Historically we opted for a global variable for feature gating different
- // validation schemes in operations that were not otherwise easily adjustable
- // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
- // Labels structure or package might have been a better choice. Given the
- // change was made and many upgraded the common already, we leave this as-is
- // with this warning and learning for the future.
- NameValidationScheme = UTF8Validation
-
- // NameEscapingScheme defines the default way that names will be escaped when
- // presented to systems that do not support UTF-8 names. If the Content-Type
- // "escaping" term is specified, that will override this value.
- // NameEscapingScheme should not be set to the NoEscaping value. That string
- // is used in content negotiation to indicate that a system supports UTF-8 and
- // has that feature enabled.
- NameEscapingScheme = UnderscoreEscaping
-)
-
-// ValidationScheme is a Go enum for determining how metric and label names will
-// be validated by this library.
-type ValidationScheme int
-
-const (
- // LegacyValidation is a setting that requires that all metric and label names
- // conform to the original Prometheus character requirements described by
- // MetricNameRE and LabelNameRE.
- LegacyValidation ValidationScheme = iota
-
- // UTF8Validation only requires that metric and label names be valid UTF-8
- // strings.
- UTF8Validation
-)
-
-type EscapingScheme int
-
-const (
- // NoEscaping indicates that a name will not be escaped. Unescaped names that
- // do not conform to the legacy validity check will use a new exposition
- // format syntax that will be officially standardized in future versions.
- NoEscaping EscapingScheme = iota
-
- // UnderscoreEscaping replaces all legacy-invalid characters with underscores.
- UnderscoreEscaping
-
- // DotsEscaping is similar to UnderscoreEscaping, except that dots are
- // converted to `_dot_` and pre-existing underscores are converted to `__`.
- DotsEscaping
-
- // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
- // characters with the unicode value, surrounded by underscores. Single
- // underscores are replaced with double underscores.
- ValueEncodingEscaping
-)
-
-const (
- // EscapingKey is the key in an Accept or Content-Type header that defines how
- // metric and label names that do not conform to the legacy character
- // requirements should be escaped when being scraped by a legacy prometheus
- // system. If a system does not explicitly pass an escaping parameter in the
- // Accept header, the default NameEscapingScheme will be used.
- EscapingKey = "escaping"
-
- // Possible values for Escaping Key:
- AllowUTF8 = "allow-utf-8" // No escaping required.
- EscapeUnderscores = "underscores"
- EscapeDots = "dots"
- EscapeValues = "values"
-)
-
-// MetricNameRE is a regular expression matching valid metric
-// names. Note that the IsValidMetricName function performs the same
-// check but faster than a match with this regular expression.
-var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-
-// A Metric is similar to a LabelSet, but the key difference is that a Metric is
-// a singleton and refers to one and only one stream of samples.
-type Metric LabelSet
-
-// Equal compares the metrics.
-func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. -func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. -func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE -// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is -// selected. -func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } -} - -// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the -// legacy validation scheme regardless of the value of NameValidationScheme. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true -} - -// EscapeMetricFamily escapes the given metric names and labels with the given -// escaping scheme. Returns a new object that uses the same pointers to fields -// when possible and creates new escaped versions so as not to mutate the -// input. -func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { - if v == nil { - return nil - } - - if scheme == NoEscaping { - return v - } - - out := &dto.MetricFamily{ - Help: v.Help, - Type: v.Type, - Unit: v.Unit, - } - - // If the name is nil, copy as-is, don't try to escape. 
- if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
- out.Name = v.Name
- } else {
- out.Name = proto.String(EscapeName(v.GetName(), scheme))
- }
- for _, m := range v.Metric {
- if !metricNeedsEscaping(m) {
- out.Metric = append(out.Metric, m)
- continue
- }
-
- escaped := &dto.Metric{
- Gauge: m.Gauge,
- Counter: m.Counter,
- Summary: m.Summary,
- Untyped: m.Untyped,
- Histogram: m.Histogram,
- TimestampMs: m.TimestampMs,
- }
-
- for _, l := range m.Label {
- if l.GetName() == MetricNameLabel {
- if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
- escaped.Label = append(escaped.Label, l)
- continue
- }
- escaped.Label = append(escaped.Label, &dto.LabelPair{
- Name: proto.String(MetricNameLabel),
- Value: proto.String(EscapeName(l.GetValue(), scheme)),
- })
- continue
- }
- if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
- escaped.Label = append(escaped.Label, l)
- continue
- }
- escaped.Label = append(escaped.Label, &dto.LabelPair{
- Name: proto.String(EscapeName(l.GetName(), scheme)),
- Value: l.Value,
- })
- }
- out.Metric = append(out.Metric, escaped)
- }
- return out
-}
-
-func metricNeedsEscaping(m *dto.Metric) bool {
- for _, l := range m.Label {
- if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
- return true
- }
- if !IsValidLegacyMetricName(l.GetName()) {
- return true
- }
- }
- return false
-}
-
-// EscapeName escapes the incoming name according to the provided escaping
-// scheme. Depending on the rules of escaping, this may cause no change in the
-// string that is returned. (Especially NoEscaping, which by definition is a
-// noop). This function does not do any validation of the name.
-func EscapeName(name string, scheme EscapingScheme) string {
- if len(name) == 0 {
- return name
- }
- var escaped strings.Builder
- switch scheme {
- case NoEscaping:
- return name
- case UnderscoreEscaping:
- if IsValidLegacyMetricName(name) {
- return name
- }
- for i, b := range name {
- if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else {
- escaped.WriteRune('_')
- }
- }
- return escaped.String()
- case DotsEscaping:
- // Do not early return for legacy valid names, we still escape underscores.
- for i, b := range name {
- if b == '_' {
- escaped.WriteString("__")
- } else if b == '.' {
- escaped.WriteString("_dot_")
- } else if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else {
- escaped.WriteString("__")
- }
- }
- return escaped.String()
- case ValueEncodingEscaping:
- if IsValidLegacyMetricName(name) {
- return name
- }
- escaped.WriteString("U__")
- for i, b := range name {
- if b == '_' {
- escaped.WriteString("__")
- } else if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else if !utf8.ValidRune(b) {
- escaped.WriteString("_FFFD_")
- } else {
- escaped.WriteRune('_')
- escaped.WriteString(strconv.FormatInt(int64(b), 16))
- escaped.WriteRune('_')
- }
- }
- return escaped.String()
- default:
- panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
- }
-}
-
-// lower function taken from strconv.atoi
-func lower(c byte) byte {
- return c | ('x' - 'X')
-}
-
-// UnescapeName unescapes the incoming name according to the provided escaping
-// scheme if possible. Some schemes are partially or totally non-roundtripable.
-// If any error is encountered, returns the original input.
-func UnescapeName(name string, scheme EscapingScheme) string { - if len(name) == 0 { - return name - } - switch scheme { - case NoEscaping: - return name - case UnderscoreEscaping: - // It is not possible to unescape from underscore replacement. - return name - case DotsEscaping: - name = strings.ReplaceAll(name, "_dot_", ".") - name = strings.ReplaceAll(name, "__", "_") - return name - case ValueEncodingEscaping: - escapedName, found := strings.CutPrefix(name, "U__") - if !found { - return name - } - - var unescaped strings.Builder - TOP: - for i := 0; i < len(escapedName); i++ { - // All non-underscores are treated normally. - if escapedName[i] != '_' { - unescaped.WriteByte(escapedName[i]) - continue - } - i++ - if i >= len(escapedName) { - return name - } - // A double underscore is a single underscore. - if escapedName[i] == '_' { - unescaped.WriteByte('_') - continue - } - // We think we are in a UTF-8 code, process it. - var utf8Val uint - for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value based on the MaxRune - // value of '\U0010FFFF'. - if j >= 6 { - return name - } - // Found a closing underscore, convert to a rune, check validity, and append. - if escapedName[i] == '_' { - utf8Rune := rune(utf8Val) - if !utf8.ValidRune(utf8Rune) { - return name - } - unescaped.WriteRune(utf8Rune) - continue TOP - } - r := lower(escapedName[i]) - utf8Val *= 16 - if r >= '0' && r <= '9' { - utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { - utf8Val += uint(r) - 'a' + 10 - } else { - return name - } - i++ - } - // Didn't find closing underscore, invalid. - return name - } - return unescaped.String() - default: - panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) - } -} - -func isValidLegacyRune(b rune, i int) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) -} - -func (e EscapingScheme) String() string { - switch e { - case NoEscaping: - return AllowUTF8 - case UnderscoreEscaping: - return EscapeUnderscores - case DotsEscaping: - return EscapeDots - case ValueEncodingEscaping: - return EscapeValues - default: - panic(fmt.Sprintf("unknown format scheme %d", e)) - } -} - -func ToEscapingScheme(s string) (EscapingScheme, error) { - if s == "" { - return NoEscaping, errors.New("got empty string instead of escaping scheme") - } - switch s { - case AllowUTF8: - return NoEscaping, nil - case EscapeUnderscores: - return UnderscoreEscaping, nil - case EscapeDots: - return DotsEscaping, nil - case EscapeValues: - return ValueEncodingEscaping, nil - default: - return NoEscaping, fmt.Errorf("unknown format scheme %s", s) - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/model.go b/openshift/tools/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b9691707..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/signature.go b/openshift/tools/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index dc8a0026c4..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -// cache the signature of an empty label set. -var emptyLabelSignature = hashNew() - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. Therefore, collision detection should be applied. 
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- var result uint64
- for labelName, labelValue := range ls {
- sum := hashNew()
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(labelValue))
- result ^= sum
- }
- return Fingerprint(result)
-}
-
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- sort.Sort(LabelNames(labels))
-
- sum := hashNew()
- for _, label := range labels {
- sum = hashAdd(sum, string(label))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[label]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
- if len(m) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make(LabelNames, 0, len(m))
- for labelName := range m {
- if _, exclude := labels[labelName]; !exclude {
- labelNames = append(labelNames, labelName)
- }
- }
- if len(labelNames) == 0 {
- return emptyLabelSignature
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/silence.go b/openshift/tools/vendor/github.com/prometheus/common/model/silence.go
deleted file mode 100644
index 8f91a9702e..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/common/model/silence.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
- "time"
-)
-
-// Matcher matches the value of a given label.
-type Matcher struct {
- Name LabelName `json:"name"`
- Value string `json:"value"`
- IsRegex bool `json:"isRegex"`
-}
-
-func (m *Matcher) UnmarshalJSON(b []byte) error {
- type plain Matcher
- if err := json.Unmarshal(b, (*plain)(m)); err != nil {
- return err
- }
-
- if len(m.Name) == 0 {
- return errors.New("label name in matcher must not be empty")
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Validate returns nil iff all fields of the matcher have valid values.
-func (m *Matcher) Validate() error {
- if !m.Name.IsValid() {
- return fmt.Errorf("invalid name %q", m.Name)
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return fmt.Errorf("invalid regular expression %q", m.Value)
- }
- } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
- return fmt.Errorf("invalid value %q", m.Value)
- }
- return nil
-}
-
-// Silence defines the representation of a silence definition in the Prometheus
-// eco-system.
-type Silence struct {
- ID uint64 `json:"id,omitempty"`
-
- Matchers []*Matcher `json:"matchers"`
-
- StartsAt time.Time `json:"startsAt"`
- EndsAt time.Time `json:"endsAt"`
-
- CreatedAt time.Time `json:"createdAt,omitempty"`
- CreatedBy string `json:"createdBy"`
- Comment string `json:"comment,omitempty"`
-}
-
-// Validate returns nil iff all fields of the silence have valid values.
-func (s *Silence) Validate() error {
- if len(s.Matchers) == 0 {
- return errors.New("at least one matcher required")
- }
- for _, m := range s.Matchers {
- if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %w", err)
- }
- }
- if s.StartsAt.IsZero() {
- return errors.New("start time missing")
- }
- if s.EndsAt.IsZero() {
- return errors.New("end time missing")
- }
- if s.EndsAt.Before(s.StartsAt) {
- return errors.New("start time must be before end time")
- }
- if s.CreatedBy == "" {
- return errors.New("creator information missing")
- }
- if s.Comment == "" {
- return errors.New("comment missing")
- }
- if s.CreatedAt.IsZero() {
- return errors.New("creation timestamp missing")
- }
- return nil
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/time.go b/openshift/tools/vendor/github.com/prometheus/common/model/time.go
deleted file mode 100644
index 5727452c1e..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/common/model/time.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- // MinimumTick is the minimum supported time resolution. This has to be
- // at most time.Second in order for the code below to work.
- minimumTick = time.Millisecond
- // second is the Time duration equivalent to one second.
- second = int64(time.Second / minimumTick)
- // The number of nanoseconds per minimum tick.
- nanosPerTick = int64(minimumTick / time.Nanosecond)
-
- // Earliest is the earliest Time representable. Handy for
- // initializing a high watermark.
- Earliest = Time(math.MinInt64)
- // Latest is the latest Time representable. Handy for initializing
- // a low watermark.
- Latest = Time(math.MaxInt64)
-)
-
-// Time is the number of milliseconds since the epoch
-// (1970-01-01 00:00 UTC) excluding leap seconds.
-type Time int64
-
-// Interval describes an interval between two timestamps.
-type Interval struct {
- Start, End Time
-}
-
-// Now returns the current time as a Time.
-func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. -func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. - if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -// Set implements pflag/flag.Value -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value -func (d *Duration) Type() string { - return "duration" -} - -func isdigit(c byte) bool { return c >= '0' && c <= '9' } - -// Units are required to go in order from biggest to smallest. -// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. 
-var unitMap = map[string]struct { - pos int - mult uint64 -}{ - "ms": {7, uint64(time.Millisecond)}, - "s": {6, uint64(time.Second)}, - "m": {5, uint64(time.Minute)}, - "h": {4, uint64(time.Hour)}, - "d": {3, uint64(24 * time.Hour)}, - "w": {2, uint64(7 * 24 * time.Hour)}, - "y": {1, uint64(365 * 24 * time.Hour)}, -} - -// ParseDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(s string) (Duration, error) { - switch s { - case "0": - // Allow 0 without a unit. - return 0, nil - case "": - return 0, errors.New("empty duration string") - } - - orig := s - var dur uint64 - lastUnitPos := 0 - - for s != "" { - if !isdigit(s[0]) { - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - // Consume [0-9]* - i := 0 - for ; i < len(s) && isdigit(s[i]); i++ { - } - v, err := strconv.ParseUint(s[:i], 10, 0) - if err != nil { - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - s = s[i:] - - // Consume unit. - for i = 0; i < len(s) && !isdigit(s[i]); i++ { - } - if i == 0 { - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - u := s[:i] - s = s[i:] - unit, ok := unitMap[u] - if !ok { - return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) - } - if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - lastUnitPos = unit.pos - // Check if the provided duration overflows time.Duration (> ~ 290years). - if v > 1<<63/unit.mult { - return 0, errors.New("duration out of range") - } - dur += v * unit.mult - if dur > 1<<63-1 { - return 0, errors.New("duration out of range") - } - } - return Duration(dur), nil -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" - ) - if ms == 0 { - return "0s" - } - - f := func(unit string, mult int64, exact bool) { - if exact && ms%mult != 0 { - return - } - if v := ms / mult; v > 0 { - r += fmt.Sprintf("%d%s", v, unit) - ms -= v * mult - } - } - - // Only format years and weeks if the remainder is zero, as it is often - // easier to read 90d than 12w6d. - f("y", 1000*60*60*24*365, true) - f("w", 1000*60*60*24*7, true) - - f("d", 1000*60*60*24, false) - f("h", 1000*60*60, false) - f("m", 1000*60, false) - f("s", 1000, false) - f("ms", 1, false) - - return r -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.String()) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *Duration) UnmarshalJSON(bytes []byte) error { - var s string - if err := json.Unmarshal(bytes, &s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface. -func (d *Duration) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (d *Duration) UnmarshalText(text []byte) error { - var err error - *d, err = ParseDuration(string(text)) - return err -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/value.go b/openshift/tools/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index 8050637d82..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strconv" - "strings" -) - -// ZeroSample is the pseudo zero-value of Sample used to signal a -// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, -// and metric nil. Note that the natural zero value of Sample has a timestamp -// of 0, which is possible to appear in a real Sample and thus not suitable -// to signal a non-existing Sample. -var ZeroSample = Sample{Timestamp: Earliest} - -// Sample is a sample pair associated with a metric. A single sample must either -// define Value or Histogram but not both. Histogram == nil implies the Value -// field is used, otherwise it should be ignored. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` - Histogram *SampleHistogram `json:"histogram"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - if s.Histogram != nil { - return s.Histogram.Equal(o.Histogram) - } - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - if s.Histogram != nil { - return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ - Timestamp: s.Timestamp, - Histogram: s.Histogram, - }) - } - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - if s.Histogram != nil { - v := struct { - Metric Metric `json:"metric"` - Histogram SampleHistogramPair `json:"histogram"` - }{ - Metric: s.Metric, - Histogram: SampleHistogramPair{ - Timestamp: s.Timestamp, - Histogram: s.Histogram, - }, - } - return json.Marshal(&v) - } - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - Histogram SampleHistogramPair `json:"histogram"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - Histogram: SampleHistogramPair{ - Timestamp: s.Timestamp, - Histogram: s.Histogram, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - if v.Histogram.Histogram != nil { - s.Timestamp = v.Histogram.Timestamp - s.Histogram = v.Histogram.Histogram - } else { - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - } - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - Histograms []SampleHistogramPair `json:"histograms"` -} - -func (ss SampleStream) String() string { - valuesLength := len(ss.Values) - vals := make([]string, valuesLength+len(ss.Histograms)) - for i, v := range ss.Values { - vals[i] = v.String() - } - for i, v := range ss.Histograms { - vals[i+valuesLength] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { - v := struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - Histograms []SampleHistogramPair `json:"histograms"` - }{ - Metric: ss.Metric, - Values: ss.Values, - Histograms: ss.Histograms, - } - return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { - v := struct { - Metric Metric `json:"metric"` - Histograms []SampleHistogramPair `json:"histograms"` - }{ - Metric: ss.Metric, - Histograms: ss.Histograms, - } - return json.Marshal(&v) - } else { - v := struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - }{ - Metric: ss.Metric, - Values: ss.Values, - } - return json.Marshal(&v) - } -} - -func (ss *SampleStream) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - Histograms []SampleHistogramPair `json:"histograms"` - }{ - Metric: ss.Metric, - Values: ss.Values, - Histograms: ss.Histograms, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - ss.Metric = v.Metric - ss.Values = v.Values - ss.Histograms = v.Histograms - - return nil -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. 
-func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %w", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. -type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/value_float.go b/openshift/tools/vendor/github.com/prometheus/common/model/value_float.go deleted file mode 100644 index 6bfc757d18..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/value_float.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "strconv" -) - -// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a -// non-existing sample pair. It is a SamplePair with timestamp Earliest and -// value 0.0. Note that the natural zero value of SamplePair has a timestamp -// of 0, which is possible to appear in a real SamplePair and thus not -// suitable to signal a non-existing SamplePair. -var ZeroSamplePair = SamplePair{Timestamp: Earliest} - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return errors.New("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/value_histogram.go b/openshift/tools/vendor/github.com/prometheus/common/model/value_histogram.go deleted file mode 100644 index 895e6a3e83..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/value_histogram.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" -) - -type FloatString float64 - -func (v FloatString) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -func (v FloatString) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -func (v *FloatString) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return errors.New("float value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = FloatString(f) - return nil -} - -type HistogramBucket struct { - Boundaries int32 - Lower FloatString - Upper FloatString - Count FloatString -} - -func (s HistogramBucket) MarshalJSON() ([]byte, error) { - b, err := json.Marshal(s.Boundaries) - if err != nil { - return nil, err - } - l, err := json.Marshal(s.Lower) - if err != nil { - return nil, err - } - u, err := json.Marshal(s.Upper) - if err != nil { - return nil, err - } - c, err := json.Marshal(s.Count) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil -} - -func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { - tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} - wantLen := len(tmp) - if err := json.Unmarshal(buf, &tmp); err != nil { - return err - } - if gotLen := len(tmp); gotLen != wantLen { - return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) - } - return nil -} - -func (s *HistogramBucket) Equal(o *HistogramBucket) bool { - return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) -} - -func (b HistogramBucket) String() string { - var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 - if lowerInclusive { - sb.WriteRune('[') - } else { - sb.WriteRune('(') - } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) - if upperInclusive { - sb.WriteRune(']') - } else { - sb.WriteRune(')') - } - fmt.Fprintf(&sb, ":%v", b.Count) - return sb.String() -} - -type HistogramBuckets []*HistogramBucket - -func (s HistogramBuckets) Equal(o HistogramBuckets) bool { - if len(s) != len(o) { - return false - } - - for i, bucket := range s { - if !bucket.Equal(o[i]) { - return false - } - } - return true -} - -type SampleHistogram struct { - Count FloatString `json:"count"` - Sum FloatString `json:"sum"` - Buckets HistogramBuckets `json:"buckets"` -} - -func (s SampleHistogram) String() string { - return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) -} - -func (s *SampleHistogram) Equal(o *SampleHistogram) bool { - return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) -} - -type SampleHistogramPair struct { - Timestamp Time - // Histogram should never be nil, it's only stored as pointer for efficiency. 
- Histogram *SampleHistogram -} - -func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { - if s.Histogram == nil { - return nil, errors.New("histogram is nil") - } - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Histogram) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { - tmp := []interface{}{&s.Timestamp, &s.Histogram} - wantLen := len(tmp) - if err := json.Unmarshal(buf, &tmp); err != nil { - return err - } - if gotLen := len(tmp); gotLen != wantLen { - return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) - } - if s.Histogram == nil { - return errors.New("histogram is null") - } - return nil -} - -func (s SampleHistogramPair) String() string { - return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) -} - -func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { - return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) -} diff --git a/openshift/tools/vendor/github.com/prometheus/common/model/value_type.go b/openshift/tools/vendor/github.com/prometheus/common/model/value_type.go deleted file mode 100644 index 726c50ee63..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/common/model/value_type.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" -) - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/.gitignore b/openshift/tools/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 7cc33ae4a7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/testdata/fixtures/ -/fixtures diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/.golangci.yml b/openshift/tools/vendor/github.com/prometheus/procfs/.golangci.yml deleted file mode 100644 index 3c3bf910fd..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/.golangci.yml +++ /dev/null @@ -1,45 +0,0 @@ -version: "2" -linters: - enable: - - forbidigo - - godot - - misspell - - revive - - testifylint - settings: - forbidigo: - forbid: - - pattern: ^fmt\.Print.*$ - msg: Do not commit print statements. - godot: - exclude: - # Ignore "See: URL". - - 'See:' - capital: true - misspell: - locale: US - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - enable: - - gofmt - - goimports - settings: - goimports: - local-prefixes: - - github.com/prometheus/procfs - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/openshift/tools/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md deleted file mode 100644 index d325872bdf..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -# Prometheus Community Code of Conduct - -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/openshift/tools/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 853eb9d49b..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,121 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) a suitable maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. 
Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). - -* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) - -## Steps to Contribute - -Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. - -Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). - -For quickly compiling and testing your changes do: -``` -make test # Make sure all the tests pass before you commit and push :) -``` - -We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. - -## Pull Request Checklist - -* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. - -* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). - -* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). - -* Add tests relevant to the fixed bug or new feature. - -## Dependency management - -The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. - -All dependencies are vendored in the `vendor/` directory. - -To add or update a new dependency, use the `go get` command: - -```bash -# Pick the latest tagged release. -go get example.com/some/module/pkg - -# Pick a specific version. -go get example.com/some/module/pkg@vX.Y.Z -``` - -Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: - - -```bash -# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. -GO111MODULE=on go mod tidy - -GO111MODULE=on go mod vendor -``` - -You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. 
- - -## API Implementation Guidelines - -### Naming and Documentation - -Public functions and structs should normally be named according to the file(s) being read and parsed. For example, -the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function -should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). - -### Reading vs. Parsing - -Most functionality in this library consists of reading files and then parsing the text into structured data. In most -cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and -a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested -directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types -such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files -in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. - -### /proc and /sys filesystem I/O - -The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. -Many of the files are changing continuously and the data being read can in some cases change between subsequent -reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls -to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the -full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of -the file. - -Note that parsing the file's contents can still be performed one line at a time. This is done by first reading -the full file, and then using a scanner on the `[]byte` or `string` containing the data. - -``` - data, err := util.ReadFileNoStat("/proc/cpuinfo") - if err != nil { - return err - } - reader := bytes.NewReader(data) - scanner := bufio.NewScanner(reader) -``` - -The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does -not bother to check the size of the file before reading. -``` - data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") -``` - diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/LICENSE b/openshift/tools/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/openshift/tools/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index e00f3b365b..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1,3 +0,0 @@ -* Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier -* Ben Kochie @SuperQ diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/Makefile b/openshift/tools/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 7edfe4d093..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -%/.unpacked: %.ttar - @echo ">> extracting fixtures $*" - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -fixtures: testdata/fixtures/.unpacked - -update_fixtures: - rm -vf testdata/fixtures/.unpacked - ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ - -.PHONY: build -build: - -.PHONY: test -test: testdata/fixtures/.unpacked common-test diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/Makefile.common b/openshift/tools/vendor/github.com/prometheus/procfs/Makefile.common deleted file mode 100644 index 0ed55c2ba2..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/Makefile.common +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. -# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./...
- -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -GOTEST := $(GO) test -GOTEST_DIR := -ifneq ($(CIRCLE_JOB),) -ifneq ($(shell command -v gotestsum 2> /dev/null),) - GOTEST_DIR := test-results - GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- -endif -endif - -PROMU_VERSION ?= 0.17.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -SKIP_GOLANGCI_LINT := -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v2.0.2 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) - # If we're in CI and there is an Actions file, that means the linter - # is being run in Actions, so we don't need to run it here. - ifneq (,$(SKIP_GOLANGCI_LINT)) - GOLANGCI_LINT := - else ifeq (,$(CIRCLE_JOB)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile -DOCKERBUILD_CONTEXT ?= ./ -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint yamllint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" - $(GO) mod download - -.PHONY: update-go-deps -update-go-deps: - @echo ">> updating Go dependencies" - @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ - done - $(GO) mod tidy - -.PHONY: common-test-short -common-test-short: $(GOTEST_DIR) - @echo ">> running short tests" - $(GOTEST) -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: $(GOTEST_DIR) - @echo ">> running all tests" - $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) - -$(GOTEST_DIR): - @mkdir -p $@ - -.PHONY: common-format -common-format: - @echo ">> formatting code" - $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" - $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -endif - -.PHONY: common-lint-fix -common-lint-fix: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint fix" - $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) -endif - -.PHONY: common-yamllint -common-yamllint: - @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell command -v yamllint 2> /dev/null)) - @echo "yamllint not installed so skipping" -else - yamllint . -endif - -# For backward-compatibility. -.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: - @echo ">> running check for unused/missing packages in go.mod" - $(GO) mod tidy - @git diff --exit-code -- go.sum go.mod - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker-repo-name -common-docker-repo-name: - @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" - -DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ - | sed -e '/install -d/d' \ - | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef - -govulncheck: install-govulncheck - govulncheck ./... - -install-govulncheck: - command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/NOTICE b/openshift/tools/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9aa11..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/README.md b/openshift/tools/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 0718239cf1..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# procfs - -This package provides functions to retrieve system, kernel, and process -metrics from the pseudo-filesystems /proc and /sys. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs) -[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) - -## Usage - -The procfs library is organized by packages based on whether the gathered data is coming from -/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, -/sys, or both. For example, cpu statistics are gathered from -`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount -point is initialized, and then the stat information is read. 
- -```go -fs, err := procfs.NewFS("/proc") -stats, err := fs.Stat() -``` - -Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. - -```go - fs, err := blockdevice.NewFS("/proc", "/sys") - stats, err := fs.ProcDiskstats() -``` - -## Package Organization - -The packages in this project are organized according to (1) whether the data comes from the `/proc` or -`/sys` filesystem and (2) the type of information being retrieved. For example, most process information -can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives -is available in the `blockdevices` sub-package. - -## Building and Testing - -The procfs library is intended to be built as part of another application, so there are no distributable binaries. -However, most of the API includes unit tests which can be run with `make test`. - -### Updating Test Fixtures - -The procfs library includes a set of test fixtures which include many example files from -the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file -which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. - -```bash -rm -rf testdata/fixtures -make test -``` - -Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When -the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file -based on the updated `fixtures` directory. And finally, verify the changes using -`git diff testdata/fixtures.ttar`. diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/SECURITY.md b/openshift/tools/vendor/github.com/prometheus/procfs/SECURITY.md deleted file mode 100644 index fed02d85c7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/SECURITY.md +++ /dev/null @@ -1,6 +0,0 @@ -# Reporting a security issue - -The Prometheus security policy, including how to report vulnerabilities, can be -found here: - -<https://prometheus.io/docs/operating/security/> diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/arp.go b/openshift/tools/vendor/github.com/prometheus/procfs/arp.go deleted file mode 100644 index 2e53344151..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/arp.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "net" - "os" - "strconv" - "strings" -) - -// Learned from include/uapi/linux/if_arp.h. -const ( - // Completed entry (ha valid). - ATFComplete = 0x02 - // Permanent entry. - ATFPermanent = 0x04 - // Publish entry. - ATFPublish = 0x08 - // Has requested trailers. - ATFUseTrailers = 0x10 - // Obsoleted: Want to use a netmask (only for proxy entries). - ATFNetmask = 0x20 - // Don't answer this addresses.
- ATFDontPublish = 0x40 -) - -// ARPEntry contains a single row of the columnar data represented in -// /proc/net/arp. -type ARPEntry struct { - // IP address - IPAddr net.IP - // MAC address - HWAddr net.HardwareAddr - // Name of the device - Device string - // Flags - Flags byte -} - -// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, -// and then return a slice of ARPEntry's. -func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := os.ReadFile(fs.proc.Path("net/arp")) - if err != nil { - return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) - } - - return parseARPEntries(data) -} - -func parseARPEntries(data []byte) ([]ARPEntry, error) { - lines := strings.Split(string(data), "\n") - entries := make([]ARPEntry, 0) - var err error - const ( - expectedDataWidth = 6 - expectedHeaderWidth = 9 - ) - for _, line := range lines { - columns := strings.Fields(line) - width := len(columns) - - if width == expectedHeaderWidth || width == 0 { - continue - } else if width == expectedDataWidth { - entry, err := parseARPEntry(columns) - if err != nil { - return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) - } - entries = append(entries, entry) - } else { - return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) - } - - } - - return entries, err -} - -func parseARPEntry(columns []string) (ARPEntry, error) { - entry := ARPEntry{Device: columns[5]} - ip := net.ParseIP(columns[0]) - entry.IPAddr = ip - - if mac, err := net.ParseMAC(columns[3]); err == nil { - entry.HWAddr = mac - } else { - return ARPEntry{}, err - } - - if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { - entry.Flags = byte(flags) - } else { - return ARPEntry{}, err - } - - return entry, nil -} - -// IsComplete returns true if ARP entry is marked with complete flag. -func (entry *ARPEntry) IsComplete() bool { - return entry.Flags&ATFComplete != 0 -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/buddyinfo.go b/openshift/tools/vendor/github.com/prometheus/procfs/buddyinfo.go deleted file mode 100644 index 8380750090..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/buddyinfo.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// A BuddyInfo is the details parsed from /proc/buddyinfo. -// The data is comprised of an array of free fragments of each size. -// The sizes are 2^n*PAGE_SIZE, where n is the array index. -type BuddyInfo struct { - Node string - Zone string - Sizes []float64 -} - -// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.proc.Path("buddyinfo")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseBuddyInfo(file) -} - -func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { - var ( - buddyInfo = []BuddyInfo{} - scanner = bufio.NewScanner(r) - bucketCount = -1 - ) - - for scanner.Scan() { - var err error - line := scanner.Text() - parts := strings.Fields(line) - - if len(parts) < 4 { - return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) - } - - node := strings.TrimSuffix(parts[1], ",") - zone := strings.TrimSuffix(parts[3], ",") - arraySize := len(parts[4:]) - - if bucketCount == -1 { - bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) - } - } - - sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { - sizes[i], err = strconv.ParseFloat(parts[i+4], 64) - if err != nil { - return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) - } - } - - buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) - } - - return buddyInfo, scanner.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cmdline.go b/openshift/tools/vendor/github.com/prometheus/procfs/cmdline.go deleted file mode 100644 index bf4f3b48c0..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cmdline.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CmdLine returns the command line of the kernel. -func (fs FS) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cmdline")) - if err != nil { - return nil, err - } - - return strings.Fields(string(data)), nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo.go deleted file mode 100644 index f0950bb495..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux -// +build linux - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. -type CPUInfo struct { - Processor uint - VendorID string - CPUFamily string - Model string - ModelName string - Stepping string - Microcode string - CPUMHz float64 - CacheSize string - PhysicalID string - Siblings uint - CoreID string - CPUCores uint - APICID string - InitialAPICID string - FPU string - FPUException string - CPUIDLevel uint - WP string - Flags []string - Bugs []string - BogoMips float64 - CLFlushSize uint - CacheAlignment uint - AddressSizes string - PowerManagement string -} - -var ( - cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) - cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) -) - -// CPUInfo returns information about current system CPUs. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) CPUInfo() ([]CPUInfo, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) - if err != nil { - return nil, err - } - return parseCPUInfo(data) -} - -func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "vendor", "vendor_id": - cpuinfo[i].VendorID = field[1] - case "cpu family": - cpuinfo[i].CPUFamily = field[1] - case "model": - cpuinfo[i].Model = field[1] - case "model name": - cpuinfo[i].ModelName = field[1] - case "stepping": - cpuinfo[i].Stepping = field[1] - case "microcode": - cpuinfo[i].Microcode = field[1] - case "cpu MHz": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "cache size": - cpuinfo[i].CacheSize = field[1] - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "apicid": - cpuinfo[i].APICID = field[1] - case "initial apicid": - cpuinfo[i].InitialAPICID = field[1] - case "fpu": - cpuinfo[i].FPU = field[1] - case "fpu_exception": - cpuinfo[i].FPUException = field[1] - case "cpuid level": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUIDLevel = uint(v) - case "wp": - cpuinfo[i].WP = field[1] - case "flags": - cpuinfo[i].Flags = strings.Fields(field[1]) - 
case "bugs": - cpuinfo[i].Bugs = strings.Fields(field[1]) - case "bogomips": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "clflush size": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CLFlushSize = uint(v) - case "cache_alignment": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CacheAlignment = uint(v) - case "address sizes": - cpuinfo[i].AddressSizes = field[1] - case "power management": - cpuinfo[i].PowerManagement = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - match, err := regexp.MatchString("^[Pp]rocessor", firstLine) - if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) - - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - featuresLine := "" - commonCPUInfo := CPUInfo{} - i := 0 - if strings.TrimSpace(field[0]) == "Processor" { - commonCPUInfo = CPUInfo{ModelName: field[1]} - i = -1 - } else { - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo = []CPUInfo{firstcpu} - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "BogoMIPS": - if i == -1 { - cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor - i++ - cpuinfo[i].Processor = 0 - } - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "Features": - featuresLine = line - case "model name": - cpuinfo[i].ModelName = field[1] - } - } - fields := strings.SplitN(featuresLine, ": ", 2) - for i := range cpuinfo { - cpuinfo[i].Flags = strings.Fields(fields[1]) - } - return cpuinfo, nil - -} - -func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - commonCPUInfo := CPUInfo{VendorID: field[1]} - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "bogomips per cpu": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - commonCPUInfo.BogoMips = v - case "features": - commonCPUInfo.Flags = strings.Fields(field[1]) - } - if strings.HasPrefix(line, "processor") { - match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) - if len(match) < 2 { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - cpu := commonCPUInfo - v, err := strconv.ParseUint(match[1], 0, 32) - if err != nil { - return nil, err - } - cpu.Processor = uint(v) - cpuinfo = append(cpuinfo, cpu) - } - if 
strings.HasPrefix(line, "cpu number") { - break - } - } - - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "cpu number": - i++ - case "cpu MHz dynamic": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - } - } - - return cpuinfo, nil -} - -func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "cpu model": - cpuinfo[i].ModelName = field[1] - case "BogoMIPS": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "CPU Family": - cpuinfo[i].CPUFamily = field[1] - case "Model Name": - cpuinfo[i].ModelName = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 
- - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "cpu": - cpuinfo[i].VendorID = field[1] - case "clock": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - case "hart": - cpuinfo[i].CoreID = field[1] - case "isa": - cpuinfo[i].ModelName = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode - return nil, errors.New("not implemented") -} - -// firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line. -func firstNonEmptyLine(scanner *bufio.Scanner) string { - for scanner.Scan() { - line := scanner.Text() - if strings.TrimSpace(line) != "" { - return line - } - } - return "" -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_armx.go deleted file mode 100644 index 64cfd534c1..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
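Every parseCPUInfo* variant deleted above shares one shape: skip to the first non-empty line, then scan "key: value" pairs, advancing an index whenever a new processor stanza begins. A minimal self-contained sketch of that pattern (illustrative only, not part of the patch; the sample input is fabricated):

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    // Two fake /proc/cpuinfo stanzas separated by a blank line.
    info := "processor\t: 0\nmodel name\t: Example CPU\n\nprocessor\t: 1\nmodel name\t: Example CPU\n"
    models := map[int]string{}
    cpu := -1
    scanner := bufio.NewScanner(strings.NewReader(info))
    for scanner.Scan() {
        line := scanner.Text()
        if !strings.Contains(line, ":") {
            continue // blank separator between stanzas
        }
        field := strings.SplitN(line, ": ", 2)
        switch strings.TrimSpace(field[0]) {
        case "processor":
            cpu++ // start of the next processor block
        case "model name":
            models[cpu] = field[1]
        }
    }
    fmt.Println(models) // map[0:Example CPU 1:Example CPU]
}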
- -//go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 - -package procfs - -var parseCPUInfo = parseCPUInfoARM diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go deleted file mode 100644 index d88442f0ed..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoLoong diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go deleted file mode 100644 index c11207f3ab..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (mips || mipsle || mips64 || mips64le) -// +build linux -// +build mips mipsle mips64 mips64le - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_others.go deleted file mode 100644 index a6b2b3127c..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x - -package procfs - -var parseCPUInfo = parseCPUInfoDummy diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go deleted file mode 100644 index 003bc2ad4a..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go deleted file mode 100644 index 1c9b7313b6..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (riscv || riscv64) -// +build linux -// +build riscv riscv64 - -package procfs - -var parseCPUInfo = parseCPUInfoRISCV diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go deleted file mode 100644 index fa3686bc00..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
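The cpuinfo_*.go shims deleted in these hunks exist only to bind the package-level parseCPUInfo variable to the right parser for the target GOARCH; mutually exclusive build constraints guarantee that exactly one binding compiles. A sketch of the pattern (not a complete program; parseCPUInfoX86 lives in cpuinfo.go):

// Compiled only on linux/386 and linux/amd64; sibling files carry
// complementary constraints so exactly one assignment survives the build.

//go:build linux && (386 || amd64)

package procfs

// Callers invoke parseCPUInfo without knowing which architecture-specific
// implementation the build selected.
var parseCPUInfo = parseCPUInfoX86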
- -//go:build linux -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoS390X diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_x86.go deleted file mode 100644 index a0ef55562e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 - -package procfs - -var parseCPUInfo = parseCPUInfoX86 diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/crypto.go b/openshift/tools/vendor/github.com/prometheus/procfs/crypto.go deleted file mode 100644 index 5f2a37a78b..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/crypto.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Crypto holds info parsed from /proc/crypto. -type Crypto struct { - Alignmask *uint64 - Async bool - Blocksize *uint64 - Chunksize *uint64 - Ctxsize *uint64 - Digestsize *uint64 - Driver string - Geniv string - Internal string - Ivsize *uint64 - Maxauthsize *uint64 - MaxKeysize *uint64 - MinKeysize *uint64 - Module string - Name string - Priority *int64 - Refcnt *int64 - Seedsize *uint64 - Selftest string - Type string - Walksize *uint64 -} - -// Crypto parses an crypto-file (/proc/crypto) and returns a slice of -// structs containing the relevant info. More information available here: -// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html -func (fs FS) Crypto() ([]Crypto, error) { - path := fs.proc.Path("crypto") - b, err := util.ReadFileNoStat(path) - if err != nil { - return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) - - } - - crypto, err := parseCrypto(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) - } - - return crypto, nil -} - -// parseCrypto parses a /proc/crypto stream into Crypto elements. -func parseCrypto(r io.Reader) ([]Crypto, error) { - var out []Crypto - - s := bufio.NewScanner(r) - for s.Scan() { - text := s.Text() - switch { - case strings.HasPrefix(text, "name"): - // Each crypto element begins with its name. 
- out = append(out, Crypto{}) - case text == "": - continue - } - - kv := strings.Split(text, ":") - if len(kv) != 2 { - return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) - } - - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - - // Parse the key/value pair into the currently focused element. - c := &out[len(out)-1] - if err := c.parseKV(k, v); err != nil { - return nil, err - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -// parseKV parses a key/value pair into the appropriate field of c. -func (c *Crypto) parseKV(k, v string) error { - vp := util.NewValueParser(v) - - switch k { - case "async": - // Interpret literal yes as true. - c.Async = v == "yes" - case "blocksize": - c.Blocksize = vp.PUInt64() - case "chunksize": - c.Chunksize = vp.PUInt64() - case "digestsize": - c.Digestsize = vp.PUInt64() - case "driver": - c.Driver = v - case "geniv": - c.Geniv = v - case "internal": - c.Internal = v - case "ivsize": - c.Ivsize = vp.PUInt64() - case "maxauthsize": - c.Maxauthsize = vp.PUInt64() - case "max keysize": - c.MaxKeysize = vp.PUInt64() - case "min keysize": - c.MinKeysize = vp.PUInt64() - case "module": - c.Module = v - case "name": - c.Name = v - case "priority": - c.Priority = vp.PInt64() - case "refcnt": - c.Refcnt = vp.PInt64() - case "seedsize": - c.Seedsize = vp.PUInt64() - case "selftest": - c.Selftest = v - case "type": - c.Type = v - case "walksize": - c.Walksize = vp.PUInt64() - } - - return vp.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/doc.go b/openshift/tools/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index f9d961e441..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. 
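With crypto.go removed in full above, a short sketch of what a consumer of that API looked like, assuming the standard /proc mount and a kernel that exposes /proc/crypto:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS() // reads /proc
    if err != nil {
        log.Fatal(err)
    }
    entries, err := fs.Crypto() // parses /proc/crypto
    if err != nil {
        log.Fatal(err)
    }
    for _, c := range entries {
        fmt.Printf("%-20s driver=%s module=%s\n", c.Name, c.Driver, c.Module)
    }
}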
-// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -package procfs diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/fs.go b/openshift/tools/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 9bdaccc7c8..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "github.com/prometheus/procfs/internal/fs" -) - -// FS represents the pseudo-filesystem sys, which provides an interface to -// kernel data structures. -type FS struct { - proc fs.FS - isReal bool -} - -const ( - // DefaultMountPoint is the common mount point of the proc filesystem. - DefaultMountPoint = fs.DefaultProcMountPoint - - // SectorSize represents the size of a sector in bytes. - // It is specific to Linux block I/O operations. - SectorSize = 512 -) - -// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. -// It will error if the mount point directory can't be read or is a file. -func NewDefaultFS() (FS, error) { - return NewFS(DefaultMountPoint) -} - -// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error -// if the mount point directory can't be read or is a file. -func NewFS(mountPoint string) (FS, error) { - fs, err := fs.NewFS(mountPoint) - if err != nil { - return FS{}, err - } - - isReal, err := isRealProc(mountPoint) - if err != nil { - return FS{}, err - } - - return FS{fs, isReal}, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/openshift/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go deleted file mode 100644 index 1b5bdbdf84..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
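NewFS above delegates its sanity check to isRealProc, whose Linux/FreeBSD variant in the next hunk compares the filesystem magic number from statfs(2) against PROC_SUPER_MAGIC. The same check in isolation, as a Linux-only sketch:

//go:build linux

package main

import (
    "fmt"
    "syscall"
)

func main() {
    var stat syscall.Statfs_t
    if err := syscall.Statfs("/proc", &stat); err != nil {
        fmt.Println("statfs failed:", err)
        return
    }
    // 0x9fa0 is PROC_SUPER_MAGIC; anything else means /proc is not procfs.
    fmt.Println("real procfs:", stat.Type == 0x9fa0)
}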
- -//go:build !freebsd && !linux -// +build !freebsd,!linux - -package procfs - -// isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct. -func isRealProc(_ string) (bool, error) { - return true, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/openshift/tools/vendor/github.com/prometheus/procfs/fs_statfs_type.go deleted file mode 100644 index 80df79c319..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build freebsd || linux -// +build freebsd linux - -package procfs - -import ( - "syscall" -) - -// isRealProc determines whether supplied mountpoint is really a proc filesystem. -func isRealProc(mountPoint string) (bool, error) { - stat := syscall.Statfs_t{} - err := syscall.Statfs(mountPoint, &stat) - if err != nil { - return false, err - } - - // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 - return stat.Type == 0x9fa0, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/fscache.go b/openshift/tools/vendor/github.com/prometheus/procfs/fscache.go deleted file mode 100644 index 7db8633077..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/fscache.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Fscacheinfo represents fscache statistics. 
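The Fscacheinfo structure that follows mirrors /proc/fs/fscache/stats counter-for-counter; a hedged sketch of reading it through the accessor removed later in this hunk (it returns an error on kernels without fscache):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    info, err := fs.Fscacheinfo() // requires fscache support in the kernel
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
    fmt.Println("objects allocated:     ", info.ObjectsAllocated)
}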
-type Fscacheinfo struct { - // Number of index cookies allocated - IndexCookiesAllocated uint64 - // data storage cookies allocated - DataStorageCookiesAllocated uint64 - // Number of special cookies allocated - SpecialCookiesAllocated uint64 - // Number of objects allocated - ObjectsAllocated uint64 - // Number of object allocation failures - ObjectAllocationsFailure uint64 - // Number of objects that reached the available state - ObjectsAvailable uint64 - // Number of objects that reached the dead state - ObjectsDead uint64 - // Number of objects that didn't have a coherency check - ObjectsWithoutCoherencyCheck uint64 - // Number of objects that passed a coherency check - ObjectsWithCoherencyCheck uint64 - // Number of objects that needed a coherency data update - ObjectsNeedCoherencyCheckUpdate uint64 - // Number of objects that were declared obsolete - ObjectsDeclaredObsolete uint64 - // Number of pages marked as being cached - PagesMarkedAsBeingCached uint64 - // Number of uncache page requests seen - UncachePagesRequestSeen uint64 - // Number of acquire cookie requests seen - AcquireCookiesRequestSeen uint64 - // Number of acq reqs given a NULL parent - AcquireRequestsWithNullParent uint64 - // Number of acq reqs rejected due to no cache available - AcquireRequestsRejectedNoCacheAvailable uint64 - // Number of acq reqs succeeded - AcquireRequestsSucceeded uint64 - // Number of acq reqs rejected due to error - AcquireRequestsRejectedDueToError uint64 - // Number of acq reqs failed on ENOMEM - AcquireRequestsFailedDueToEnomem uint64 - // Number of lookup calls made on cache backends - LookupsNumber uint64 - // Number of negative lookups made - LookupsNegative uint64 - // Number of positive lookups made - LookupsPositive uint64 - // Number of objects created by lookup - ObjectsCreatedByLookup uint64 - // Number of lookups timed out and requeued - LookupsTimedOutAndRequed uint64 - InvalidationsNumber uint64 - InvalidationsRunning uint64 - // Number of update cookie requests seen - UpdateCookieRequestSeen uint64 - // Number of upd reqs given a NULL parent - UpdateRequestsWithNullParent uint64 - // Number of upd reqs granted CPU time - UpdateRequestsRunning uint64 - // Number of relinquish cookie requests seen - RelinquishCookiesRequestSeen uint64 - // Number of rlq reqs given a NULL parent - RelinquishCookiesWithNullParent uint64 - // Number of rlq reqs waited on completion of creation - RelinquishRequestsWaitingCompleteCreation uint64 - // Relinqs rtr - RelinquishRetries uint64 - // Number of attribute changed requests seen - AttributeChangedRequestsSeen uint64 - // Number of attr changed requests queued - AttributeChangedRequestsQueued uint64 - // Number of attr changed rejected -ENOBUFS - AttributeChangedRejectDueToEnobufs uint64 - // Number of attr changed failed -ENOMEM - AttributeChangedFailedDueToEnomem uint64 - // Number of attr changed ops given CPU time - AttributeChangedOps uint64 - // Number of allocation requests seen - AllocationRequestsSeen uint64 - // Number of successful alloc reqs - AllocationOkRequests uint64 - // Number of alloc reqs that waited on lookup completion - AllocationWaitingOnLookup uint64 - // Number of alloc reqs rejected -ENOBUFS - AllocationsRejectedDueToEnobufs uint64 - // Number of alloc reqs aborted -ERESTARTSYS - AllocationsAbortedDueToErestartsys uint64 - // Number of alloc reqs submitted - AllocationOperationsSubmitted uint64 - // Number of alloc reqs waited for CPU time - AllocationsWaitedForCPU uint64 - // Number of alloc reqs aborted due to 
object death - AllocationsAbortedDueToObjectDeath uint64 - // Number of retrieval (read) requests seen - RetrievalsReadRequests uint64 - // Number of successful retr reqs - RetrievalsOk uint64 - // Number of retr reqs that waited on lookup completion - RetrievalsWaitingLookupCompletion uint64 - // Number of retr reqs returned -ENODATA - RetrievalsReturnedEnodata uint64 - // Number of retr reqs rejected -ENOBUFS - RetrievalsRejectedDueToEnobufs uint64 - // Number of retr reqs aborted -ERESTARTSYS - RetrievalsAbortedDueToErestartsys uint64 - // Number of retr reqs failed -ENOMEM - RetrievalsFailedDueToEnomem uint64 - // Number of retr reqs submitted - RetrievalsRequests uint64 - // Number of retr reqs waited for CPU time - RetrievalsWaitingCPU uint64 - // Number of retr reqs aborted due to object death - RetrievalsAbortedDueToObjectDeath uint64 - // Number of storage (write) requests seen - StoreWriteRequests uint64 - // Number of successful store reqs - StoreSuccessfulRequests uint64 - // Number of store reqs on a page already pending storage - StoreRequestsOnPendingStorage uint64 - // Number of store reqs rejected -ENOBUFS - StoreRequestsRejectedDueToEnobufs uint64 - // Number of store reqs failed -ENOMEM - StoreRequestsFailedDueToEnomem uint64 - // Number of store reqs submitted - StoreRequestsSubmitted uint64 - // Number of store reqs granted CPU time - StoreRequestsRunning uint64 - // Number of pages given store req processing time - StorePagesWithRequestsProcessing uint64 - // Number of store reqs deleted from tracking tree - StoreRequestsDeleted uint64 - // Number of store reqs over store limit - StoreRequestsOverStoreLimit uint64 - // Number of release reqs against pages with no pending store - ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 - // Number of release reqs against pages stored by time lock granted - ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 - // Number of release reqs ignored due to in-progress store - ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores canceled due to release req - PageStoresCancelledByReleaseRequests uint64 - VmscanWaiting uint64 - // Number of times async ops added to pending queues - OpsPending uint64 - // Number of times async ops given CPU time - OpsRunning uint64 - // Number of times async ops queued for processing - OpsEnqueued uint64 - // Number of async ops canceled - OpsCancelled uint64 - // Number of async ops rejected due to object lookup/create failure - OpsRejected uint64 - // Number of async ops initialized - OpsInitialised uint64 - // Number of async ops queued for deferred release - OpsDeferred uint64 - // Number of async ops released (should equal ini=N when idle) - OpsReleased uint64 - // Number of deferred-release async ops garbage collected - OpsGarbageCollected uint64 - // Number of in-progress alloc_object() cache ops - CacheopAllocationsinProgress uint64 - // Number of in-progress lookup_object() cache ops - CacheopLookupObjectInProgress uint64 - // Number of in-progress lookup_complete() cache ops - CacheopLookupCompleteInPorgress uint64 - // Number of in-progress grab_object() cache ops - CacheopGrabObjectInProgress uint64 - CacheopInvalidations uint64 - // Number of in-progress update_object() cache ops - CacheopUpdateObjectInProgress uint64 - // Number of in-progress drop_object() cache ops - CacheopDropObjectInProgress uint64 - // Number of in-progress put_object() cache ops - CacheopPutObjectInProgress uint64 - // Number of in-progress attr_changed() cache ops - 
CacheopAttributeChangeInProgress uint64 - // Number of in-progress sync_cache() cache ops - CacheopSyncCacheInProgress uint64 - // Number of in-progress read_or_alloc_page() cache ops - CacheopReadOrAllocPageInProgress uint64 - // Number of in-progress read_or_alloc_pages() cache ops - CacheopReadOrAllocPagesInProgress uint64 - // Number of in-progress allocate_page() cache ops - CacheopAllocatePageInProgress uint64 - // Number of in-progress allocate_pages() cache ops - CacheopAllocatePagesInProgress uint64 - // Number of in-progress write_page() cache ops - CacheopWritePagesInProgress uint64 - // Number of in-progress uncache_page() cache ops - CacheopUncachePagesInProgress uint64 - // Number of in-progress dissociate_pages() cache ops - CacheopDissociatePagesInProgress uint64 - // Number of object lookups/creations rejected due to lack of space - CacheevLookupsAndCreationsRejectedLackSpace uint64 - // Number of stale objects deleted - CacheevStaleObjectsDeleted uint64 - // Number of objects retired when relinquished - CacheevRetiredWhenReliquished uint64 - // Number of objects culled - CacheevObjectsCulled uint64 -} - -// Fscacheinfo returns information about current fscache statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt -func (fs FS) Fscacheinfo() (Fscacheinfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) - if err != nil { - return Fscacheinfo{}, err - } - - m, err := parseFscacheinfo(bytes.NewReader(b)) - if err != nil { - return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) - } - - return *m, nil -} - -func setFSCacheFields(fields []string, setFields ...*uint64) error { - var err error - if len(fields) < len(setFields) { - return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) - } - - for i := range setFields { - *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) - if err != nil { - return err - } - } - return nil -} - -func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { - var m Fscacheinfo - s := bufio.NewScanner(r) - for s.Scan() { - fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) - } - - switch fields[0] { - case "Cookies:": - err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, - &m.SpecialCookiesAllocated) - if err != nil { - return &m, err - } - case "Objects:": - err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, - &m.ObjectsAvailable, &m.ObjectsDead) - if err != nil { - return &m, err - } - case "ChkAux": - err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, - &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) - if err != nil { - return &m, err - } - case "Pages": - err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) - if err != nil { - return &m, err - } - case "Acquire:": - err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, - &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, - &m.AcquireRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - case "Lookups:": - err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, - &m.ObjectsCreatedByLookup, 
&m.LookupsTimedOutAndRequed) - if err != nil { - return &m, err - } - case "Invals": - err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) - if err != nil { - return &m, err - } - case "Updates:": - err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, - &m.UpdateRequestsRunning) - if err != nil { - return &m, err - } - case "Relinqs:": - err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, - &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) - if err != nil { - return &m, err - } - case "AttrChg:": - err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, - &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) - if err != nil { - return &m, err - } - case "Allocs": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, - &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, - &m.AllocationsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Retrvls:": - if strings.Split(fields[1], "=")[0] == "n" { - err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, - &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, - &m.RetrievalsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Stores": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, - &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, - &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) - if err != nil { - return &m, err - } - } - case "VmScan": - err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, - &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, - &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) - if err != nil { - return &m, err - } - case "Ops": - if strings.Split(fields[2], "=")[0] == "pend" { - err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) - if err != nil { - return &m, err - } - } - case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { - err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, - &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) - if err != nil { - return &m, err - } - } else if strings.Split(fields[1], "=")[0] == "inv" { - err := setFSCacheFields(fields[1:], 
&m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, - &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, - &m.CacheopSyncCacheInProgress) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, - &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, - &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) - if err != nil { - return &m, err - } - } - case "CacheEv:": - err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, - &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) - if err != nil { - return &m, err - } - } - } - - return &m, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/openshift/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go deleted file mode 100644 index 3a43e83915..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fs - -import ( - "fmt" - "os" - "path/filepath" -) - -const ( - // DefaultProcMountPoint is the common mount point of the proc filesystem. - DefaultProcMountPoint = "/proc" - - // DefaultSysMountPoint is the common mount point of the sys filesystem. - DefaultSysMountPoint = "/sys" - - // DefaultConfigfsMountPoint is the common mount point of the configfs. - DefaultConfigfsMountPoint = "/sys/kernel/config" - - // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. - DefaultSelinuxMountPoint = "/sys/fs/selinux" -) - -// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an -// interface to kernel data structures. -type FS string - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %q: %w", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %q is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path appends the given path elements to the filesystem path, adding separators -// as necessary. -func (fs FS) Path(p ...string) string { - return filepath.Join(append([]string{string(fs)}, p...)...) 
-} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go b/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 5a7d2df06a..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "errors" - "os" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ParsePInt64s parses a slice of strings into a slice of int64 pointers. -func ParsePInt64s(ss []string) ([]*int64, error) { - us := make([]*int64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, &u) - } - - return us, nil -} - -// Parses a uint64 from given hex in string. -func ParseHexUint64s(ss []string) ([]*uint64, error) { - us := make([]*uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return nil, err - } - - us = append(us, &u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// ReadIntFromFile reads a file and attempts to parse a int64 from it. -func ReadIntFromFile(path string) (int64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) -} - -// ParseBool parses a string into a boolean pointer. -func ParseBool(b string) *bool { - var truth bool - switch b { - case "enabled": - truth = true - case "disabled": - truth = false - default: - return nil - } - return &truth -} - -// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. 
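Most helpers in this deleted file reduce to the same two steps: read the whole (small) file, then strconv-parse the trimmed contents. A standalone re-creation of the ReadUintFromFile pattern against a temporary file (the path is a stand-in, not a real procfs entry):

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
    "strconv"
    "strings"
)

func readUintFromFile(path string) (uint64, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return 0, err
    }
    // proc/sys values carry a trailing newline; trim before parsing.
    return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
    path := filepath.Join(os.TempDir(), "poolsize") // stand-in for a real procfs path
    if err := os.WriteFile(path, []byte("4096\n"), 0o600); err != nil {
        log.Fatal(err)
    }
    v, err := readUintFromFile(path)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v) // 4096
}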
-func ReadHexFromFile(path string) (uint64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - hexString := strings.TrimSpace(string(data)) - if !strings.HasPrefix(hexString, "0x") { - return 0, errors.New("invalid format: hex string does not start with '0x'") - } - return strconv.ParseUint(hexString[2:], 16, 64) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/readfile.go deleted file mode 100644 index 71b7a70ebd..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io" - "os" -) - -// ReadFileNoStat uses io.ReadAll to read contents of entire file. -// This is similar to os.ReadFile but without the call to os.Stat, because -// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 1024kB. For files larger than this, a scanner -// should be used. -func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 1024 - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - - reader := io.LimitReader(f, maxBufferSize) - return io.ReadAll(reader) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go deleted file mode 100644 index d5404a6d72..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (linux || darwin) && !appengine -// +build linux darwin -// +build !appengine - -package util - -import ( - "bytes" - "os" - "strconv" - "strings" - "syscall" -) - -// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -// -// Note that this function will not read files larger than 128 bytes. -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's os.ReadFile implementation to poll forever. 
- // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. - const sysFileBufferSize = 128 - b := make([]byte, sysFileBufferSize) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} - -// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. -func SysReadUintFromFile(path string) (uint64, error) { - data, err := SysReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it. -func SysReadIntFromFile(path string) (int64, error) { - data, err := SysReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go deleted file mode 100644 index 1d86f5e63f..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (linux && appengine) || (!linux && !darwin) -// +build linux,appengine !linux,!darwin - -package util - -import ( - "fmt" -) - -// SysReadFile is here implemented as a noop for builds that do not support -// the read syscall. For example Windows, or Linux on Google App Engine. -func SysReadFile(file string) (string, error) { - return "", fmt.Errorf("not supported on this platform") -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/valueparser.go deleted file mode 100644 index fe2355d3c6..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/internal/util/valueparser.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "strconv" -) - -// TODO(mdlayher): util packages are an anti-pattern and this should be moved -// somewhere else that is more focused in the future. - -// A ValueParser enables parsing a single string into a variety of data types -// in a concise and safe way. 
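As the rest of this doc comment explains, the deleted ValueParser latches the first conversion error so callers can chain several parses and check Err once. A self-contained re-creation of that error-latching pattern (miniParser is illustrative, not the vendored type):

package main

import (
    "fmt"
    "strconv"
)

// miniParser re-creates the error-latching idea behind util.ValueParser:
// conversions after a failure become no-ops, and the first error sticks.
type miniParser struct {
    v   string
    err error
}

func (p *miniParser) uint64() uint64 {
    if p.err != nil {
        return 0
    }
    v, err := strconv.ParseUint(p.v, 0, 64) // base 0 honors 0x/0 prefixes
    if err != nil {
        p.err = err
        return 0
    }
    return v
}

func main() {
    p := &miniParser{v: "0x1f"}
    fmt.Println(p.uint64(), p.err) // 31 <nil>

    bad := &miniParser{v: "not-a-number"}
    _ = bad.uint64()
    fmt.Println(bad.err != nil) // true
}

In the deleted crypto.go, parseKV relies on exactly this: each case assigns through vp.PUInt64() or vp.PInt64(), and the function returns vp.Err() once at the end.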
The Err method must be invoked after invoking -// any other methods to ensure a value was successfully parsed. -type ValueParser struct { - v string - err error -} - -// NewValueParser creates a ValueParser using the input string. -func NewValueParser(v string) *ValueParser { - return &ValueParser{v: v} -} - -// Int interprets the underlying value as an int and returns that value. -func (vp *ValueParser) Int() int { return int(vp.int64()) } - -// PInt64 interprets the underlying value as an int64 and returns a pointer to -// that value. -func (vp *ValueParser) PInt64() *int64 { - if vp.err != nil { - return nil - } - - v := vp.int64() - return &v -} - -// int64 interprets the underlying value as an int64 and returns that value. -// TODO: export if/when necessary. -func (vp *ValueParser) int64() int64 { - if vp.err != nil { - return 0 - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseInt(vp.v, base, 64) - if err != nil { - vp.err = err - return 0 - } - - return v -} - -// PUInt64 interprets the underlying value as an uint64 and returns a pointer to -// that value. -func (vp *ValueParser) PUInt64() *uint64 { - if vp.err != nil { - return nil - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseUint(vp.v, base, 64) - if err != nil { - vp.err = err - return nil - } - - return &v -} - -// Err returns the last error, if any, encountered by the ValueParser. -func (vp *ValueParser) Err() error { - return vp.err -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/ipvs.go b/openshift/tools/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index bc3a20c932..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The remote (real) port. - RemotePort uint16 - // The local firewall mark - LocalMark string - // The transport protocol (TCP, UDP). 
- Proto string - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) IPVSStats() (IPVSStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - - return parseIPVSStats(bytes.NewReader(data)) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(r io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := io.ReadAll(r) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
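A hedged sketch pairing IPVSStats above with the IPVSBackendStatus accessor documented here; both parse files that exist only when the ip_vs kernel module is loaded:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    stats, err := fs.IPVSStats() // /proc/net/ip_vs_stats
    if err != nil {
        log.Fatal(err) // fails unless IPVS is active on the host
    }
    fmt.Println("total connections:", stats.Connections)

    backends, err := fs.IPVSBackendStatus() // /proc/net/ip_vs
    if err != nil {
        log.Fatal(err)
    }
    for _, b := range backends {
        fmt.Printf("%s %s:%d -> %s:%d weight=%d\n",
            b.Proto, b.LocalAddress, b.LocalPort, b.RemoteAddress, b.RemotePort, b.Weight)
    }
}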
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localMark string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = "" - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "FWM": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = fields[1] - localAddress = nil - localPort = 0 - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - LocalMark: localMark, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - var ( - ip net.IP - err error - ) - - switch len(s) { - case 13: - ip, err = hex.DecodeString(s[0:8]) - if err != nil { - return nil, 0, err - } - case 46: - ip = net.ParseIP(s[1:40]) - if ip == nil { - return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) - } - default: - return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) - } - - portString := s[len(s)-4:] - if len(portString) != 4 { - return nil, 0, - fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) - } - port, err := strconv.ParseUint(portString, 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/kernel_random.go b/openshift/tools/vendor/github.com/prometheus/procfs/kernel_random.go deleted file mode 100644 index db88566bdf..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/kernel_random.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
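The kernel_random.go hunk that follows maps the files under /proc/sys/kernel/random onto optional fields; a sketch of consuming them through the public API. The pointer fields stay nil when a file is absent, so callers must guard each dereference:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    kr, err := fs.KernelRandom()
    if err != nil {
        log.Fatal(err)
    }
    // Nil means the corresponding file was missing on this kernel.
    if kr.EntropyAvaliable != nil { // field name is misspelled upstream
        fmt.Println("entropy available:", *kr.EntropyAvaliable)
    }
    if kr.PoolSize != nil {
        fmt.Println("pool size:", *kr.PoolSize)
    }
}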
- -//go:build !windows -// +build !windows - -package procfs - -import ( - "os" - - "github.com/prometheus/procfs/internal/util" -) - -// KernelRandom contains information about to the kernel's random number generator. -type KernelRandom struct { - // EntropyAvaliable gives the available entropy, in bits. - EntropyAvaliable *uint64 - // PoolSize gives the size of the entropy pool, in bits. - PoolSize *uint64 - // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. - URandomMinReseedSeconds *uint64 - // WriteWakeupThreshold the number of bits of entropy below which we wake up processes - // that do a select(2) or poll(2) for write access to /dev/random. - WriteWakeupThreshold *uint64 - // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep - // waiting for entropy from /dev/random. - ReadWakeupThreshold *uint64 -} - -// KernelRandom returns values from /proc/sys/kernel/random. -func (fs FS) KernelRandom() (KernelRandom, error) { - random := KernelRandom{} - - for file, p := range map[string]**uint64{ - "entropy_avail": &random.EntropyAvaliable, - "poolsize": &random.PoolSize, - "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, - "write_wakeup_threshold": &random.WriteWakeupThreshold, - "read_wakeup_threshold": &random.ReadWakeupThreshold, - } { - val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) - if os.IsNotExist(err) { - continue - } - if err != nil { - return random, err - } - *p = &val - } - - return random, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/loadavg.go b/openshift/tools/vendor/github.com/prometheus/procfs/loadavg.go deleted file mode 100644 index 332e76c17f..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/loadavg.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// LoadAvg represents an entry in /proc/loadavg. -type LoadAvg struct { - Load1 float64 - Load5 float64 - Load15 float64 -} - -// LoadAvg returns loadavg from /proc. -func (fs FS) LoadAvg() (*LoadAvg, error) { - path := fs.proc.Path("loadavg") - - data, err := util.ReadFileNoStat(path) - if err != nil { - return nil, err - } - return parseLoad(data) -} - -// Parse /proc loadavg and return 1m, 5m and 15m. 
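Before the parser itself, a usage sketch for the FS.LoadAvg accessor defined above; a minimal example, assuming the upstream github.com/prometheus/procfs module and its DefaultMountPoint constant (not shown in this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // conventionally "/proc"
	if err != nil {
		log.Fatal(err)
	}
	avg, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load1=%.2f load5=%.2f load15=%.2f\n", avg.Load1, avg.Load5, avg.Load15)
}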
-func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { - loads := make([]float64, 3) - parts := strings.Fields(string(loadavgBytes)) - if len(parts) < 3 { - return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) - } - - var err error - for i, load := range parts[0:3] { - loads[i], err = strconv.ParseFloat(load, 64) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) - } - } - return &LoadAvg{ - Load1: loads[0], - Load5: loads[1], - Load15: loads[2], - }, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/mdstat.go b/openshift/tools/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index 67a9d2b448..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "regexp" - "strconv" - "strings" -) - -var ( - statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) - recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) - recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) - recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) - componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device requires. - DisksTotal int64 - // Number of failed disks. - DisksFailed int64 - // Number of "down" disks. (the _ indicator in the status line) - DisksDown int64 - // Spare disks in the device. - DisksSpare int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 - // Number of blocks on the device that need to be synced. - BlocksToBeSynced int64 - // progress percentage of current sync - BlocksSyncedPct float64 - // estimated finishing time for current sync (in minutes) - BlocksSyncedFinishTime float64 - // current sync speed (in Kilobytes/sec) - BlocksSyncedSpeed float64 - // Name of md component devices - Devices []string -} - -// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. More information available here: -// https://raid.wiki.kernel.org/index.php/Mdstat -func (fs FS) MDStat() ([]MDStat, error) { - data, err := os.ReadFile(fs.proc.Path("mdstat")) - if err != nil { - return nil, err - } - mdstat, err := parseMDStat(data) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) - } - return mdstat, nil -} - -// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. 
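The public entry point for the mdstat data is the FS.MDStat accessor shown above; a minimal sketch of how a caller iterates the parsed devices, assuming the upstream github.com/prometheus/procfs module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	devices, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range devices {
		fmt.Printf("%s: %s, %d/%d disks, %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}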
-func parseMDStat(mdStatData []byte) ([]MDStat, error) { - mdStats := []MDStat{} - lines := strings.Split(string(mdStatData), "\n") - - for i, line := range lines { - if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || - strings.HasPrefix(line, "unused") { - continue - } - - deviceFields := strings.Fields(line) - if len(deviceFields) < 3 { - return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) - } - mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive - - if len(lines) <= i+3 { - return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) - } - - // Failed disks have the suffix (F) & Spare disks have the suffix (S). - fail := int64(strings.Count(line, "(F)")) - spare := int64(strings.Count(line, "(S)")) - active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) - - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) - } - - syncLineIdx := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - syncLineIdx++ - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. - blocksSynced := size - blocksToBeSynced := size - speed := float64(0) - finish := float64(0) - pct := float64(0) - recovering := strings.Contains(lines[syncLineIdx], "recovery") - resyncing := strings.Contains(lines[syncLineIdx], "resync") - checking := strings.Contains(lines[syncLineIdx], "check") - - // Append recovery and resyncing state info. - if recovering || resyncing || checking { - if recovering { - state = "recovering" - } else if checking { - state = "checking" - } else { - state = "resyncing" - } - - // Handle case when resync=PENDING or resync=DELAYED. - if strings.Contains(lines[syncLineIdx], "PENDING") || - strings.Contains(lines[syncLineIdx], "DELAYED") { - blocksSynced = 0 - } else { - blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) - } - } - } - - mdStats = append(mdStats, MDStat{ - Name: mdName, - ActivityState: state, - DisksActive: active, - DisksFailed: fail, - DisksDown: down, - DisksSpare: spare, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: blocksSynced, - BlocksToBeSynced: blocksToBeSynced, - BlocksSyncedPct: pct, - BlocksSyncedFinishTime: finish, - BlocksSyncedSpeed: speed, - Devices: evalComponentDevices(deviceFields), - }) - } - - return mdStats, nil -} - -func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { - statusFields := strings.Fields(statusLine) - if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) - } - - sizeStr := statusFields[0] - size, err = strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) - } - - if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { - // In the device deviceLine, only disks have a number associated with them in []. 
- total = int64(strings.Count(deviceLine, "[")) - return total, total, 0, size, nil - } - - if strings.Contains(deviceLine, "inactive") { - return 0, 0, 0, size, nil - } - - matches := statusLineRE.FindStringSubmatch(statusLine) - if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) - } - down = int64(strings.Count(matches[4], "_")) - - return active, total, down, size, nil -} - -func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { - matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) - } - - blocks := strings.Split(matches[1], "/") - blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) - if err != nil { - return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) - } - - blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) - if err != nil { - return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) - } - - // Get percentage complete - matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) - } - pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) - if err != nil { - return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) - } - - // Get time expected left to complete - matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. 
finish time: %s", ErrFileParse, recoveryLine) - } - finish, err = strconv.ParseFloat(matches[1], 64) - if err != nil { - return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) - } - - // Get recovery speed - matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) - } - speed, err = strconv.ParseFloat(matches[1], 64) - if err != nil { - return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) - } - - return blocksSynced, blocksToBeSynced, pct, finish, speed, nil -} - -func evalComponentDevices(deviceFields []string) []string { - mdComponentDevices := make([]string, 0) - if len(deviceFields) > 3 { - for _, field := range deviceFields[4:] { - match := componentDeviceRE.FindStringSubmatch(field) - if match == nil { - continue - } - mdComponentDevices = append(mdComponentDevices, match[1]) - } - } - - return mdComponentDevices -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/meminfo.go b/openshift/tools/vendor/github.com/prometheus/procfs/meminfo.go deleted file mode 100644 index 4b2c4050a3..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/meminfo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Meminfo represents memory statistics. -type Meminfo struct { - // Total usable ram (i.e. physical ram minus a few reserved - // bits and the kernel binary code) - MemTotal *uint64 - // The sum of LowFree+HighFree - MemFree *uint64 - // An estimate of how much memory is available for starting - // new applications, without swapping. Calculated from - // MemFree, SReclaimable, the size of the file LRU lists, and - // the low watermarks in each zone. The estimate takes into - // account that the system needs some page cache to function - // well, and that not all reclaimable slab will be - // reclaimable, due to items being in use. The impact of those - // factors will vary from system to system. - MemAvailable *uint64 - // Relatively temporary storage for raw disk blocks shouldn't - // get tremendously large (20MB or so) - Buffers *uint64 - Cached *uint64 - // Memory that once was swapped out, is swapped back in but - // still also is in the swapfile (if memory is needed it - // doesn't need to be swapped out AGAIN because it is already - // in the swapfile. This saves I/O) - SwapCached *uint64 - // Memory that has been used more recently and usually not - // reclaimed unless absolutely necessary. - Active *uint64 - // Memory which has been less recently used. 
It is more - // eligible to be reclaimed for other purposes - Inactive *uint64 - ActiveAnon *uint64 - InactiveAnon *uint64 - ActiveFile *uint64 - InactiveFile *uint64 - Unevictable *uint64 - Mlocked *uint64 - // total amount of swap space available - SwapTotal *uint64 - // Memory which has been evicted from RAM, and is temporarily - // on the disk - SwapFree *uint64 - // Memory which is waiting to get written back to the disk - Dirty *uint64 - // Memory which is actively being written back to the disk - Writeback *uint64 - // Non-file backed pages mapped into userspace page tables - AnonPages *uint64 - // files which have been mapped, such as libraries - Mapped *uint64 - Shmem *uint64 - // in-kernel data structures cache - Slab *uint64 - // Part of Slab, that might be reclaimed, such as caches - SReclaimable *uint64 - // Part of Slab, that cannot be reclaimed on memory pressure - SUnreclaim *uint64 - KernelStack *uint64 - // amount of memory dedicated to the lowest level of page - // tables. - PageTables *uint64 - // NFS pages sent to the server, but not yet committed to - // stable storage - NFSUnstable *uint64 - // Memory used for block device "bounce buffers" - Bounce *uint64 - // Memory used by FUSE for temporary writeback buffers - WritebackTmp *uint64 - // Based on the overcommit ratio ('vm.overcommit_ratio'), - // this is the total amount of memory currently available to - // be allocated on the system. This limit is only adhered to - // if strict overcommit accounting is enabled (mode 2 in - // 'vm.overcommit_memory'). - // The CommitLimit is calculated with the following formula: - // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * - // overcommit_ratio / 100 + [total swap pages] - // For example, on a system with 1G of physical RAM and 7G - // of swap with a `vm.overcommit_ratio` of 30 it would - // yield a CommitLimit of 7.3G. - // For more details, see the memory overcommit documentation - // in vm/overcommit-accounting. - CommitLimit *uint64 - // The amount of memory presently allocated on the system. - // The committed memory is a sum of all of the memory which - // has been allocated by processes, even if it has not been - // "used" by them as of yet. A process which malloc()'s 1G - // of memory, but only touches 300M of it will show up as - // using 1G. This 1G is memory which has been "committed" to - // by the VM and can be used at any time by the allocating - // application. With strict overcommit enabled on the system - // (mode 2 in 'vm.overcommit_memory'),allocations which would - // exceed the CommitLimit (detailed above) will not be permitted. - // This is useful if one needs to guarantee that processes will - // not fail due to lack of memory once that memory has been - // successfully allocated. - CommittedAS *uint64 - // total size of vmalloc memory area - VmallocTotal *uint64 - // amount of vmalloc area which is used - VmallocUsed *uint64 - // largest contiguous block of vmalloc area which is free - VmallocChunk *uint64 - Percpu *uint64 - HardwareCorrupted *uint64 - AnonHugePages *uint64 - ShmemHugePages *uint64 - ShmemPmdMapped *uint64 - CmaTotal *uint64 - CmaFree *uint64 - HugePagesTotal *uint64 - HugePagesFree *uint64 - HugePagesRsvd *uint64 - HugePagesSurp *uint64 - Hugepagesize *uint64 - DirectMap4k *uint64 - DirectMap2M *uint64 - DirectMap1G *uint64 - - // The struct fields below are the byte-normalized counterparts to the - // existing struct fields. Values are normalized using the optional - // unit field in the meminfo line. 
- MemTotalBytes *uint64 - MemFreeBytes *uint64 - MemAvailableBytes *uint64 - BuffersBytes *uint64 - CachedBytes *uint64 - SwapCachedBytes *uint64 - ActiveBytes *uint64 - InactiveBytes *uint64 - ActiveAnonBytes *uint64 - InactiveAnonBytes *uint64 - ActiveFileBytes *uint64 - InactiveFileBytes *uint64 - UnevictableBytes *uint64 - MlockedBytes *uint64 - SwapTotalBytes *uint64 - SwapFreeBytes *uint64 - DirtyBytes *uint64 - WritebackBytes *uint64 - AnonPagesBytes *uint64 - MappedBytes *uint64 - ShmemBytes *uint64 - SlabBytes *uint64 - SReclaimableBytes *uint64 - SUnreclaimBytes *uint64 - KernelStackBytes *uint64 - PageTablesBytes *uint64 - NFSUnstableBytes *uint64 - BounceBytes *uint64 - WritebackTmpBytes *uint64 - CommitLimitBytes *uint64 - CommittedASBytes *uint64 - VmallocTotalBytes *uint64 - VmallocUsedBytes *uint64 - VmallocChunkBytes *uint64 - PercpuBytes *uint64 - HardwareCorruptedBytes *uint64 - AnonHugePagesBytes *uint64 - ShmemHugePagesBytes *uint64 - ShmemPmdMappedBytes *uint64 - CmaTotalBytes *uint64 - CmaFreeBytes *uint64 - HugepagesizeBytes *uint64 - DirectMap4kBytes *uint64 - DirectMap2MBytes *uint64 - DirectMap1GBytes *uint64 -} - -// Meminfo returns an information about current kernel/system memory statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Meminfo() (Meminfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) - if err != nil { - return Meminfo{}, err - } - - m, err := parseMemInfo(bytes.NewReader(b)) - if err != nil { - return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) - } - - return *m, nil -} - -func parseMemInfo(r io.Reader) (*Meminfo, error) { - var m Meminfo - s := bufio.NewScanner(r) - for s.Scan() { - fields := strings.Fields(s.Text()) - var val, valBytes uint64 - - val, err := strconv.ParseUint(fields[1], 0, 64) - if err != nil { - return nil, err - } - - switch len(fields) { - case 2: - // No unit present, use the parsed the value as bytes directly. - valBytes = val - case 3: - // Unit present in optional 3rd field, convert it to - // bytes. The only unit supported within the Linux - // kernel is `kB`. 
- if fields[2] != "kB" { - return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2]) - } - - valBytes = 1024 * val - - default: - return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) - } - - switch fields[0] { - case "MemTotal:": - m.MemTotal = &val - m.MemTotalBytes = &valBytes - case "MemFree:": - m.MemFree = &val - m.MemFreeBytes = &valBytes - case "MemAvailable:": - m.MemAvailable = &val - m.MemAvailableBytes = &valBytes - case "Buffers:": - m.Buffers = &val - m.BuffersBytes = &valBytes - case "Cached:": - m.Cached = &val - m.CachedBytes = &valBytes - case "SwapCached:": - m.SwapCached = &val - m.SwapCachedBytes = &valBytes - case "Active:": - m.Active = &val - m.ActiveBytes = &valBytes - case "Inactive:": - m.Inactive = &val - m.InactiveBytes = &valBytes - case "Active(anon):": - m.ActiveAnon = &val - m.ActiveAnonBytes = &valBytes - case "Inactive(anon):": - m.InactiveAnon = &val - m.InactiveAnonBytes = &valBytes - case "Active(file):": - m.ActiveFile = &val - m.ActiveFileBytes = &valBytes - case "Inactive(file):": - m.InactiveFile = &val - m.InactiveFileBytes = &valBytes - case "Unevictable:": - m.Unevictable = &val - m.UnevictableBytes = &valBytes - case "Mlocked:": - m.Mlocked = &val - m.MlockedBytes = &valBytes - case "SwapTotal:": - m.SwapTotal = &val - m.SwapTotalBytes = &valBytes - case "SwapFree:": - m.SwapFree = &val - m.SwapFreeBytes = &valBytes - case "Dirty:": - m.Dirty = &val - m.DirtyBytes = &valBytes - case "Writeback:": - m.Writeback = &val - m.WritebackBytes = &valBytes - case "AnonPages:": - m.AnonPages = &val - m.AnonPagesBytes = &valBytes - case "Mapped:": - m.Mapped = &val - m.MappedBytes = &valBytes - case "Shmem:": - m.Shmem = &val - m.ShmemBytes = &valBytes - case "Slab:": - m.Slab = &val - m.SlabBytes = &valBytes - case "SReclaimable:": - m.SReclaimable = &val - m.SReclaimableBytes = &valBytes - case "SUnreclaim:": - m.SUnreclaim = &val - m.SUnreclaimBytes = &valBytes - case "KernelStack:": - m.KernelStack = &val - m.KernelStackBytes = &valBytes - case "PageTables:": - m.PageTables = &val - m.PageTablesBytes = &valBytes - case "NFS_Unstable:": - m.NFSUnstable = &val - m.NFSUnstableBytes = &valBytes - case "Bounce:": - m.Bounce = &val - m.BounceBytes = &valBytes - case "WritebackTmp:": - m.WritebackTmp = &val - m.WritebackTmpBytes = &valBytes - case "CommitLimit:": - m.CommitLimit = &val - m.CommitLimitBytes = &valBytes - case "Committed_AS:": - m.CommittedAS = &val - m.CommittedASBytes = &valBytes - case "VmallocTotal:": - m.VmallocTotal = &val - m.VmallocTotalBytes = &valBytes - case "VmallocUsed:": - m.VmallocUsed = &val - m.VmallocUsedBytes = &valBytes - case "VmallocChunk:": - m.VmallocChunk = &val - m.VmallocChunkBytes = &valBytes - case "Percpu:": - m.Percpu = &val - m.PercpuBytes = &valBytes - case "HardwareCorrupted:": - m.HardwareCorrupted = &val - m.HardwareCorruptedBytes = &valBytes - case "AnonHugePages:": - m.AnonHugePages = &val - m.AnonHugePagesBytes = &valBytes - case "ShmemHugePages:": - m.ShmemHugePages = &val - m.ShmemHugePagesBytes = &valBytes - case "ShmemPmdMapped:": - m.ShmemPmdMapped = &val - m.ShmemPmdMappedBytes = &valBytes - case "CmaTotal:": - m.CmaTotal = &val - m.CmaTotalBytes = &valBytes - case "CmaFree:": - m.CmaFree = &val - m.CmaFreeBytes = &valBytes - case "HugePages_Total:": - m.HugePagesTotal = &val - case "HugePages_Free:": - m.HugePagesFree = &val - case "HugePages_Rsvd:": - m.HugePagesRsvd = &val - case "HugePages_Surp:": - m.HugePagesSurp = &val - case 
"Hugepagesize:": - m.Hugepagesize = &val - m.HugepagesizeBytes = &valBytes - case "DirectMap4k:": - m.DirectMap4k = &val - m.DirectMap4kBytes = &valBytes - case "DirectMap2M:": - m.DirectMap2M = &val - m.DirectMap2MBytes = &valBytes - case "DirectMap1G:": - m.DirectMap1G = &val - m.DirectMap1GBytes = &valBytes - } - } - - return &m, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/mountinfo.go b/openshift/tools/vendor/github.com/prometheus/procfs/mountinfo.go deleted file mode 100644 index a704c5e735..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/mountinfo.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A MountInfo is a type that describes the details, options -// for each mount, parsed from /proc/self/mountinfo. -// The fields described in each entry of /proc/self/mountinfo -// is described in the following man page. -// http://man7.org/linux/man-pages/man5/proc.5.html -type MountInfo struct { - // Unique ID for the mount - MountID int - // The ID of the parent mount - ParentID int - // The value of `st_dev` for the files on this FS - MajorMinorVer string - // The pathname of the directory in the FS that forms - // the root for this mount - Root string - // The pathname of the mount point relative to the root - MountPoint string - // Mount options - Options map[string]string - // Zero or more optional fields - OptionalFields map[string]string - // The Filesystem type - FSType string - // FS specific information or "none" - Source string - // Superblock options - SuperOptions map[string]string -} - -// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. -func parseMountInfo(info []byte) ([]*MountInfo, error) { - mounts := []*MountInfo{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseMountInfoString(mountString) - if err != nil { - return nil, err - } - mounts = append(mounts, parsedMounts) - } - - err := scanner.Err() - return mounts, err -} - -// Parses a mountinfo file line, and converts it to a MountInfo struct. -// An important check here is to see if the hyphen separator, as if it does not exist, -// it means that the line is malformed. 
-func parseMountInfoString(mountString string) (*MountInfo, error) { - var err error - - mountInfo := strings.Split(mountString, " ") - mountInfoLength := len(mountInfo) - if mountInfoLength < 10 { - return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) - } - - if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) - } - - mount := &MountInfo{ - MajorMinorVer: mountInfo[2], - Root: mountInfo[3], - MountPoint: mountInfo[4], - Options: mountOptionsParser(mountInfo[5]), - OptionalFields: nil, - FSType: mountInfo[mountInfoLength-3], - Source: mountInfo[mountInfoLength-2], - SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), - } - - mount.MountID, err = strconv.Atoi(mountInfo[0]) - if err != nil { - return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) - } - mount.ParentID, err = strconv.Atoi(mountInfo[1]) - if err != nil { - return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) - } - // Has optional fields, which is a space separated list of values. - // Example: shared:2 master:7 - if mountInfo[6] != "" { - mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) - if err != nil { - return nil, fmt.Errorf("%w: %w", ErrFileParse, err) - } - } - return mount, nil -} - -// mountOptionsIsValidField checks a string against a valid list of optional fields keys. -func mountOptionsIsValidField(s string) bool { - switch s { - case - "shared", - "master", - "propagate_from", - "unbindable": - return true - } - return false -} - -// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. -func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { - optionalFields := make(map[string]string) - for _, field := range o { - optionSplit := strings.SplitN(field, ":", 2) - value := "" - if len(optionSplit) == 2 { - value = optionSplit[1] - } - if mountOptionsIsValidField(optionSplit[0]) { - optionalFields[optionSplit[0]] = value - } - } - return optionalFields, nil -} - -// mountOptionsParser parses the mount options, superblock options. -func mountOptionsParser(mountOptions string) map[string]string { - opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { - splitOption := strings.Split(opt, "=") - if len(splitOption) < 2 { - key := splitOption[0] - opts[key] = "" - } else { - key, value := splitOption[0], splitOption[1] - opts[key] = value - } - } - return opts -} - -// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. -func GetMounts() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat("/proc/self/mountinfo") - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. 
-func GetProcMounts(pid int) ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/mountstats.go b/openshift/tools/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index 50caa73274..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. -const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 - - // Kernel version >= 4.14 MaxLen - // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 - fieldTransport11RDMAMaxLen = 28 - - // Kernel version <= 4.2 MinLen - // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 - fieldTransport11RDMAMinLen = 20 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. - Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. -type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The mount options of the NFS mount. - Opts map[string]string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. - Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport []NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. 
-type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read uint64 - // Number of bytes written using the write() syscall. - Write uint64 - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead uint64 - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite uint64 - // Number of bytes read from the NFS server, in total. - ReadTotal uint64 - // Number of bytes written to the NFS server, in total. - WriteTotal uint64 - // Number of pages read directly via mmap()'d files. - ReadPages uint64 - // Number of pages written directly via mmap()'d files. - WritePages uint64 -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate uint64 - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate uint64 - // Number of times an inode cache is cleared. - DataInvalidate uint64 - // Number of times cached inode attributes are invalidated. - AttributeInvalidate uint64 - // Number of times files or directories have been open()'d. - VFSOpen uint64 - // Number of times a directory lookup has occurred. - VFSLookup uint64 - // Number of times permissions have been checked. - VFSAccess uint64 - // Number of updates (and potential writes) to pages. - VFSUpdatePage uint64 - // Number of pages read directly via mmap()'d files. - VFSReadPage uint64 - // Number of times a group of pages have been read. - VFSReadPages uint64 - // Number of pages written directly via mmap()'d files. - VFSWritePage uint64 - // Number of times a group of pages have been written. - VFSWritePages uint64 - // Number of times directory entries have been read with getdents(). - VFSGetdents uint64 - // Number of times attributes have been set on inodes. - VFSSetattr uint64 - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush uint64 - // Number of times fsync() has been called on directories and files. - VFSFsync uint64 - // Number of times locking has been attempted on a file. - VFSLock uint64 - // Number of times files have been closed and released. - VFSFileRelease uint64 - // Unknown. Possibly unused. - CongestionWait uint64 - // Number of times files have been truncated. - Truncation uint64 - // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension uint64 - // Number of times a file was removed while still open by another process. - SillyRename uint64 - // Number of times the NFS server gave less data than expected while reading. - ShortRead uint64 - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite uint64 - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay uint64 - // Number of NFS v4.1+ pNFS reads. - PNFSRead uint64 - // Number of NFS v4.1+ pNFS writes. - PNFSWrite uint64 -} - -// A NFSOperationStats contains statistics for a single operation. -type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. - Requests uint64 - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions uint64 - // Number of times a request has had a major timeout. - MajorTimeouts uint64 - // Number of bytes sent for this operation, including RPC headers and payload. 
- BytesSent uint64 - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived uint64 - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueMilliseconds uint64 - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseMilliseconds uint64 - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestMilliseconds uint64 - // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. - Errors uint64 -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string - // The local port used for the NFS mount. - Port uint64 - // Number of times the client has had to establish a connection from scratch - // to the NFS server. - Bind uint64 - // Number of times the client has made a TCP connection to the NFS server. - Connect uint64 - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime uint64 - // Duration since the NFS mount last saw any RPC traffic. - IdleTimeSeconds uint64 - // Number of RPC requests for this mount sent to the NFS server. - Sends uint64 - // Number of RPC responses for this mount received from the NFS server. - Receives uint64 - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. - BadTransactionIDs uint64 - // A running counter, incremented on each request as the current difference - // ebetween sends and receives. - CumulativeActiveRequests uint64 - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog uint64 - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed uint64 - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue uint64 - // A running counter, incremented on each request as the current size of the - // pending queue. - CumulativePendingQueue uint64 - - // Stats below only available with stat version 1.1. - // Transport over RDMA - - // accessed when sending a call - ReadChunkCount uint64 - WriteChunkCount uint64 - ReplyChunkCount uint64 - TotalRdmaRequest uint64 - - // rarely accessed error counters - PullupCopyCount uint64 - HardwayRegisterCount uint64 - FailedMarshalCount uint64 - BadReplyCount uint64 - MrsRecovered uint64 - MrsOrphaned uint64 - MrsAllocated uint64 - EmptySendctxQ uint64 - - // accessed when receiving a reply - TotalRdmaReply uint64 - FixupCopyCount uint64 - ReplyWaitsForSend uint64 - LocalInvNeeded uint64 - NomsgCallCount uint64 - BcallCount uint64 -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. 
-func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? - if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics. 
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldOpts = "opts:" - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - - switch ss[0] { - case fieldOpts: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) - } - if stats.Opts == nil { - stats.Opts = map[string]string{} - } - for _, opt := range strings.Split(ss[1], ",") { - split := strings.Split(opt, "=") - if len(split) == 2 { - stats.Opts[split[0]] = split[1] - } else { - stats.Opts[opt] = "" - } - } - case fieldAge: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) - } - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) - } - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) - } - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss) - } - - tstats, err := parseNFSTransportStats(ss[1:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = append(stats.Transport, *tstats) - } - - // When encountering "per-operation statistics", we must break this - // loop and parse them separately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. -func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) - } - - ns := make([]uint64, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. 
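At the call-site level, these mountstats parsers are reached through a per-process accessor; the sketch below assumes procfs.Self and Proc.MountStats from the same upstream package (neither appears in this hunk). Note that NFS statistics are stored behind the MountStats interface as a *MountStatsNFS pointer, per the parseMountStats code above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats() // parses /proc/self/mountstats
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS) // only NFSv3/v4 mounts carry stats
		if !ok {
			continue
		}
		fmt.Printf("%s: age=%s readTotal=%d writeTotal=%d\n",
			m.Mount, nfs.Age, nfs.Bytes.ReadTotal, nfs.Bytes.WriteTotal)
	}
}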
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) - } - - ns := make([]uint64, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. -func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Minimum number of expected fields in each per-operation statistics set - minFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) < minFields { - return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) - } - - // Skip string operation name for integers - ns := make([]uint64, 0, minFields-1) - for _, st := range ss[1:] { - n, err := strconv.ParseUint(st, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - opStats := NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueMilliseconds: ns[5], - CumulativeTotalResponseMilliseconds: ns[6], - CumulativeTotalRequestMilliseconds: ns[7], - } - - if len(ns) > 8 { - opStats.Errors = ns[8] - } - - ops = append(ops, opStats) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. -func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - - switch statVersion { - case statVersion10: - var expectedLength int - switch protocol { - case "tcp": - expectedLength = fieldTransport10TCPLen - case "udp": - expectedLength = fieldTransport10UDPLen - default: - return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) - } - case statVersion11: - var expectedLength int - switch protocol { - case "tcp": - expectedLength = fieldTransport11TCPLen - case "udp": - expectedLength = fieldTransport11UDPLen - case "rdma": - expectedLength = fieldTransport11RDMAMinLen - default: - return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) - } - if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || - (protocol == "rdma" && len(ss) < expectedLength) { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) - } - default: - return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. - // - // Note: slice length must be set to length of v1.1 stats to avoid a panic when - // only v1.0 stats are present. - // See: https://github.com/prometheus/node_exporter/issues/571. - // - // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen - ns := make([]uint64, fieldTransport11RDMAMaxLen+3) - for i, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns[i] = n - } - - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. - switch protocol { - case "udp": - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - case "tcp": - ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - case "rdma": - ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) 
- } - - return &NFSTransportStats{ - // NFS xprt over tcp or udp - Protocol: protocol, - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTimeSeconds: ns[4], - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - - // NFS xprt over tcp or udp - // And statVersion 1.1 - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - - // NFS xprt over rdma - // And stat Version 1.1 - ReadChunkCount: ns[13], - WriteChunkCount: ns[14], - ReplyChunkCount: ns[15], - TotalRdmaRequest: ns[16], - PullupCopyCount: ns[17], - HardwayRegisterCount: ns[18], - FailedMarshalCount: ns[19], - BadReplyCount: ns[20], - MrsRecovered: ns[21], - MrsOrphaned: ns[22], - MrsAllocated: ns[23], - EmptySendctxQ: ns[24], - TotalRdmaReply: ns[25], - FixupCopyCount: ns[26], - ReplyWaitsForSend: ns[27], - LocalInvNeeded: ns[28], - NomsgCallCount: ns[29], - BcallCount: ns[30], - }, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_conntrackstat.go deleted file mode 100644 index 316df5fbb7..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core. -type ConntrackStatEntry struct { - Entries uint64 - Searched uint64 - Found uint64 - New uint64 - Invalid uint64 - Ignore uint64 - Delete uint64 - DeleteList uint64 - Insert uint64 - InsertFailed uint64 - Drop uint64 - EarlyDrop uint64 - SearchRestart uint64 -} - -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. -func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { - return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) -} - -// Parses a slice of ConntrackStatEntries from the given filepath. -func readConntrackStat(path string) ([]ConntrackStatEntry, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(path) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. - return nil, err - } - - stat, err := parseConntrackStat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) - } - - return stat, nil -} - -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. 
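FS.ConntrackStat (defined above) returns one entry per CPU core, skipping the header line via the parser that follows. A minimal consumer sketch, assuming the upstream github.com/prometheus/procfs module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.ConntrackStat() // one ConntrackStatEntry per CPU core
	if err != nil {
		log.Fatal(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: entries=%d found=%d invalid=%d drop=%d\n",
			cpu, e.Entries, e.Found, e.Invalid, e.Drop)
	}
}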
-func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { - var entries []ConntrackStatEntry - - scanner := bufio.NewScanner(r) - scanner.Scan() - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - conntrackEntry, err := parseConntrackStatEntry(fields) - if err != nil { - return nil, err - } - entries = append(entries, *conntrackEntry) - } - - return entries, nil -} - -// Parses a ConntrackStatEntry from given array of fields. -func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - entries, err := util.ParseHexUint64s(fields) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) - } - numEntries := len(entries) - if numEntries < 16 || numEntries > 17 { - return nil, - fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) - } - - stats := &ConntrackStatEntry{ - Entries: *entries[0], - Searched: *entries[1], - Found: *entries[2], - New: *entries[3], - Invalid: *entries[4], - Ignore: *entries[5], - Delete: *entries[6], - DeleteList: *entries[7], - Insert: *entries[8], - InsertFailed: *entries[9], - Drop: *entries[10], - EarlyDrop: *entries[11], - } - - // Ignore missing search_restart on Linux < 2.6.35. - if numEntries == 17 { - stats.SearchRestart = *entries[16] - } - - return stats, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_dev.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 index e66208aa05..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. - RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. - RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. - RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. - TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. 
- TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NetDev() (NetDev, error) { - return newNetDev(fs.proc.Path("net/dev")) -} - -// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. -func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - netDev := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := netDev.parseLine(s.Text()) - if err != nil { - return netDev, err - } - - netDev[line.Name] = *line - } - - return netDev, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. -func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - idx := strings.LastIndex(rawLine, ":") - if idx == -1 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(rawLine[:idx]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { - return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 
10, 64)
-    if err != nil {
-        return nil, err
-    }
-    line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
-    if err != nil {
-        return nil, err
-    }
-
-    return line, nil
-}
-
-// Total aggregates the values across interfaces and returns a new NetDevLine.
-// The Name field will be a sorted comma separated list of interface names.
-func (netDev NetDev) Total() NetDevLine {
-    total := NetDevLine{}
-
-    names := make([]string, 0, len(netDev))
-    for _, ifc := range netDev {
-        names = append(names, ifc.Name)
-        total.RxBytes += ifc.RxBytes
-        total.RxPackets += ifc.RxPackets
-        total.RxErrors += ifc.RxErrors
-        total.RxDropped += ifc.RxDropped
-        total.RxFIFO += ifc.RxFIFO
-        total.RxFrame += ifc.RxFrame
-        total.RxCompressed += ifc.RxCompressed
-        total.RxMulticast += ifc.RxMulticast
-        total.TxBytes += ifc.TxBytes
-        total.TxPackets += ifc.TxPackets
-        total.TxErrors += ifc.TxErrors
-        total.TxDropped += ifc.TxDropped
-        total.TxFIFO += ifc.TxFIFO
-        total.TxCollisions += ifc.TxCollisions
-        total.TxCarrier += ifc.TxCarrier
-        total.TxCompressed += ifc.TxCompressed
-    }
-    sort.Strings(names)
-    total.Name = strings.Join(names, ", ")
-
-    return total
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
deleted file mode 100644
index f50b38e352..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
-    "bufio"
-    "errors"
-    "io"
-    "os"
-    "strconv"
-    "strings"
-)
-
-// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/[pid]/net/dev_snmp6/.
-// The outer map's keys are interface names and the inner map's keys are stat names.
-//
-// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
-type NetDevSNMP6 map[string]map[string]uint64
-
-// NetDevSNMP6 returns kernel/system statistics read from interface files within
-// the /proc/net/dev_snmp6/ directory.
-func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
-    return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
-}
-
-// NetDevSNMP6 returns kernel/system statistics read from interface files within
-// the /proc/[pid]/net/dev_snmp6/ directory.
-func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
-    return newNetDevSNMP6(p.path("net/dev_snmp6"))
-}
-
-// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
-func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
-    netDevSNMP6 := make(NetDevSNMP6)
-
-    // The net/dev_snmp6 directory contains one file per interface.
-    ifaceFiles, err := os.ReadDir(dir)
-    if err != nil {
-        // On systems with IPv6 disabled, this directory won't exist.
-        // Return the error unwrapped so callers can detect it with os.IsNotExist.
- if errors.Is(err, os.ErrNotExist) { - return netDevSNMP6, err - } - return netDevSNMP6, err - } - - for _, iFaceFile := range ifaceFiles { - f, err := os.Open(dir + "/" + iFaceFile.Name()) - if err != nil { - return netDevSNMP6, err - } - defer f.Close() - - netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) - if err != nil { - return netDevSNMP6, err - } - } - - return netDevSNMP6, nil -} - -func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { - m := make(map[string]uint64) - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - stat := strings.Fields(scanner.Text()) - if len(stat) < 2 { - continue - } - key, val := stat[0], stat[1] - - // Expect stat name to contain "6" or be "ifIndex" - if strings.Contains(key, "6") || key == "ifIndex" { - v, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return m, err - } - - m[key] = v - } - } - return m, scanner.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go deleted file mode 100644 index 19e3378f72..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" -) - -const ( - // Maximum size limit used by io.LimitReader while reading the content of the - // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic - // as each line represents a single used socket. - // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. - // With e.g. 150 Byte per line and the maximum number of 65535, - // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. - readLimit = 4294967296 // Byte -> 4 GiB -) - -// This contains generic data structures for both udp and tcp sockets. -type ( - // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. - NetIPSocket []*netIPSocketLine - - // NetIPSocketSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetIPSocket it does not collect - // the parsed lines into a slice. - NetIPSocketSummary struct { - // TxQueueLength shows the total queue length of all parsed tx_queue lengths. - TxQueueLength uint64 - // RxQueueLength shows the total queue length of all parsed rx_queue lengths. - RxQueueLength uint64 - // UsedSockets shows the total number of parsed lines representing the - // number of used sockets. - UsedSockets uint64 - // Drops shows the total number of dropped packets of all UDP sockets. - Drops *uint64 - } - - // A single line parser for fields from /proc/net/{t,u}dp{,6}. - // Fields which are not used by IPSocket are skipped. - // Drops is non-nil for udp{,6}, but nil for tcp{,6}. 
-    // For the proc file format details, see https://linux.die.net/man/5/proc.
-    netIPSocketLine struct {
-        Sl        uint64
-        LocalAddr net.IP
-        LocalPort uint64
-        RemAddr   net.IP
-        RemPort   uint64
-        St        uint64
-        TxQueue   uint64
-        RxQueue   uint64
-        UID       uint64
-        Inode     uint64
-        Drops     *uint64
-    }
-)
-
-func newNetIPSocket(file string) (NetIPSocket, error) {
-    f, err := os.Open(file)
-    if err != nil {
-        return nil, err
-    }
-    defer f.Close()
-
-    var netIPSocket NetIPSocket
-    isUDP := strings.Contains(file, "udp")
-
-    lr := io.LimitReader(f, readLimit)
-    s := bufio.NewScanner(lr)
-    s.Scan() // skip first line with headers
-    for s.Scan() {
-        fields := strings.Fields(s.Text())
-        line, err := parseNetIPSocketLine(fields, isUDP)
-        if err != nil {
-            return nil, err
-        }
-        netIPSocket = append(netIPSocket, line)
-    }
-    if err := s.Err(); err != nil {
-        return nil, err
-    }
-    return netIPSocket, nil
-}
-
-// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
-func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
-    f, err := os.Open(file)
-    if err != nil {
-        return nil, err
-    }
-    defer f.Close()
-
-    var netIPSocketSummary NetIPSocketSummary
-    var udpPacketDrops uint64
-    isUDP := strings.Contains(file, "udp")
-
-    lr := io.LimitReader(f, readLimit)
-    s := bufio.NewScanner(lr)
-    s.Scan() // skip first line with headers
-    for s.Scan() {
-        fields := strings.Fields(s.Text())
-        line, err := parseNetIPSocketLine(fields, isUDP)
-        if err != nil {
-            return nil, err
-        }
-        netIPSocketSummary.TxQueueLength += line.TxQueue
-        netIPSocketSummary.RxQueueLength += line.RxQueue
-        netIPSocketSummary.UsedSockets++
-        if isUDP {
-            udpPacketDrops += *line.Drops
-            netIPSocketSummary.Drops = &udpPacketDrops
-        }
-    }
-    if err := s.Err(); err != nil {
-        return nil, err
-    }
-    return &netIPSocketSummary, nil
-}
-
-// The /proc/net/{t,u}dp{,6} files are in network byte order for IPv4. For IPv6
-// the address is four words of four bytes each, and within each word the four
-// bytes are written in reverse order.
-
-func parseIP(hexIP string) (net.IP, error) {
-    var byteIP []byte
-    byteIP, err := hex.DecodeString(hexIP)
-    if err != nil {
-        return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
-    }
-    switch len(byteIP) {
-    case 4:
-        return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
-    case 16:
-        i := net.IP{
-            byteIP[3], byteIP[2], byteIP[1], byteIP[0],
-            byteIP[7], byteIP[6], byteIP[5], byteIP[4],
-            byteIP[11], byteIP[10], byteIP[9], byteIP[8],
-            byteIP[15], byteIP[14], byteIP[13], byteIP[12],
-        }
-        return i, nil
-    default:
-        return nil, fmt.Errorf("%w: Unable to parse IP %s", ErrFileParse, hexIP)
-    }
-}
-
-// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
-    line := &netIPSocketLine{}
-    if len(fields) < 10 {
-        return nil, fmt.Errorf(
-            "%w: Less than 10 columns found %q",
-            ErrFileParse,
-            strings.Join(fields, " "),
-        )
-    }
-    var err error // parse error
-
-    // sl
-    s := strings.Split(fields[0], ":")
-    if len(s) != 2 {
-        return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
-    }
-
-    if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
-        return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, s[0], err)
-    }
-    // local_address
-    l := strings.Split(fields[1], ":")
-    if len(l) != 2 {
-        return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
-    }
-    if line.LocalAddr, err = parseIP(l[0]); err != nil {
-        return nil, err
-    }
-    if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
-        return nil, fmt.Errorf("%w: Unable to parse local_address port value in %q: %w", ErrFileParse, l[1], err)
-    }
-
-    // remote_address
-    r := strings.Split(fields[2], ":")
-    if len(r) != 2 {
-        return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[2])
-    }
-    if line.RemAddr, err = parseIP(r[0]); err != nil {
-        return nil, err
-    }
-    if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
-        return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, r[1], err)
-    }
-
-    // st
-    if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
-        return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, fields[3], err)
-    }
-
-    // tx_queue and rx_queue
-    q := strings.Split(fields[4], ":")
-    if len(q) != 2 {
-        return nil, fmt.Errorf(
-            "%w: Missing colon for tx/rx queues in socket line %q",
-            ErrFileParse,
-            fields[4],
-        )
-    }
-    if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
-        return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, q[0], err)
-    }
-    if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
-        return nil, fmt.Errorf("%w: Cannot parse rx_queue value in %q: %w", ErrFileParse, q[1], err)
-    }
-
-    // uid
-    if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
-        return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, fields[7], err)
-    }
-
-    // inode
-    if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
-        return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, fields[9], err)
-    }
-
-    // drops
-    if isUDP {
-        drops, err := strconv.ParseUint(fields[12], 0, 64)
-        if err != nil {
-            return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, fields[12], err)
-        }
-        line.Drops = &drops
-    }
-
-    return line, nil
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_protocols.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_protocols.go
deleted file mode 100644
index 8d4b1ac05b..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/procfs/net_protocols.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
-    "bufio"
-    "bytes"
-    "fmt"
-    "strconv"
-    "strings"
-
-    "github.com/prometheus/procfs/internal/util"
-)
-
-// NetProtocolStats stores the contents from /proc/net/protocols.
-type NetProtocolStats map[string]NetProtocolStatLine
-
-// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
-// only care about the first eight columns as the rest are not likely to change
-// and only serve to provide a set of capabilities for each protocol.
-type NetProtocolStatLine struct {
-    Name         string // 0 The name of the protocol
-    Size         uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
-    Sockets      int64  // 2 Number of sockets in use by this protocol
-    Memory       int64  // 3 Number of 4KB pages allocated by all sockets of this protocol
-    Pressure     int    // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure.
-    MaxHeader    uint64 // 5 Protocol specific max header size
-    Slab         bool   // 6 Indicates whether or not memory is allocated from the SLAB
-    ModuleName   string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
-    Capabilities NetProtocolCapabilities
-}
-
-// NetProtocolCapabilities contains a list of capabilities for each protocol.
-type NetProtocolCapabilities struct {
-    Close               bool // 8
-    Connect             bool // 9
-    Disconnect          bool // 10
-    Accept              bool // 11
-    IoCtl               bool // 12
-    Init                bool // 13
-    Destroy             bool // 14
-    Shutdown            bool // 15
-    SetSockOpt          bool // 16
-    GetSockOpt          bool // 17
-    SendMsg             bool // 18
-    RecvMsg             bool // 19
-    SendPage            bool // 20
-    Bind                bool // 21
-    BacklogRcv          bool // 22
-    Hash                bool // 23
-    UnHash              bool // 24
-    GetPort             bool // 25
-    EnterMemoryPressure bool // 26
-}
-
-// NetProtocols reads stats from /proc/net/protocols and returns a map of
-// NetProtocolStatLine entries.
As of this writing no official Linux Documentation -// exists, however the source is fairly self-explanatory and the format seems -// stable since its introduction in 2.6.12-rc2 -// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 -// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 -func (fs FS) NetProtocols() (NetProtocolStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) - if err != nil { - return NetProtocolStats{}, err - } - return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) -} - -func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { - nps := NetProtocolStats{} - - // Skip the header line - s.Scan() - - for s.Scan() { - line, err := nps.parseLine(s.Text()) - if err != nil { - return NetProtocolStats{}, err - } - - nps[line.Name] = *line - } - return nps, nil -} - -func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { - line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} - var err error - const enabled = "yes" - const disabled = "no" - - fields := strings.Fields(rawLine) - line.Name = fields[0] - line.Size, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.Memory, err = strconv.ParseInt(fields[3], 10, 64) - if err != nil { - return nil, err - } - switch fields[4] { - case enabled: - line.Pressure = 1 - case disabled: - line.Pressure = 0 - default: - line.Pressure = -1 - } - line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - switch fields[6] { - case enabled: - line.Slab = true - case disabled: - line.Slab = false - default: - return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) - } - line.ModuleName = fields[7] - - err = line.Capabilities.parseCapabilities(fields[8:]) - if err != nil { - return nil, err - } - - return line, nil -} - -func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { - // The capabilities are all bools so we can loop over to map them - capabilityFields := [...]*bool{ - &pc.Close, - &pc.Connect, - &pc.Disconnect, - &pc.Accept, - &pc.IoCtl, - &pc.Init, - &pc.Destroy, - &pc.Shutdown, - &pc.SetSockOpt, - &pc.GetSockOpt, - &pc.SendMsg, - &pc.RecvMsg, - &pc.SendPage, - &pc.Bind, - &pc.BacklogRcv, - &pc.Hash, - &pc.UnHash, - &pc.GetPort, - &pc.EnterMemoryPressure, - } - - for i := 0; i < len(capabilities); i++ { - switch capabilities[i] { - case "y": - *capabilityFields[i] = true - case "n": - *capabilityFields[i] = false - default: - return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) - } - } - return nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_route.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_route.go deleted file mode 100644 index deb7029fe1..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_route.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -const ( - blackholeRepresentation string = "*" - blackholeIfaceName string = "blackhole" - routeLineColumns int = 11 -) - -// A NetRouteLine represents one line from net/route. -type NetRouteLine struct { - Iface string - Destination uint32 - Gateway uint32 - Flags uint32 - RefCnt uint32 - Use uint32 - Metric uint32 - Mask uint32 - MTU uint32 - Window uint32 - IRTT uint32 -} - -func (fs FS) NetRoute() ([]NetRouteLine, error) { - return readNetRoute(fs.proc.Path("net", "route")) -} - -func readNetRoute(path string) ([]NetRouteLine, error) { - b, err := util.ReadFileNoStat(path) - if err != nil { - return nil, err - } - - routelines, err := parseNetRoute(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) - } - return routelines, nil -} - -func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { - var routelines []NetRouteLine - - scanner := bufio.NewScanner(r) - scanner.Scan() - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - routeline, err := parseNetRouteLine(fields) - if err != nil { - return nil, err - } - routelines = append(routelines, *routeline) - } - return routelines, nil -} - -func parseNetRouteLine(fields []string) (*NetRouteLine, error) { - if len(fields) != routeLineColumns { - return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) - } - iface := fields[0] - if iface == blackholeRepresentation { - iface = blackholeIfaceName - } - destination, err := strconv.ParseUint(fields[1], 16, 32) - if err != nil { - return nil, err - } - gateway, err := strconv.ParseUint(fields[2], 16, 32) - if err != nil { - return nil, err - } - flags, err := strconv.ParseUint(fields[3], 10, 32) - if err != nil { - return nil, err - } - refcnt, err := strconv.ParseUint(fields[4], 10, 32) - if err != nil { - return nil, err - } - use, err := strconv.ParseUint(fields[5], 10, 32) - if err != nil { - return nil, err - } - metric, err := strconv.ParseUint(fields[6], 10, 32) - if err != nil { - return nil, err - } - mask, err := strconv.ParseUint(fields[7], 16, 32) - if err != nil { - return nil, err - } - mtu, err := strconv.ParseUint(fields[8], 10, 32) - if err != nil { - return nil, err - } - window, err := strconv.ParseUint(fields[9], 10, 32) - if err != nil { - return nil, err - } - irtt, err := strconv.ParseUint(fields[10], 10, 32) - if err != nil { - return nil, err - } - routeline := &NetRouteLine{ - Iface: iface, - Destination: uint32(destination), - Gateway: uint32(gateway), - Flags: uint32(flags), - RefCnt: uint32(refcnt), - Use: uint32(use), - Metric: uint32(metric), - Mask: uint32(mask), - MTU: uint32(mtu), - Window: uint32(window), - IRTT: uint32(irtt), - } - return routeline, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_sockstat.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_sockstat.go deleted file mode 100644 index fae62b13d9..0000000000 --- 
a/openshift/tools/vendor/github.com/prometheus/procfs/net_sockstat.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, -// respectively. -type NetSockstat struct { - // Used is non-nil for IPv4 sockstat results, but nil for IPv6. - Used *int - Protocols []NetSockstatProtocol -} - -// A NetSockstatProtocol contains statistics about a given socket protocol. -// Pointer fields indicate that the value may or may not be present on any -// given protocol. -type NetSockstatProtocol struct { - Protocol string - InUse int - Orphan *int - TW *int - Alloc *int - Mem *int - Memory *int -} - -// NetSockstat retrieves IPv4 socket statistics. -func (fs FS) NetSockstat() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat")) -} - -// NetSockstat6 retrieves IPv6 socket statistics. -// -// If IPv6 is disabled on this kernel, the returned error can be checked with -// os.IsNotExist. -func (fs FS) NetSockstat6() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat6")) -} - -// readSockstat opens and parses a NetSockstat from the input file. -func readSockstat(name string) (*NetSockstat, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(name) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. - return nil, err - } - - stat, err := parseSockstat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) - } - - return stat, nil -} - -// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. -func parseSockstat(r io.Reader) (*NetSockstat, error) { - var stat NetSockstat - s := bufio.NewScanner(r) - for s.Scan() { - // Expect a minimum of a protocol and one key/value pair. - fields := strings.Split(s.Text(), " ") - if len(fields) < 3 { - return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) - } - - // The remaining fields are key/value pairs. - kvs, err := parseSockstatKVs(fields[1:]) - if err != nil { - return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) - } - - // The first field is the protocol. We must trim its colon suffix. - proto := strings.TrimSuffix(fields[0], ":") - switch proto { - case "sockets": - // Special case: IPv4 has a sockets "used" key/value pair that we - // embed at the top level of the structure. - used := kvs["used"] - stat.Used = &used - default: - // Parse all other lines as individual protocols. 
-            nsp := parseSockstatProtocol(kvs)
-            nsp.Protocol = proto
-            stat.Protocols = append(stat.Protocols, nsp)
-        }
-    }
-
-    if err := s.Err(); err != nil {
-        return nil, err
-    }
-
-    return &stat, nil
-}
-
-// parseSockstatKVs parses a string slice into a map of key/value pairs.
-func parseSockstatKVs(kvs []string) (map[string]int, error) {
-    if len(kvs)%2 != 0 {
-        return nil, fmt.Errorf("%w: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
-    }
-
-    // Iterate two values at a time to gather key/value pairs.
-    out := make(map[string]int, len(kvs)/2)
-    for i := 0; i < len(kvs); i += 2 {
-        vp := util.NewValueParser(kvs[i+1])
-        out[kvs[i]] = vp.Int()
-
-        if err := vp.Err(); err != nil {
-            return nil, err
-        }
-    }
-
-    return out, nil
-}
-
-// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
-func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
-    var nsp NetSockstatProtocol
-    for k, v := range kvs {
-        // Capture the range variable to ensure we get unique pointers for
-        // each of the optional fields.
-        v := v
-        switch k {
-        case "inuse":
-            nsp.InUse = v
-        case "orphan":
-            nsp.Orphan = &v
-        case "tw":
-            nsp.TW = &v
-        case "alloc":
-            nsp.Alloc = &v
-        case "mem":
-            nsp.Mem = &v
-        case "memory":
-            nsp.Memory = &v
-        }
-    }
-
-    return nsp
-}
diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_softnet.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_softnet.go
deleted file mode 100644
index 71c8059f4d..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/procfs/net_softnet.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
-    "bufio"
-    "bytes"
-    "fmt"
-    "io"
-    "strconv"
-    "strings"
-
-    "github.com/prometheus/procfs/internal/util"
-)
-
-// For the proc file format details, see:
-// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
-// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
-// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
-
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
-type SoftnetStat struct {
-    // Number of processed packets.
-    Processed uint32
-    // Number of dropped packets.
-    Dropped uint32
-    // Number of times processing packets ran out of quota.
-    TimeSqueezed uint32
-    // Number of collisions that occurred while obtaining the device lock during transmit.
-    CPUCollision uint32
-    // Number of times the CPU was woken up via received_rps.
-    ReceivedRps uint32
-    // Number of times the flow limit has been reached.
-    FlowLimitCount uint32
-    // Softnet backlog status.
-    SoftnetBacklogLen uint32
-    // CPU id owning this softnet_data.
-    Index uint32
-    // Number of columns in this softnet_data row.
-    Width int
-}
-
-var softNetProcFile = "net/softnet_stat"
-
-// NetSoftnetStat reads data from /proc/net/softnet_stat.
-func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { - b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) - if err != nil { - return nil, err - } - - entries, err := parseSoftnet(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) - } - - return entries, nil -} - -func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { - const minColumns = 9 - - s := bufio.NewScanner(r) - - var stats []SoftnetStat - cpuIndex := 0 - for s.Scan() { - columns := strings.Fields(s.Text()) - width := len(columns) - softnetStat := SoftnetStat{} - - if width < minColumns { - return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) - } - - // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 - if width >= minColumns { - us, err := parseHexUint32s(columns[0:9]) - if err != nil { - return nil, err - } - - softnetStat.Processed = us[0] - softnetStat.Dropped = us[1] - softnetStat.TimeSqueezed = us[2] - softnetStat.CPUCollision = us[8] - } - - // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 - if width >= 10 { - us, err := parseHexUint32s(columns[9:10]) - if err != nil { - return nil, err - } - - softnetStat.ReceivedRps = us[0] - } - - // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 - if width >= 11 { - us, err := parseHexUint32s(columns[10:11]) - if err != nil { - return nil, err - } - - softnetStat.FlowLimitCount = us[0] - } - - // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 - if width >= 13 { - us, err := parseHexUint32s(columns[11:13]) - if err != nil { - return nil, err - } - - softnetStat.SoftnetBacklogLen = us[0] - softnetStat.Index = us[1] - } else { - // For older kernels, create the Index based on the scan line number. - softnetStat.Index = uint32(cpuIndex) - } - softnetStat.Width = width - stats = append(stats, softnetStat) - cpuIndex++ - } - - return stats, nil -} - -func parseHexUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_tcp.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_tcp.go deleted file mode 100644 index 0396d72015..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_tcp.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. - NetTCP []*netIPSocketLine - - // NetTCPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetTCP it does not collect - // the parsed lines into a slice. 
- NetTCPSummary NetIPSocketSummary -) - -// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. -func (fs FS) NetTCP() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp")) -} - -// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp6. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. -func (fs FS) NetTCP6() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp6")) -} - -// NetTCPSummary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. -func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp")) -} - -// NetTCP6Summary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp6. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. -func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp6")) -} - -// newNetTCP creates a new NetTCP{,6} from the contents of the given file. -func newNetTCP(file string) (NetTCP, error) { - n, err := newNetIPSocket(file) - n1 := NetTCP(n) - return n1, err -} - -func newNetTCPSummary(file string) (*NetTCPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetTCPSummary(*n) - return &n1, err -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_tls_stat.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_tls_stat.go deleted file mode 100644 index 13994c1782..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_tls_stat.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2023 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// TLSStat struct represents data in /proc/net/tls_stat. -// See https://docs.kernel.org/networking/tls.html#statistics -type TLSStat struct { - // number of TX sessions currently installed where host handles cryptography - TLSCurrTxSw int - // number of RX sessions currently installed where host handles cryptography - TLSCurrRxSw int - // number of TX sessions currently installed where NIC handles cryptography - TLSCurrTxDevice int - // number of RX sessions currently installed where NIC handles cryptography - TLSCurrRxDevice int - //number of TX sessions opened with host cryptography - TLSTxSw int - //number of RX sessions opened with host cryptography - TLSRxSw int - // number of TX sessions opened with NIC cryptography - TLSTxDevice int - // number of RX sessions opened with NIC cryptography - TLSRxDevice int - // record decryption failed (e.g. 
due to incorrect authentication tag) - TLSDecryptError int - // number of RX resyncs sent to NICs handling cryptography - TLSRxDeviceResync int - // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. - TLSDecryptRetry int - // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. - TLSRxNoPadViolation int -} - -// NewTLSStat reads the tls_stat statistics. -func NewTLSStat() (TLSStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return TLSStat{}, err - } - - return fs.NewTLSStat() -} - -// NewTLSStat reads the tls_stat statistics. -func (fs FS) NewTLSStat() (TLSStat, error) { - file, err := os.Open(fs.proc.Path("net/tls_stat")) - if err != nil { - return TLSStat{}, err - } - defer file.Close() - - var ( - tlsstat = TLSStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return TLSStat{}, err - } - - switch name { - case "TlsCurrTxSw": - tlsstat.TLSCurrTxSw = value - case "TlsCurrRxSw": - tlsstat.TLSCurrRxSw = value - case "TlsCurrTxDevice": - tlsstat.TLSCurrTxDevice = value - case "TlsCurrRxDevice": - tlsstat.TLSCurrRxDevice = value - case "TlsTxSw": - tlsstat.TLSTxSw = value - case "TlsRxSw": - tlsstat.TLSRxSw = value - case "TlsTxDevice": - tlsstat.TLSTxDevice = value - case "TlsRxDevice": - tlsstat.TLSRxDevice = value - case "TlsDecryptError": - tlsstat.TLSDecryptError = value - case "TlsRxDeviceResync": - tlsstat.TLSRxDeviceResync = value - case "TlsDecryptRetry": - tlsstat.TLSDecryptRetry = value - case "TlsRxNoPadViolation": - tlsstat.TLSRxNoPadViolation = value - } - - } - - return tlsstat, s.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_udp.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_udp.go deleted file mode 100644 index 9ac3daf2d4..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_udp.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetUDP represents the contents of /proc/net/udp{,6} file without the header. - NetUDP []*netIPSocketLine - - // NetUDPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetUDP it does not collect - // the parsed lines into a slice. - NetUDPSummary NetIPSocketSummary -) - -// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp. -func (fs FS) NetUDP() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp")) -} - -// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp6. 
-func (fs FS) NetUDP6() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp6")) -} - -// NetUDPSummary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp. -func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp")) -} - -// NetUDP6Summary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp6. -func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp6")) -} - -// newNetUDP creates a new NetUDP{,6} from the contents of the given file. -func newNetUDP(file string) (NetUDP, error) { - n, err := newNetIPSocket(file) - n1 := NetUDP(n) - return n1, err -} - -func newNetUDPSummary(file string) (*NetUDPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetUDPSummary(*n) - return &n1, err -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_unix.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_unix.go deleted file mode 100644 index d7e0cacb4c..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_unix.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 -// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. - -// Constants for the various /proc/net/unix enumerations. -// TODO: match against x/sys/unix or similar? -const ( - netUnixTypeStream = 1 - netUnixTypeDgram = 2 - netUnixTypeSeqpacket = 5 - - netUnixFlagDefault = 0 - netUnixFlagListen = 1 << 16 - - netUnixStateUnconnected = 1 - netUnixStateConnecting = 2 - netUnixStateConnected = 3 - netUnixStateDisconnected = 4 -) - -// NetUNIXType is the type of the type field. -type NetUNIXType uint64 - -// NetUNIXFlags is the type of the flags field. -type NetUNIXFlags uint64 - -// NetUNIXState is the type of the state field. -type NetUNIXState uint64 - -// NetUNIXLine represents a line of /proc/net/unix. -type NetUNIXLine struct { - KernelPtr string - RefCount uint64 - Protocol uint64 - Flags NetUNIXFlags - Type NetUNIXType - State NetUNIXState - Inode uint64 - Path string -} - -// NetUNIX holds the data read from /proc/net/unix. -type NetUNIX struct { - Rows []*NetUNIXLine -} - -// NetUNIX returns data read from /proc/net/unix. -func (fs FS) NetUNIX() (*NetUNIX, error) { - return readNetUNIX(fs.proc.Path("net/unix")) -} - -// readNetUNIX reads data in /proc/net/unix format from the specified file. -func readNetUNIX(file string) (*NetUNIX, error) { - // This file could be quite large and a streaming read is desirable versus - // reading the entire contents at once. 
-    f, err := os.Open(file)
-    if err != nil {
-        return nil, err
-    }
-    defer f.Close()
-
-    return parseNetUNIX(f)
-}
-
-// parseNetUNIX creates a NetUNIX structure from the incoming stream.
-func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
-    // Begin scanning by checking for the existence of Inode.
-    s := bufio.NewScanner(r)
-    s.Scan()
-
-    // The proc(5) man page says the header does not contain an Inode field,
-    // but in practice it does. This code works for both cases.
-    hasInode := strings.Contains(s.Text(), "Inode")
-
-    // Expect a minimum number of fields, but Inode and Path are optional:
-    // Num RefCount Protocol Flags Type St Inode Path
-    minFields := 6
-    if hasInode {
-        minFields++
-    }
-
-    var nu NetUNIX
-    for s.Scan() {
-        line := s.Text()
-        item, err := nu.parseLine(line, hasInode, minFields)
-        if err != nil {
-            return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
-        }
-
-        nu.Rows = append(nu.Rows, item)
-    }
-
-    if err := s.Err(); err != nil {
-        return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
-    }
-
-    return &nu, nil
-}
-
-func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
-    fields := strings.Fields(line)
-
-    l := len(fields)
-    if l < minFields {
-        return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
-    }
-
-    // Field offsets are as follows:
-    // Num RefCount Protocol Flags Type St Inode Path
-
-    kernelPtr := strings.TrimSuffix(fields[0], ":")
-
-    users, err := u.parseUsers(fields[1])
-    if err != nil {
-        return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
-    }
-
-    flags, err := u.parseFlags(fields[3])
-    if err != nil {
-        return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
-    }
-
-    typ, err := u.parseType(fields[4])
-    if err != nil {
-        return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
-    }
-
-    state, err := u.parseState(fields[5])
-    if err != nil {
-        return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
-    }
-
-    var inode uint64
-    if hasInode {
-        inode, err = u.parseInode(fields[6])
-        if err != nil {
-            return nil, fmt.Errorf("%w: failed to parse inode %q: %w", ErrFileParse, fields[6], err)
-        }
-    }
-
-    n := &NetUNIXLine{
-        KernelPtr: kernelPtr,
-        RefCount:  users,
-        Type:      typ,
-        Flags:     flags,
-        State:     state,
-        Inode:     inode,
-    }
-
-    // Path field is optional.
-    if l > minFields {
-        // Path occurs at either index 6 or 7 depending on whether inode is
-        // already present.
- pathIdx := 7 - if !hasInode { - pathIdx-- - } - - n.Path = fields[pathIdx] - } - - return n, nil -} - -func (u NetUNIX) parseUsers(s string) (uint64, error) { - return strconv.ParseUint(s, 16, 32) -} - -func (u NetUNIX) parseType(s string) (NetUNIXType, error) { - typ, err := strconv.ParseUint(s, 16, 16) - if err != nil { - return 0, err - } - - return NetUNIXType(typ), nil -} - -func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { - flags, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return 0, err - } - - return NetUNIXFlags(flags), nil -} - -func (u NetUNIX) parseState(s string) (NetUNIXState, error) { - st, err := strconv.ParseInt(s, 16, 8) - if err != nil { - return 0, err - } - - return NetUNIXState(st), nil -} - -func (u NetUNIX) parseInode(s string) (uint64, error) { - return strconv.ParseUint(s, 10, 64) -} - -func (t NetUNIXType) String() string { - switch t { - case netUnixTypeStream: - return "stream" - case netUnixTypeDgram: - return "dgram" - case netUnixTypeSeqpacket: - return "seqpacket" - } - return "unknown" -} - -func (f NetUNIXFlags) String() string { - switch f { - case netUnixFlagListen: - return "listen" - default: - return "default" - } -} - -func (s NetUNIXState) String() string { - switch s { - case netUnixStateUnconnected: - return "unconnected" - case netUnixStateConnecting: - return "connecting" - case netUnixStateConnected: - return "connected" - case netUnixStateDisconnected: - return "disconnected" - } - return "unknown" -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_wireless.go b/openshift/tools/vendor/github.com/prometheus/procfs/net_wireless.go deleted file mode 100644 index 7c597bc870..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/net_wireless.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Wireless models the content of /proc/net/wireless. -type Wireless struct { - Name string - - // Status is the current 4-digit hex value status of the interface. - Status uint64 - - // QualityLink is the link quality. - QualityLink int - - // QualityLevel is the signal gain (dBm). - QualityLevel int - - // QualityNoise is the signal noise baseline (dBm). - QualityNoise int - - // DiscardedNwid is the number of discarded packets with wrong nwid/essid. - DiscardedNwid int - - // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). - DiscardedCrypt int - - // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. - DiscardedFrag int - - // DiscardedRetry is the number of discarded packets that reached max MAC retries. - DiscardedRetry int - - // DiscardedMisc is the number of discarded packets for other reasons. - DiscardedMisc int - - // MissedBeacon is the number of missed beacons/superframe. 
- MissedBeacon int -} - -// Wireless returns kernel wireless statistics. -func (fs FS) Wireless() ([]*Wireless, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) - if err != nil { - return nil, err - } - - m, err := parseWireless(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) - } - - return m, nil -} - -// parseWireless parses the contents of /proc/net/wireless. -/* -Inter-| sta-| Quality | Discarded packets | Missed | WE -face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 - eth1: 0000 5. -256. -10. 0 1 0 3 0 0 - eth2: 0000 5. -256. -20. 0 2 0 4 0 0 -*/ -func parseWireless(r io.Reader) ([]*Wireless, error) { - var ( - interfaces []*Wireless - scanner = bufio.NewScanner(r) - ) - - for n := 0; scanner.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line := scanner.Text() - - parts := strings.Split(line, ":") - if len(parts) != 2 { - return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) - } - - name := strings.TrimSpace(parts[0]) - stats := strings.Fields(parts[1]) - - if len(stats) < 10 { - return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) - } - - status, err := strconv.ParseUint(stats[0], 16, 16) - if err != nil { - return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) - } - - qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) - if err != nil { - return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) - } - - qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) - if err != nil { - return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) - } - - qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) - if err != nil { - return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) - } - - dnwid, err := strconv.Atoi(stats[4]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) - } - - dcrypt, err := strconv.Atoi(stats[5]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) - } - - dfrag, err := strconv.Atoi(stats[6]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) - } - - dretry, err := strconv.Atoi(stats[7]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) - } - - dmisc, err := strconv.Atoi(stats[8]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) - } - - mbeacon, err := strconv.Atoi(stats[9]) - if err != nil { - return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) - } - - w := &Wireless{ - Name: name, - Status: status, - QualityLink: qlink, - QualityLevel: qlevel, - QualityNoise: qnoise, - DiscardedNwid: dnwid, - DiscardedCrypt: dcrypt, - DiscardedFrag: dfrag, - DiscardedRetry: dretry, - DiscardedMisc: dmisc, - MissedBeacon: mbeacon, - } - - interfaces = append(interfaces, w) - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) - } - - return interfaces, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/net_xfrm.go 
b/openshift/tools/vendor/github.com/prometheus/procfs/net_xfrm.go
deleted file mode 100644
index 932ef20468..0000000000
--- a/openshift/tools/vendor/github.com/prometheus/procfs/net_xfrm.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2017 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
-    "bufio"
-    "fmt"
-    "os"
-    "strconv"
-    "strings"
-)
-
-// XfrmStat models the contents of /proc/net/xfrm_stat.
-type XfrmStat struct {
-    // All errors which are not matched by others
-    XfrmInError int
-    // No buffer is left
-    XfrmInBufferError int
-    // Header Error
-    XfrmInHdrError int
-    // No state found
-    // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
-    XfrmInNoStates int
-    // Transformation protocol specific error
-    // e.g. SA Key is wrong
-    XfrmInStateProtoError int
-    // Transformation mode specific error
-    XfrmInStateModeError int
-    // Sequence error
-    // e.g. sequence number is out of window
-    XfrmInStateSeqError int
-    // State is expired
-    XfrmInStateExpired int
-    // State has mismatch option
-    // e.g. UDP encapsulation type is mismatched
-    XfrmInStateMismatch int
-    // State is invalid
-    XfrmInStateInvalid int
-    // No matching template for states
-    // e.g. Inbound SAs are correct but SP rule is wrong
-    XfrmInTmplMismatch int
-    // No policy is found for states
-    // e.g. Inbound SAs are correct but no SP is found
-    XfrmInNoPols int
-    // Policy discards
-    XfrmInPolBlock int
-    // Policy error
-    XfrmInPolError int
-    // All errors which are not matched by others
-    XfrmOutError int
-    // Bundle generation error
-    XfrmOutBundleGenError int
-    // Bundle check error
-    XfrmOutBundleCheckError int
-    // No state was found
-    XfrmOutNoStates int
-    // Transformation protocol specific error
-    XfrmOutStateProtoError int
-    // Transformation mode specific error
-    XfrmOutStateModeError int
-    // Sequence error
-    // i.e. sequence number overflow
-    XfrmOutStateSeqError int
-    // State is expired
-    XfrmOutStateExpired int
-    // Policy discards
-    XfrmOutPolBlock int
-    // Policy is dead
-    XfrmOutPolDead int
-    // Policy Error
-    XfrmOutPolError int
-    // Forward routing of a packet is not allowed
-    XfrmFwdHdrError int
-    // State is invalid, perhaps expired
-    XfrmOutStateInvalid int
-    // State hasn’t been fully acquired before use
-    XfrmAcquireError int
-}
-
-// NewXfrmStat reads the xfrm_stat statistics.
-func NewXfrmStat() (XfrmStat, error) {
-    fs, err := NewFS(DefaultMountPoint)
-    if err != nil {
-        return XfrmStat{}, err
-    }
-
-    return fs.NewXfrmStat()
-}
-
-// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
-func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.proc.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case "XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/netstat.go b/openshift/tools/vendor/github.com/prometheus/procfs/netstat.go deleted file mode 100644 index 742dff453b..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/netstat.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "os" - "path/filepath" - "strconv" - "strings" -) - -// NetStat contains statistics for all the counters from one file. -type NetStat struct { - Stats map[string][]uint64 - Filename string -} - -// NetStat retrieves stats from `/proc/net/stat/`. 
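Each file under /proc/net/stat/ (for example arp_cache or nf_conntrack) begins with a header row naming the counters, followed by one row of hexadecimal values per CPU; parseNetstat below relies on that shape by reading one header line and parsing every later field with base 16. A sketch of a two-CPU file, with header names and values invented for illustration:

    entries  allocs   destroys
    000003e8 0000001c 00000009
    000003e8 00000014 00000007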
-func (fs FS) NetStat() ([]NetStat, error) { - statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) - if err != nil { - return nil, err - } - - var netStatsTotal []NetStat - - for _, filePath := range statFiles { - procNetstat, err := parseNetstat(filePath) - if err != nil { - return nil, err - } - procNetstat.Filename = filepath.Base(filePath) - - netStatsTotal = append(netStatsTotal, procNetstat) - } - return netStatsTotal, nil -} - -// parseNetstat parses the metrics from `/proc/net/stat/` file -// and returns a NetStat structure. -func parseNetstat(filePath string) (NetStat, error) { - netStat := NetStat{ - Stats: make(map[string][]uint64), - } - file, err := os.Open(filePath) - if err != nil { - return netStat, err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - scanner.Scan() - - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) - - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return NetStat{}, err - } - netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) - } - } - - return netStat, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index 368187fa88..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs FS -} - -// Procs represents a list of Proc structs. -type Procs []Proc - -var ( - ErrFileParse = errors.New("error parsing file") - ErrFileRead = errors.New("error reading file") - ErrMountPoint = errors.New("error accessing mount point") -) - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil || errors.Unwrap(err) == ErrMountPoint { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. -func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. 
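Because this change drops the vendored package wholesale rather than patching it, a compact sketch of how the process-enumeration API being deleted was driven may help; it uses only identifiers defined in this file (AllProcs, PID, Comm) and assumes a Linux host with /proc mounted:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	procs, err := procfs.AllProcs() // every numeric entry under /proc
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, p := range procs {
    		comm, err := p.Comm() // short command name from /proc/<pid>/comm
    		if err != nil {
    			continue // the process may have exited between the two reads
    		}
    		fmt.Printf("%d\t%s\n", p.PID, comm)
    	}
    }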
-func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.proc.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// NewProc returns a process for the given pid. -// -// Deprecated: Use fs.Proc() instead. -func (fs FS) NewProc(pid int) (Proc, error) { - return fs.Proc(pid) -} - -// Proc returns a process for the given pid. -func (fs FS) Proc(pid int) (Proc, error) { - if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs}, nil -} - -// AllProcs returns a list of all currently available processes. -func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.proc.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(p.path("cmdline")) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil -} - -// Wchan returns the wchan (wait channel) of a process. -func (p Proc) Wchan() (string, error) { - f, err := os.Open(p.path("wchan")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := io.ReadAll(f) - if err != nil { - return "", err - } - - wchan := string(data) - if wchan == "" || wchan == "0" { - return "", nil - } - - return wchan, nil -} - -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - data, err := util.ReadFileNoStat(p.path("comm")) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// Cwd returns the absolute path to the current working directory of the process. -func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot). -func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - -// FileDescriptors returns the currently open file descriptors of a process. 
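A companion sketch for the descriptor helpers defined below; note that FileDescriptorsLen has a stat-based fast path only on Linux v6.2+, and FileDescriptorTargets reports an empty string for descriptors whose links cannot be resolved. Again a minimal sketch assuming a mounted /proc:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	p, err := procfs.Self() // the calling process
    	if err != nil {
    		log.Fatal(err)
    	}
    	n, err := p.FileDescriptorsLen()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(n, "descriptors open")
    	targets, err := p.FileDescriptorTargets()
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, t := range targets {
    		if t != "" { // unresolvable links come back as "", per the doc comment
    			fmt.Println("  ->", t)
    		}
    	}
    }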
-func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. -func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 - if p.fs.isReal { - stat, err := os.Stat(p.path("fd")) - if err != nil { - return 0, err - } - - size := stat.Size() - if size > 0 { - return int(size), nil - } - } - - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. -func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - -// MountInfo retrieves mount information for mount points in a -// process's namespace. -// It supplies information missing in `/proc/self/mounts` and -// fixes various other problems with that file too. -func (p Proc) MountInfo() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(p.path("mountinfo")) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} - -// FileDescriptorsInfo retrieves information about all file descriptors of -// the process. -func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - var fdinfos ProcFDInfos - - for _, n := range names { - fdinfo, err := p.FDInfo(n) - if err != nil { - continue - } - fdinfos = append(fdinfos, *fdinfo) - } - - return fdinfos, nil -} - -// Schedstat returns task scheduling information for the process. 
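The schedstat file read below holds three space-separated values: cumulative time spent on the CPU (nanoseconds), cumulative run-queue wait time (nanoseconds), and the number of timeslices run, per the kernel's sched-stats documentation. A hypothetical example (values invented):

    123456789 43210987 542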
-func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := os.ReadFile(p.path("schedstat")) - if err != nil { - return ProcSchedstat{}, err - } - return parseProcSchedstat(string(contents)) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go deleted file mode 100644 index 4a64347c03..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource -// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies -// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in -// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of -// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID -// in this hierarchy -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type Cgroup struct { - // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one - // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number - HierarchyID int - // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For - // Cgroups V2 this may be empty, as all active controllers use the same hierarchy - Controllers []string - // Path of this control group, relative to the mount point of the cgroupfs representing this specific - // hierarchy - Path string -} - -// parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path. -func parseCgroupString(cgroupStr string) (*Cgroup, error) { - var err error - - fields := strings.SplitN(cgroupStr, ":", 3) - if len(fields) < 3 { - return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) - } - - cgroup := &Cgroup{ - Path: fields[2], - Controllers: nil, - } - cgroup.HierarchyID, err = strconv.Atoi(fields[0]) - if err != nil { - return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) - } - if fields[1] != "" { - ssNames := strings.Split(fields[1], ",") - cgroup.Controllers = append(cgroup.Controllers, ssNames...) - } - return cgroup, nil -} - -// parseCgroups reads each line of the /proc/[pid]/cgroup file. 
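Concretely, parseCgroupString above accepts both hierarchy flavours; two hypothetical /proc/[pid]/cgroup lines:

    12:cpu,cpuacct:/user.slice
    0::/user.slice/user-1000.slice/session-3.scope

The first is a cgroup v1 entry (hierarchy 12, two controllers); the second is the single v2 hierarchy, where the controller list is empty and HierarchyID is always 0.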
-func parseCgroups(data []byte) ([]Cgroup, error) { - var cgroups []Cgroup - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseCgroupString(mountString) - if err != nil { - return nil, err - } - cgroups = append(cgroups, *parsedMounts) - } - - err := scanner.Err() - return cgroups, err -} - -// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process -// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system. -func (p Proc) Cgroups() ([]Cgroup, error) { - data, err := util.ReadFileNoStat(p.path("cgroup")) - if err != nil { - return nil, err - } - return parseCgroups(data) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_cgroups.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_cgroups.go deleted file mode 100644 index 5dd4938999..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CgroupSummary models one line from /proc/cgroups. -// This file contains information about the controllers that are compiled into the kernel. -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type CgroupSummary struct { - // The name of the controller. controller is also known as subsystem. - SubsysName string - // The unique ID of the cgroup hierarchy on which this controller is mounted. - Hierarchy int - // The number of control groups in this hierarchy using this controller. - Cgroups int - // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled - Enabled int -} - -// parseCgroupSummary parses each line of the /proc/cgroup file -// Line format is `subsys_name hierarchy num_cgroups enabled`. 
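A hypothetical /proc/cgroups excerpt matching that format (the leading comment line is skipped by the parsing code that follows):

    #subsys_name    hierarchy    num_cgroups    enabled
    cpuset          3            1              1
    cpu             7            42             1
    memory          9            108            1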
-func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { - var err error - - fields := strings.Fields(CgroupSummaryStr) - // require at least 4 fields - if len(fields) < 4 { - return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) - } - - CgroupSummary := &CgroupSummary{ - SubsysName: fields[0], - } - CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) - if err != nil { - return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) - } - CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) - if err != nil { - return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) - } - CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) - if err != nil { - return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) - } - return CgroupSummary, nil -} - -// parseCgroupSummary reads each line of the /proc/cgroup file. -func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { - var CgroupSummarys []CgroupSummary - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - CgroupSummaryString := scanner.Text() - // ignore comment lines - if strings.HasPrefix(CgroupSummaryString, "#") { - continue - } - CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) - if err != nil { - return nil, err - } - CgroupSummarys = append(CgroupSummarys, *CgroupSummary) - } - - err := scanner.Err() - return CgroupSummarys, err -} - -// CgroupSummarys returns information about current /proc/cgroups. -func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) - if err != nil { - return nil, err - } - return parseCgroupSummary(data) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_environ.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_environ.go deleted file mode 100644 index 57a89895d6..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_environ.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Environ reads process environments from `/proc//environ`. 
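The environ file is a single NUL-separated record with a trailing separator, which is why the method below drops the final empty element after splitting. Shown with \0 standing in for the NUL byte (contents invented):

    PATH=/usr/bin\0HOME=/root\0LANG=C.UTF-8\0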
-func (p Proc) Environ() ([]string, error) { - environments := make([]string, 0) - - data, err := util.ReadFileNoStat(p.path("environ")) - if err != nil { - return environments, err - } - - environments = strings.Split(string(data), "\000") - if len(environments) > 0 { - environments = environments[:len(environments)-1] - } - - return environments, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_fdinfo.go deleted file mode 100644 index fa761b3529..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) - rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) - rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) - rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) - rInotify = regexp.MustCompile(`^inotify`) - rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) -) - -// ProcFDInfo contains represents file descriptor information. -type ProcFDInfo struct { - // File descriptor - FD string - // File offset - Pos string - // File access mode and status flags - Flags string - // Mount point ID - MntID string - // Inode number - Ino string - // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) - InotifyInfos []InotifyInfo -} - -// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. -func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { - data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) - if err != nil { - return nil, err - } - - var text, pos, flags, mntid, ino string - var inotify []InotifyInfo - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - text = scanner.Text() - if rPos.MatchString(text) { - pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { - flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { - mntid = rMntID.FindStringSubmatch(text)[1] - } else if rIno.MatchString(text) { - ino = rIno.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { - newInotify, err := parseInotifyInfo(text) - if err != nil { - return nil, err - } - inotify = append(inotify, *newInotify) - } - } - - i := &ProcFDInfo{ - FD: fd, - Pos: pos, - Flags: flags, - MntID: mntid, - Ino: ino, - InotifyInfos: inotify, - } - - return i, nil -} - -// InotifyInfo represents a single inotify line in the fdinfo file. -type InotifyInfo struct { - // Watch descriptor number - WD string - // Inode number - Ino string - // Device ID - Sdev string - // Mask of events being monitored - Mask string -} - -// InotifyInfo constructor. Only available on kernel 3.8+. 
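A hypothetical fdinfo inotify line of the shape the rInotifyParts regexp above captures, namely wd, ino, sdev, and an optional mask, all hexadecimal (values invented):

    inotify wd:3 ino:9e7e sdev:800013 mask:800afce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:7e9e0000640d1b6d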
-func parseInotifyInfo(line string) (*InotifyInfo, error) { - m := rInotifyParts.FindStringSubmatch(line) - if len(m) >= 4 { - var mask string - if len(m) == 5 { - mask = m[4] - } - i := &InotifyInfo{ - WD: m[1], - Ino: m[2], - Sdev: m[3], - Mask: mask, - } - return i, nil - } - return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) -} - -// ProcFDInfos represents a list of ProcFDInfo structs. -type ProcFDInfos []ProcFDInfo - -func (p ProcFDInfos) Len() int { return len(p) } -func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } - -// InotifyWatchLen returns the total number of inotify watches. -func (p ProcFDInfos) InotifyWatchLen() (int, error) { - length := 0 - for _, f := range p { - length += len(f.InotifyInfos) - } - - return length, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_interrupts.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_interrupts.go deleted file mode 100644 index 86b4b45246..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Interrupt represents a single interrupt line. -type Interrupt struct { - // Info is the type of interrupt. - Info string - // Devices is the name of the device that is located at that IRQ - Devices string - // Values is the number of interrupts per CPU. - Values []string -} - -// Interrupts models the content of /proc/interrupts. Key is the IRQ number. -// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts -// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output -type Interrupts map[string]Interrupt - -// Interrupts creates a new instance from a given Proc instance. 
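For reference, the parser that follows derives the CPU count from the header row, then splits each row on the IRQ name's trailing colon; a trimmed, invented two-CPU example of /proc/interrupts:

               CPU0       CPU1
      0:         36          0   IO-APIC    2-edge      timer
    NMI:          0          0   Non-maskable interrupts

Numeric IRQs carry a chip name (Info) plus a device list; named rows such as NMI carry only a description.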
-func (p Proc) Interrupts() (Interrupts, error) { - data, err := util.ReadFileNoStat(p.path("interrupts")) - if err != nil { - return nil, err - } - return parseInterrupts(bytes.NewReader(data)) -} - -func parseInterrupts(r io.Reader) (Interrupts, error) { - var ( - interrupts = Interrupts{} - scanner = bufio.NewScanner(r) - ) - - if !scanner.Scan() { - return nil, errors.New("interrupts empty") - } - cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu - - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - if len(parts) == 0 { // skip empty lines - continue - } - if len(parts) < 2 { - return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) - } - intName := parts[0][:len(parts[0])-1] // remove trailing : - - if len(parts) == 2 { - interrupts[intName] = Interrupt{ - Info: "", - Devices: "", - Values: []string{ - parts[1], - }, - } - continue - } - - intr := Interrupt{ - Values: parts[1 : cpuNum+1], - } - - if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt - intr.Info = parts[cpuNum+1] - intr.Devices = strings.Join(parts[cpuNum+2:], " ") - } else { - intr.Info = strings.Join(parts[cpuNum+1:], " ") - } - interrupts[intName] = intr - } - - return interrupts, scanner.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_io.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index d15b66ddb6..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcIO models the content of /proc//io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// IO creates a new ProcIO instance from a given Proc instance. 
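This is the exact key set the Sscanf format string below expects, with invented values; any deviation in order or spelling makes the scan fail:

    rchar: 323934931
    wchar: 323929600
    syscr: 632687
    syscw: 632675
    read_bytes: 0
    write_bytes: 4096
    cancelled_write_bytes: 0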
-func (p Proc) IO() (ProcIO, error) { - pio := ProcIO{} - - data, err := util.ReadFileNoStat(p.path("io")) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" //nolint:misspell - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - - return pio, err -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_limits.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index 9530b14bc6..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime uint64 - // Maximum size of files that the process may create. - FileSize uint64 - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize uint64 - // Maximum size of the process stack in bytes. - StackSize uint64 - // Maximum size of a core file. - CoreFileSize uint64 - // Limit of the process's resident set in pages. - ResidentSet uint64 - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes uint64 - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles uint64 - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory uint64 - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace uint64 - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks uint64 - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals uint64 - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize uint64 - // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority uint64 - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority uint64 - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. 
- RealtimeTimeout uint64 -} - -const ( - limitsFields = 4 - limitsUnlimited = "unlimited" -) - -var ( - limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) -) - -// NewLimits returns the current soft limits of the process. -// -// Deprecated: Use p.Limits() instead. -func (p Proc) NewLimits() (ProcLimits, error) { - return p.Limits() -} - -// Limits returns the current soft limits of the process. -func (p Proc) Limits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - - s.Scan() // Skip limits header - - for s.Scan() { - //fields := limitsMatch.Split(s.Text(), limitsFields) - fields := limitsMatch.FindStringSubmatch(s.Text()) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) - } - - switch fields[1] { - case "Max cpu time": - l.CPUTime, err = parseUint(fields[2]) - case "Max file size": - l.FileSize, err = parseUint(fields[2]) - case "Max data size": - l.DataSize, err = parseUint(fields[2]) - case "Max stack size": - l.StackSize, err = parseUint(fields[2]) - case "Max core file size": - l.CoreFileSize, err = parseUint(fields[2]) - case "Max resident set": - l.ResidentSet, err = parseUint(fields[2]) - case "Max processes": - l.Processes, err = parseUint(fields[2]) - case "Max open files": - l.OpenFiles, err = parseUint(fields[2]) - case "Max locked memory": - l.LockedMemory, err = parseUint(fields[2]) - case "Max address space": - l.AddressSpace, err = parseUint(fields[2]) - case "Max file locks": - l.FileLocks, err = parseUint(fields[2]) - case "Max pending signals": - l.PendingSignals, err = parseUint(fields[2]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseUint(fields[2]) - case "Max nice priority": - l.NicePriority, err = parseUint(fields[2]) - case "Max realtime priority": - l.RealtimePriority, err = parseUint(fields[2]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseUint(fields[2]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseUint(s string) (uint64, error) { - if s == limitsUnlimited { - return 18446744073709551615, nil - } - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err) - } - return i, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_maps.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_maps.go deleted file mode 100644 index 7e75c286b5..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_maps.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !js - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. -type ProcMapPermissions struct { - // mapping has the [R]ead flag set - Read bool - // mapping has the [W]rite flag set - Write bool - // mapping has the [X]ecutable flag set - Execute bool - // mapping has the [S]hared flag set - Shared bool - // mapping is marked as [P]rivate (copy on write) - Private bool -} - -// ProcMap contains the process memory-mappings of the process -// read from `/proc/[pid]/maps`. -type ProcMap struct { - // The start address of current mapping. - StartAddr uintptr - // The end address of the current mapping - EndAddr uintptr - // The permissions for this mapping - Perms *ProcMapPermissions - // The current offset into the file/fd (e.g., shared libs) - Offset int64 - // Device owner of this mapping (major:minor) in Mkdev format. - Dev uint64 - // The inode of the device above - Inode uint64 - // The file or psuedofile (or empty==anonymous) - Pathname string -} - -// parseDevice parses the device token of a line and converts it to a dev_t -// (mkdev) like structure. -func parseDevice(s string) (uint64, error) { - i := strings.Index(s, ":") - if i == -1 { - return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) - } - - major, err := strconv.ParseUint(s[0:i], 16, 0) - if err != nil { - return 0, err - } - - minor, err := strconv.ParseUint(s[i+1:], 16, 0) - if err != nil { - return 0, err - } - - return unix.Mkdev(uint32(major), uint32(minor)), nil -} - -// parseAddress converts a hex-string to a uintptr. -func parseAddress(s string) (uintptr, error) { - a, err := strconv.ParseUint(s, 16, 0) - if err != nil { - return 0, err - } - - return uintptr(a), nil -} - -// parseAddresses parses the start-end address. -func parseAddresses(s string) (uintptr, uintptr, error) { - idx := strings.Index(s, "-") - if idx == -1 { - return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) - } - - saddr, err := parseAddress(s[0:idx]) - if err != nil { - return 0, 0, err - } - - eaddr, err := parseAddress(s[idx+1:]) - if err != nil { - return 0, 0, err - } - - return saddr, eaddr, nil -} - -// parsePermissions parses a token and returns any that are set. -func parsePermissions(s string) (*ProcMapPermissions, error) { - if len(s) < 4 { - return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) - } - - perms := ProcMapPermissions{} - for _, ch := range s { - switch ch { - case 'r': - perms.Read = true - case 'w': - perms.Write = true - case 'x': - perms.Execute = true - case 'p': - perms.Private = true - case 's': - perms.Shared = true - } - } - - return &perms, nil -} - -// parseProcMap will attempt to parse a single line within a proc/[pid]/maps -// buffer. 
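Two hypothetical maps lines of the shape parsed below (address range, permissions, file offset, device major:minor in hex, inode, optional pathname):

    55d4c0a3d000-55d4c0a5f000 r-xp 00000000 08:01 1322871   /usr/bin/cat
    7ffc8a3b0000-7ffc8a3d1000 rw-p 00000000 00:00 0         [stack]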
-func parseProcMap(text string) (*ProcMap, error) { - fields := strings.Fields(text) - if len(fields) < 5 { - return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) - } - - saddr, eaddr, err := parseAddresses(fields[0]) - if err != nil { - return nil, err - } - - perms, err := parsePermissions(fields[1]) - if err != nil { - return nil, err - } - - offset, err := strconv.ParseInt(fields[2], 16, 0) - if err != nil { - return nil, err - } - - device, err := parseDevice(fields[3]) - if err != nil { - return nil, err - } - - inode, err := strconv.ParseUint(fields[4], 10, 0) - if err != nil { - return nil, err - } - - pathname := "" - - if len(fields) >= 5 { - pathname = strings.Join(fields[5:], " ") - } - - return &ProcMap{ - StartAddr: saddr, - EndAddr: eaddr, - Perms: perms, - Offset: offset, - Dev: device, - Inode: inode, - Pathname: pathname, - }, nil -} - -// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the -// process. -func (p Proc) ProcMaps() ([]*ProcMap, error) { - file, err := os.Open(p.path("maps")) - if err != nil { - return nil, err - } - defer file.Close() - - maps := []*ProcMap{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - m, err := parseProcMap(scan.Text()) - if err != nil { - return nil, err - } - - maps = append(maps, m) - } - - return maps, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_netstat.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_netstat.go deleted file mode 100644 index 4248c1716e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_netstat.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcNetstat models the content of /proc//net/netstat. -type ProcNetstat struct { - // The process ID. 
- PID int - TcpExt - IpExt -} - -type TcpExt struct { // nolint:revive - SyncookiesSent *float64 - SyncookiesRecv *float64 - SyncookiesFailed *float64 - EmbryonicRsts *float64 - PruneCalled *float64 - RcvPruned *float64 - OfoPruned *float64 - OutOfWindowIcmps *float64 - LockDroppedIcmps *float64 - ArpFilter *float64 - TW *float64 - TWRecycled *float64 - TWKilled *float64 - PAWSActive *float64 - PAWSEstab *float64 - DelayedACKs *float64 - DelayedACKLocked *float64 - DelayedACKLost *float64 - ListenOverflows *float64 - ListenDrops *float64 - TCPHPHits *float64 - TCPPureAcks *float64 - TCPHPAcks *float64 - TCPRenoRecovery *float64 - TCPSackRecovery *float64 - TCPSACKReneging *float64 - TCPSACKReorder *float64 - TCPRenoReorder *float64 - TCPTSReorder *float64 - TCPFullUndo *float64 - TCPPartialUndo *float64 - TCPDSACKUndo *float64 - TCPLossUndo *float64 - TCPLostRetransmit *float64 - TCPRenoFailures *float64 - TCPSackFailures *float64 - TCPLossFailures *float64 - TCPFastRetrans *float64 - TCPSlowStartRetrans *float64 - TCPTimeouts *float64 - TCPLossProbes *float64 - TCPLossProbeRecovery *float64 - TCPRenoRecoveryFail *float64 - TCPSackRecoveryFail *float64 - TCPRcvCollapsed *float64 - TCPDSACKOldSent *float64 - TCPDSACKOfoSent *float64 - TCPDSACKRecv *float64 - TCPDSACKOfoRecv *float64 - TCPAbortOnData *float64 - TCPAbortOnClose *float64 - TCPAbortOnMemory *float64 - TCPAbortOnTimeout *float64 - TCPAbortOnLinger *float64 - TCPAbortFailed *float64 - TCPMemoryPressures *float64 - TCPMemoryPressuresChrono *float64 - TCPSACKDiscard *float64 - TCPDSACKIgnoredOld *float64 - TCPDSACKIgnoredNoUndo *float64 - TCPSpuriousRTOs *float64 - TCPMD5NotFound *float64 - TCPMD5Unexpected *float64 - TCPMD5Failure *float64 - TCPSackShifted *float64 - TCPSackMerged *float64 - TCPSackShiftFallback *float64 - TCPBacklogDrop *float64 - PFMemallocDrop *float64 - TCPMinTTLDrop *float64 - TCPDeferAcceptDrop *float64 - IPReversePathFilter *float64 - TCPTimeWaitOverflow *float64 - TCPReqQFullDoCookies *float64 - TCPReqQFullDrop *float64 - TCPRetransFail *float64 - TCPRcvCoalesce *float64 - TCPRcvQDrop *float64 - TCPOFOQueue *float64 - TCPOFODrop *float64 - TCPOFOMerge *float64 - TCPChallengeACK *float64 - TCPSYNChallenge *float64 - TCPFastOpenActive *float64 - TCPFastOpenActiveFail *float64 - TCPFastOpenPassive *float64 - TCPFastOpenPassiveFail *float64 - TCPFastOpenListenOverflow *float64 - TCPFastOpenCookieReqd *float64 - TCPFastOpenBlackhole *float64 - TCPSpuriousRtxHostQueues *float64 - BusyPollRxPackets *float64 - TCPAutoCorking *float64 - TCPFromZeroWindowAdv *float64 - TCPToZeroWindowAdv *float64 - TCPWantZeroWindowAdv *float64 - TCPSynRetrans *float64 - TCPOrigDataSent *float64 - TCPHystartTrainDetect *float64 - TCPHystartTrainCwnd *float64 - TCPHystartDelayDetect *float64 - TCPHystartDelayCwnd *float64 - TCPACKSkippedSynRecv *float64 - TCPACKSkippedPAWS *float64 - TCPACKSkippedSeq *float64 - TCPACKSkippedFinWait2 *float64 - TCPACKSkippedTimeWait *float64 - TCPACKSkippedChallenge *float64 - TCPWinProbe *float64 - TCPKeepAlive *float64 - TCPMTUPFail *float64 - TCPMTUPSuccess *float64 - TCPWqueueTooBig *float64 -} - -type IpExt struct { // nolint:revive - InNoRoutes *float64 - InTruncatedPkts *float64 - InMcastPkts *float64 - OutMcastPkts *float64 - InBcastPkts *float64 - OutBcastPkts *float64 - InOctets *float64 - OutOctets *float64 - InMcastOctets *float64 - OutMcastOctets *float64 - InBcastOctets *float64 - OutBcastOctets *float64 - InCsumErrors *float64 - InNoECTPkts *float64 - InECT1Pkts *float64 - InECT0Pkts 
*float64 - InCEPkts *float64 - ReasmOverlaps *float64 -} - -func (p Proc) Netstat() (ProcNetstat, error) { - filename := p.path("net/netstat") - data, err := util.ReadFileNoStat(filename) - if err != nil { - return ProcNetstat{PID: p.PID}, err - } - procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) - procNetstat.PID = p.PID - return procNetstat, err -} - -// parseProcNetstat parses the metrics from proc//net/netstat file -// and returns a ProcNetstat structure. -func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { - var ( - scanner = bufio.NewScanner(r) - procNetstat = ProcNetstat{} - ) - - for scanner.Scan() { - nameParts := strings.Split(scanner.Text(), " ") - scanner.Scan() - valueParts := strings.Split(scanner.Text(), " ") - // Remove trailing :. - protocol := strings.TrimSuffix(nameParts[0], ":") - if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", - ErrFileParse, fileName, protocol) - } - for i := 1; i < len(nameParts); i++ { - value, err := strconv.ParseFloat(valueParts[i], 64) - if err != nil { - return procNetstat, err - } - key := nameParts[i] - - switch protocol { - case "TcpExt": - switch key { - case "SyncookiesSent": - procNetstat.SyncookiesSent = &value - case "SyncookiesRecv": - procNetstat.SyncookiesRecv = &value - case "SyncookiesFailed": - procNetstat.SyncookiesFailed = &value - case "EmbryonicRsts": - procNetstat.EmbryonicRsts = &value - case "PruneCalled": - procNetstat.PruneCalled = &value - case "RcvPruned": - procNetstat.RcvPruned = &value - case "OfoPruned": - procNetstat.OfoPruned = &value - case "OutOfWindowIcmps": - procNetstat.OutOfWindowIcmps = &value - case "LockDroppedIcmps": - procNetstat.LockDroppedIcmps = &value - case "ArpFilter": - procNetstat.ArpFilter = &value - case "TW": - procNetstat.TW = &value - case "TWRecycled": - procNetstat.TWRecycled = &value - case "TWKilled": - procNetstat.TWKilled = &value - case "PAWSActive": - procNetstat.PAWSActive = &value - case "PAWSEstab": - procNetstat.PAWSEstab = &value - case "DelayedACKs": - procNetstat.DelayedACKs = &value - case "DelayedACKLocked": - procNetstat.DelayedACKLocked = &value - case "DelayedACKLost": - procNetstat.DelayedACKLost = &value - case "ListenOverflows": - procNetstat.ListenOverflows = &value - case "ListenDrops": - procNetstat.ListenDrops = &value - case "TCPHPHits": - procNetstat.TCPHPHits = &value - case "TCPPureAcks": - procNetstat.TCPPureAcks = &value - case "TCPHPAcks": - procNetstat.TCPHPAcks = &value - case "TCPRenoRecovery": - procNetstat.TCPRenoRecovery = &value - case "TCPSackRecovery": - procNetstat.TCPSackRecovery = &value - case "TCPSACKReneging": - procNetstat.TCPSACKReneging = &value - case "TCPSACKReorder": - procNetstat.TCPSACKReorder = &value - case "TCPRenoReorder": - procNetstat.TCPRenoReorder = &value - case "TCPTSReorder": - procNetstat.TCPTSReorder = &value - case "TCPFullUndo": - procNetstat.TCPFullUndo = &value - case "TCPPartialUndo": - procNetstat.TCPPartialUndo = &value - case "TCPDSACKUndo": - procNetstat.TCPDSACKUndo = &value - case "TCPLossUndo": - procNetstat.TCPLossUndo = &value - case "TCPLostRetransmit": - procNetstat.TCPLostRetransmit = &value - case "TCPRenoFailures": - procNetstat.TCPRenoFailures = &value - case "TCPSackFailures": - procNetstat.TCPSackFailures = &value - case "TCPLossFailures": - procNetstat.TCPLossFailures = &value - case "TCPFastRetrans": - procNetstat.TCPFastRetrans = &value - case "TCPSlowStartRetrans": - 
procNetstat.TCPSlowStartRetrans = &value - case "TCPTimeouts": - procNetstat.TCPTimeouts = &value - case "TCPLossProbes": - procNetstat.TCPLossProbes = &value - case "TCPLossProbeRecovery": - procNetstat.TCPLossProbeRecovery = &value - case "TCPRenoRecoveryFail": - procNetstat.TCPRenoRecoveryFail = &value - case "TCPSackRecoveryFail": - procNetstat.TCPSackRecoveryFail = &value - case "TCPRcvCollapsed": - procNetstat.TCPRcvCollapsed = &value - case "TCPDSACKOldSent": - procNetstat.TCPDSACKOldSent = &value - case "TCPDSACKOfoSent": - procNetstat.TCPDSACKOfoSent = &value - case "TCPDSACKRecv": - procNetstat.TCPDSACKRecv = &value - case "TCPDSACKOfoRecv": - procNetstat.TCPDSACKOfoRecv = &value - case "TCPAbortOnData": - procNetstat.TCPAbortOnData = &value - case "TCPAbortOnClose": - procNetstat.TCPAbortOnClose = &value - case "TCPDeferAcceptDrop": - procNetstat.TCPDeferAcceptDrop = &value - case "IPReversePathFilter": - procNetstat.IPReversePathFilter = &value - case "TCPTimeWaitOverflow": - procNetstat.TCPTimeWaitOverflow = &value - case "TCPReqQFullDoCookies": - procNetstat.TCPReqQFullDoCookies = &value - case "TCPReqQFullDrop": - procNetstat.TCPReqQFullDrop = &value - case "TCPRetransFail": - procNetstat.TCPRetransFail = &value - case "TCPRcvCoalesce": - procNetstat.TCPRcvCoalesce = &value - case "TCPRcvQDrop": - procNetstat.TCPRcvQDrop = &value - case "TCPOFOQueue": - procNetstat.TCPOFOQueue = &value - case "TCPOFODrop": - procNetstat.TCPOFODrop = &value - case "TCPOFOMerge": - procNetstat.TCPOFOMerge = &value - case "TCPChallengeACK": - procNetstat.TCPChallengeACK = &value - case "TCPSYNChallenge": - procNetstat.TCPSYNChallenge = &value - case "TCPFastOpenActive": - procNetstat.TCPFastOpenActive = &value - case "TCPFastOpenActiveFail": - procNetstat.TCPFastOpenActiveFail = &value - case "TCPFastOpenPassive": - procNetstat.TCPFastOpenPassive = &value - case "TCPFastOpenPassiveFail": - procNetstat.TCPFastOpenPassiveFail = &value - case "TCPFastOpenListenOverflow": - procNetstat.TCPFastOpenListenOverflow = &value - case "TCPFastOpenCookieReqd": - procNetstat.TCPFastOpenCookieReqd = &value - case "TCPFastOpenBlackhole": - procNetstat.TCPFastOpenBlackhole = &value - case "TCPSpuriousRtxHostQueues": - procNetstat.TCPSpuriousRtxHostQueues = &value - case "BusyPollRxPackets": - procNetstat.BusyPollRxPackets = &value - case "TCPAutoCorking": - procNetstat.TCPAutoCorking = &value - case "TCPFromZeroWindowAdv": - procNetstat.TCPFromZeroWindowAdv = &value - case "TCPToZeroWindowAdv": - procNetstat.TCPToZeroWindowAdv = &value - case "TCPWantZeroWindowAdv": - procNetstat.TCPWantZeroWindowAdv = &value - case "TCPSynRetrans": - procNetstat.TCPSynRetrans = &value - case "TCPOrigDataSent": - procNetstat.TCPOrigDataSent = &value - case "TCPHystartTrainDetect": - procNetstat.TCPHystartTrainDetect = &value - case "TCPHystartTrainCwnd": - procNetstat.TCPHystartTrainCwnd = &value - case "TCPHystartDelayDetect": - procNetstat.TCPHystartDelayDetect = &value - case "TCPHystartDelayCwnd": - procNetstat.TCPHystartDelayCwnd = &value - case "TCPACKSkippedSynRecv": - procNetstat.TCPACKSkippedSynRecv = &value - case "TCPACKSkippedPAWS": - procNetstat.TCPACKSkippedPAWS = &value - case "TCPACKSkippedSeq": - procNetstat.TCPACKSkippedSeq = &value - case "TCPACKSkippedFinWait2": - procNetstat.TCPACKSkippedFinWait2 = &value - case "TCPACKSkippedTimeWait": - procNetstat.TCPACKSkippedTimeWait = &value - case "TCPACKSkippedChallenge": - procNetstat.TCPACKSkippedChallenge = &value - case "TCPWinProbe": - procNetstat.TCPWinProbe = 
&value - case "TCPKeepAlive": - procNetstat.TCPKeepAlive = &value - case "TCPMTUPFail": - procNetstat.TCPMTUPFail = &value - case "TCPMTUPSuccess": - procNetstat.TCPMTUPSuccess = &value - case "TCPWqueueTooBig": - procNetstat.TCPWqueueTooBig = &value - } - case "IpExt": - switch key { - case "InNoRoutes": - procNetstat.InNoRoutes = &value - case "InTruncatedPkts": - procNetstat.InTruncatedPkts = &value - case "InMcastPkts": - procNetstat.InMcastPkts = &value - case "OutMcastPkts": - procNetstat.OutMcastPkts = &value - case "InBcastPkts": - procNetstat.InBcastPkts = &value - case "OutBcastPkts": - procNetstat.OutBcastPkts = &value - case "InOctets": - procNetstat.InOctets = &value - case "OutOctets": - procNetstat.OutOctets = &value - case "InMcastOctets": - procNetstat.InMcastOctets = &value - case "OutMcastOctets": - procNetstat.OutMcastOctets = &value - case "InBcastOctets": - procNetstat.InBcastOctets = &value - case "OutBcastOctets": - procNetstat.OutBcastOctets = &value - case "InCsumErrors": - procNetstat.InCsumErrors = &value - case "InNoECTPkts": - procNetstat.InNoECTPkts = &value - case "InECT1Pkts": - procNetstat.InECT1Pkts = &value - case "InECT0Pkts": - procNetstat.InECT0Pkts = &value - case "InCEPkts": - procNetstat.InCEPkts = &value - case "ReasmOverlaps": - procNetstat.ReasmOverlaps = &value - } - } - } - } - return procNetstat, scanner.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_ns.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 100644 index 0f8f847f95..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// Namespaces reads from /proc//ns/* to get the namespaces of which the -// process is a member. 
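Entries under /proc/[pid]/ns are symlinks whose targets encode the namespace type and inode, which the function below splits on the colon and strips of brackets; hypothetical readlink targets:

    net -> net:[4026531992]
    pid -> pid:[4026531836]
    mnt -> mnt:[4026531840]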
-func (p Proc) Namespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_psi.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_psi.go deleted file mode 100644 index ccd35f153a..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_psi.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// The PSI / pressure interface is described at -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt -// Each resource (cpu, io, memory, ...) is exposed as a single file. -// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. -// Each line contains several averages (over n seconds) and a total in µs. -// -// Example io pressure file: -// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 -// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" - -// PSILine is a single line of values as returned by `/proc/pressure/*`. -// -// The Avg entries are averages over n seconds, as a percentage. -// The Total line is in microseconds. -type PSILine struct { - Avg10 float64 - Avg60 float64 - Avg300 float64 - Total uint64 -} - -// PSIStats represent pressure stall information from /proc/pressure/* -// -// "Some" indicates the share of time in which at least some tasks are stalled. -// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. -type PSIStats struct { - Some *PSILine - Full *PSILine -} - -// PSIStatsForResource reads pressure stall information for the specified -// resource from /proc/pressure/. At time of writing this can be -// either "cpu", "memory" or "io". 
-func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) - if err != nil { - return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) - } - - return parsePSIStats(bytes.NewReader(data)) -} - -// parsePSIStats parses the specified file for pressure stall information. -func parsePSIStats(r io.Reader) (PSIStats, error) { - psiStats := PSIStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - l := scanner.Text() - prefix := strings.Split(l, " ")[0] - switch prefix { - case "some": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Some = &psi - case "full": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Full = &psi - default: - // If we encounter a line with an unknown prefix, ignore it and move on. - // Should new measurement types be added in the future, we'll simply ignore them - // instead of erroring on retrieval. - continue - } - } - - return psiStats, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_smaps.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_smaps.go deleted file mode 100644 index 9a297afcf8..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_smaps.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "bufio" - "errors" - "os" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - // Match the header line before each mapped zone in `/proc/pid/smaps`. - procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) -) - -type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM. - Rss uint64 - // Process's proportional share of this mapping. - Pss uint64 - // Size in bytes of clean shared pages. - SharedClean uint64 - // Size in bytes of dirty shared pages. - SharedDirty uint64 - // Size in bytes of clean private pages. - PrivateClean uint64 - // Size in bytes of dirty private pages. - PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed. - Referenced uint64 - // Amount of memory that does not belong to any file. - Anonymous uint64 - // Amount of would-be-anonymous memory currently on swap. - Swap uint64 - // Process's proportional memory on swap. - SwapPss uint64 -} - -// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the -// process. -// -// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will -// be read and summed.
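Likewise, a short sketch of how the smaps rollup reader below was typically consumed; procfs.Self comes from proc.go, outside this hunk:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // Self() is defined in proc.go, outside this diff
	if err != nil {
		panic(err)
	}
	// Falls back to summing /proc/<pid>/smaps on kernels older than 4.15.
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		panic(err)
	}
	fmt.Printf("RSS=%d bytes PSS=%d bytes\n", rollup.Rss, rollup.Pss)
}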
-func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { - data, err := util.ReadFileNoStat(p.path("smaps_rollup")) - if err != nil && os.IsNotExist(err) { - return p.procSMapsRollupManual() - } - if err != nil { - return ProcSMapsRollup{}, err - } - - lines := strings.Split(string(data), "\n") - smaps := ProcSMapsRollup{} - - // skip the first line, which doesn't contain the information we need - lines = lines[1:] - for _, line := range lines { - if line == "" { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -// Read /proc/pid/smaps and do the roll-up in Go code. -func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { - file, err := os.Open(p.path("smaps")) - if err != nil { - return ProcSMapsRollup{}, err - } - defer file.Close() - - smaps := ProcSMapsRollup{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - line := scan.Text() - - if procSMapsHeaderLine.MatchString(line) { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -func (s *ProcSMapsRollup) parseLine(line string) error { - kv := strings.SplitN(line, ":", 2) - if len(kv) != 2 { - return errors.New("invalid smaps line, missing colon") - } - - k := kv[0] - if k == "VmFlags" { - return nil - } - - v := strings.TrimSpace(kv[1]) - v = strings.TrimSuffix(v, " kB") - - vKBytes, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return err - } - vBytes := vKBytes * 1024 - - s.addValue(k, vBytes) - - return nil -} - -func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { - switch k { - case "Rss": - s.Rss += vUintBytes - case "Pss": - s.Pss += vUintBytes - case "Shared_Clean": - s.SharedClean += vUintBytes - case "Shared_Dirty": - s.SharedDirty += vUintBytes - case "Private_Clean": - s.PrivateClean += vUintBytes - case "Private_Dirty": - s.PrivateDirty += vUintBytes - case "Referenced": - s.Referenced += vUintBytes - case "Anonymous": - s.Anonymous += vUintBytes - case "Swap": - s.Swap += vUintBytes - case "SwapPss": - s.SwapPss += vUintBytes - } -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_snmp.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_snmp.go deleted file mode 100644 index 4bdc90b07e..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_snmp.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcSnmp models the content of /proc/<pid>/net/snmp. -type ProcSnmp struct { - // The process ID.
- PID int - Ip - Icmp - IcmpMsg - Tcp - Udp - UdpLite -} - -type Ip struct { // nolint:revive - Forwarding *float64 - DefaultTTL *float64 - InReceives *float64 - InHdrErrors *float64 - InAddrErrors *float64 - ForwDatagrams *float64 - InUnknownProtos *float64 - InDiscards *float64 - InDelivers *float64 - OutRequests *float64 - OutDiscards *float64 - OutNoRoutes *float64 - ReasmTimeout *float64 - ReasmReqds *float64 - ReasmOKs *float64 - ReasmFails *float64 - FragOKs *float64 - FragFails *float64 - FragCreates *float64 -} - -type Icmp struct { // nolint:revive - InMsgs *float64 - InErrors *float64 - InCsumErrors *float64 - InDestUnreachs *float64 - InTimeExcds *float64 - InParmProbs *float64 - InSrcQuenchs *float64 - InRedirects *float64 - InEchos *float64 - InEchoReps *float64 - InTimestamps *float64 - InTimestampReps *float64 - InAddrMasks *float64 - InAddrMaskReps *float64 - OutMsgs *float64 - OutErrors *float64 - OutDestUnreachs *float64 - OutTimeExcds *float64 - OutParmProbs *float64 - OutSrcQuenchs *float64 - OutRedirects *float64 - OutEchos *float64 - OutEchoReps *float64 - OutTimestamps *float64 - OutTimestampReps *float64 - OutAddrMasks *float64 - OutAddrMaskReps *float64 -} - -type IcmpMsg struct { - InType3 *float64 - OutType3 *float64 -} - -type Tcp struct { // nolint:revive - RtoAlgorithm *float64 - RtoMin *float64 - RtoMax *float64 - MaxConn *float64 - ActiveOpens *float64 - PassiveOpens *float64 - AttemptFails *float64 - EstabResets *float64 - CurrEstab *float64 - InSegs *float64 - OutSegs *float64 - RetransSegs *float64 - InErrs *float64 - OutRsts *float64 - InCsumErrors *float64 -} - -type Udp struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -type UdpLite struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -func (p Proc) Snmp() (ProcSnmp, error) { - filename := p.path("net/snmp") - data, err := util.ReadFileNoStat(filename) - if err != nil { - return ProcSnmp{PID: p.PID}, err - } - procSnmp, err := parseSnmp(bytes.NewReader(data), filename) - procSnmp.PID = p.PID - return procSnmp, err -} - -// parseSnmp parses the metrics from proc//net/snmp file -// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). -func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { - var ( - scanner = bufio.NewScanner(r) - procSnmp = ProcSnmp{} - ) - - for scanner.Scan() { - nameParts := strings.Split(scanner.Text(), " ") - scanner.Scan() - valueParts := strings.Split(scanner.Text(), " ") - // Remove trailing :. 
- protocol := strings.TrimSuffix(nameParts[0], ":") - if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", - ErrFileParse, fileName, protocol) - } - for i := 1; i < len(nameParts); i++ { - value, err := strconv.ParseFloat(valueParts[i], 64) - if err != nil { - return procSnmp, err - } - key := nameParts[i] - - switch protocol { - case "Ip": - switch key { - case "Forwarding": - procSnmp.Forwarding = &value - case "DefaultTTL": - procSnmp.DefaultTTL = &value - case "InReceives": - procSnmp.InReceives = &value - case "InHdrErrors": - procSnmp.InHdrErrors = &value - case "InAddrErrors": - procSnmp.InAddrErrors = &value - case "ForwDatagrams": - procSnmp.ForwDatagrams = &value - case "InUnknownProtos": - procSnmp.InUnknownProtos = &value - case "InDiscards": - procSnmp.InDiscards = &value - case "InDelivers": - procSnmp.InDelivers = &value - case "OutRequests": - procSnmp.OutRequests = &value - case "OutDiscards": - procSnmp.OutDiscards = &value - case "OutNoRoutes": - procSnmp.OutNoRoutes = &value - case "ReasmTimeout": - procSnmp.ReasmTimeout = &value - case "ReasmReqds": - procSnmp.ReasmReqds = &value - case "ReasmOKs": - procSnmp.ReasmOKs = &value - case "ReasmFails": - procSnmp.ReasmFails = &value - case "FragOKs": - procSnmp.FragOKs = &value - case "FragFails": - procSnmp.FragFails = &value - case "FragCreates": - procSnmp.FragCreates = &value - } - case "Icmp": - switch key { - case "InMsgs": - procSnmp.InMsgs = &value - case "InErrors": - procSnmp.Icmp.InErrors = &value - case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = &value - case "InDestUnreachs": - procSnmp.InDestUnreachs = &value - case "InTimeExcds": - procSnmp.InTimeExcds = &value - case "InParmProbs": - procSnmp.InParmProbs = &value - case "InSrcQuenchs": - procSnmp.InSrcQuenchs = &value - case "InRedirects": - procSnmp.InRedirects = &value - case "InEchos": - procSnmp.InEchos = &value - case "InEchoReps": - procSnmp.InEchoReps = &value - case "InTimestamps": - procSnmp.InTimestamps = &value - case "InTimestampReps": - procSnmp.InTimestampReps = &value - case "InAddrMasks": - procSnmp.InAddrMasks = &value - case "InAddrMaskReps": - procSnmp.InAddrMaskReps = &value - case "OutMsgs": - procSnmp.OutMsgs = &value - case "OutErrors": - procSnmp.OutErrors = &value - case "OutDestUnreachs": - procSnmp.OutDestUnreachs = &value - case "OutTimeExcds": - procSnmp.OutTimeExcds = &value - case "OutParmProbs": - procSnmp.OutParmProbs = &value - case "OutSrcQuenchs": - procSnmp.OutSrcQuenchs = &value - case "OutRedirects": - procSnmp.OutRedirects = &value - case "OutEchos": - procSnmp.OutEchos = &value - case "OutEchoReps": - procSnmp.OutEchoReps = &value - case "OutTimestamps": - procSnmp.OutTimestamps = &value - case "OutTimestampReps": - procSnmp.OutTimestampReps = &value - case "OutAddrMasks": - procSnmp.OutAddrMasks = &value - case "OutAddrMaskReps": - procSnmp.OutAddrMaskReps = &value - } - case "IcmpMsg": - switch key { - case "InType3": - procSnmp.InType3 = &value - case "OutType3": - procSnmp.OutType3 = &value - } - case "Tcp": - switch key { - case "RtoAlgorithm": - procSnmp.RtoAlgorithm = &value - case "RtoMin": - procSnmp.RtoMin = &value - case "RtoMax": - procSnmp.RtoMax = &value - case "MaxConn": - procSnmp.MaxConn = &value - case "ActiveOpens": - procSnmp.ActiveOpens = &value - case "PassiveOpens": - procSnmp.PassiveOpens = &value - case "AttemptFails": - procSnmp.AttemptFails = &value - case "EstabResets": - procSnmp.EstabResets = &value - case "CurrEstab": - 
procSnmp.CurrEstab = &value - case "InSegs": - procSnmp.InSegs = &value - case "OutSegs": - procSnmp.OutSegs = &value - case "RetransSegs": - procSnmp.RetransSegs = &value - case "InErrs": - procSnmp.InErrs = &value - case "OutRsts": - procSnmp.OutRsts = &value - case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = &value - } - case "Udp": - switch key { - case "InDatagrams": - procSnmp.Udp.InDatagrams = &value - case "NoPorts": - procSnmp.Udp.NoPorts = &value - case "InErrors": - procSnmp.Udp.InErrors = &value - case "OutDatagrams": - procSnmp.Udp.OutDatagrams = &value - case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = &value - case "SndbufErrors": - procSnmp.Udp.SndbufErrors = &value - case "InCsumErrors": - procSnmp.Udp.InCsumErrors = &value - case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = &value - } - case "UdpLite": - switch key { - case "InDatagrams": - procSnmp.UdpLite.InDatagrams = &value - case "NoPorts": - procSnmp.UdpLite.NoPorts = &value - case "InErrors": - procSnmp.UdpLite.InErrors = &value - case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = &value - case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = &value - case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = &value - case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = &value - case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = &value - } - } - } - } - return procSnmp, scanner.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go deleted file mode 100644 index fb7fd3995b..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "io" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcSnmp6 models the content of /proc//net/snmp6. -type ProcSnmp6 struct { - // The process ID. 
- PID int - Ip6 - Icmp6 - Udp6 - UdpLite6 -} - -type Ip6 struct { // nolint:revive - InReceives *float64 - InHdrErrors *float64 - InTooBigErrors *float64 - InNoRoutes *float64 - InAddrErrors *float64 - InUnknownProtos *float64 - InTruncatedPkts *float64 - InDiscards *float64 - InDelivers *float64 - OutForwDatagrams *float64 - OutRequests *float64 - OutDiscards *float64 - OutNoRoutes *float64 - ReasmTimeout *float64 - ReasmReqds *float64 - ReasmOKs *float64 - ReasmFails *float64 - FragOKs *float64 - FragFails *float64 - FragCreates *float64 - InMcastPkts *float64 - OutMcastPkts *float64 - InOctets *float64 - OutOctets *float64 - InMcastOctets *float64 - OutMcastOctets *float64 - InBcastOctets *float64 - OutBcastOctets *float64 - InNoECTPkts *float64 - InECT1Pkts *float64 - InECT0Pkts *float64 - InCEPkts *float64 -} - -type Icmp6 struct { - InMsgs *float64 - InErrors *float64 - OutMsgs *float64 - OutErrors *float64 - InCsumErrors *float64 - InDestUnreachs *float64 - InPktTooBigs *float64 - InTimeExcds *float64 - InParmProblems *float64 - InEchos *float64 - InEchoReplies *float64 - InGroupMembQueries *float64 - InGroupMembResponses *float64 - InGroupMembReductions *float64 - InRouterSolicits *float64 - InRouterAdvertisements *float64 - InNeighborSolicits *float64 - InNeighborAdvertisements *float64 - InRedirects *float64 - InMLDv2Reports *float64 - OutDestUnreachs *float64 - OutPktTooBigs *float64 - OutTimeExcds *float64 - OutParmProblems *float64 - OutEchos *float64 - OutEchoReplies *float64 - OutGroupMembQueries *float64 - OutGroupMembResponses *float64 - OutGroupMembReductions *float64 - OutRouterSolicits *float64 - OutRouterAdvertisements *float64 - OutNeighborSolicits *float64 - OutNeighborAdvertisements *float64 - OutRedirects *float64 - OutMLDv2Reports *float64 - InType1 *float64 - InType134 *float64 - InType135 *float64 - InType136 *float64 - InType143 *float64 - OutType133 *float64 - OutType135 *float64 - OutType136 *float64 - OutType143 *float64 -} - -type Udp6 struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -type UdpLite6 struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 -} - -func (p Proc) Snmp6() (ProcSnmp6, error) { - filename := p.path("net/snmp6") - data, err := util.ReadFileNoStat(filename) - if err != nil { - // On systems with IPv6 disabled, this file won't exist. - // Do nothing. - if errors.Is(err, os.ErrNotExist) { - return ProcSnmp6{PID: p.PID}, nil - } - - return ProcSnmp6{PID: p.PID}, err - } - - procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) - procSnmp6.PID = p.PID - return procSnmp6, err -} - -// parseSnmp6 parses the metrics from proc//net/snmp6 file -// and returns a map contains those metrics. 
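A usage sketch for the Snmp6 accessor defined above; note that the counters are *float64 pointers, so nil means the kernel did not report that field. procfs.Self is assumed from proc.go, outside this hunk:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // Self() is defined in proc.go, outside this diff
	if err != nil {
		panic(err)
	}
	s, err := p.Snmp6() // returns a zero-value struct when IPv6 is disabled
	if err != nil {
		panic(err)
	}
	if s.Ip6.InReceives != nil { // nil pointer means "not present in the file"
		fmt.Printf("Ip6InReceives=%.0f\n", *s.Ip6.InReceives)
	}
}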
-func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { - var ( - scanner = bufio.NewScanner(r) - procSnmp6 = ProcSnmp6{} - ) - - for scanner.Scan() { - stat := strings.Fields(scanner.Text()) - if len(stat) < 2 { - continue - } - // Expect to have "6" in metric name, skip line otherwise - if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { - protocol := stat[0][:sixIndex+1] - key := stat[0][sixIndex+1:] - value, err := strconv.ParseFloat(stat[1], 64) - if err != nil { - return procSnmp6, err - } - - switch protocol { - case "Ip6": - switch key { - case "InReceives": - procSnmp6.InReceives = &value - case "InHdrErrors": - procSnmp6.InHdrErrors = &value - case "InTooBigErrors": - procSnmp6.InTooBigErrors = &value - case "InNoRoutes": - procSnmp6.InNoRoutes = &value - case "InAddrErrors": - procSnmp6.InAddrErrors = &value - case "InUnknownProtos": - procSnmp6.InUnknownProtos = &value - case "InTruncatedPkts": - procSnmp6.InTruncatedPkts = &value - case "InDiscards": - procSnmp6.InDiscards = &value - case "InDelivers": - procSnmp6.InDelivers = &value - case "OutForwDatagrams": - procSnmp6.OutForwDatagrams = &value - case "OutRequests": - procSnmp6.OutRequests = &value - case "OutDiscards": - procSnmp6.OutDiscards = &value - case "OutNoRoutes": - procSnmp6.OutNoRoutes = &value - case "ReasmTimeout": - procSnmp6.ReasmTimeout = &value - case "ReasmReqds": - procSnmp6.ReasmReqds = &value - case "ReasmOKs": - procSnmp6.ReasmOKs = &value - case "ReasmFails": - procSnmp6.ReasmFails = &value - case "FragOKs": - procSnmp6.FragOKs = &value - case "FragFails": - procSnmp6.FragFails = &value - case "FragCreates": - procSnmp6.FragCreates = &value - case "InMcastPkts": - procSnmp6.InMcastPkts = &value - case "OutMcastPkts": - procSnmp6.OutMcastPkts = &value - case "InOctets": - procSnmp6.InOctets = &value - case "OutOctets": - procSnmp6.OutOctets = &value - case "InMcastOctets": - procSnmp6.InMcastOctets = &value - case "OutMcastOctets": - procSnmp6.OutMcastOctets = &value - case "InBcastOctets": - procSnmp6.InBcastOctets = &value - case "OutBcastOctets": - procSnmp6.OutBcastOctets = &value - case "InNoECTPkts": - procSnmp6.InNoECTPkts = &value - case "InECT1Pkts": - procSnmp6.InECT1Pkts = &value - case "InECT0Pkts": - procSnmp6.InECT0Pkts = &value - case "InCEPkts": - procSnmp6.InCEPkts = &value - - } - case "Icmp6": - switch key { - case "InMsgs": - procSnmp6.InMsgs = &value - case "InErrors": - procSnmp6.Icmp6.InErrors = &value - case "OutMsgs": - procSnmp6.OutMsgs = &value - case "OutErrors": - procSnmp6.OutErrors = &value - case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = &value - case "InDestUnreachs": - procSnmp6.InDestUnreachs = &value - case "InPktTooBigs": - procSnmp6.InPktTooBigs = &value - case "InTimeExcds": - procSnmp6.InTimeExcds = &value - case "InParmProblems": - procSnmp6.InParmProblems = &value - case "InEchos": - procSnmp6.InEchos = &value - case "InEchoReplies": - procSnmp6.InEchoReplies = &value - case "InGroupMembQueries": - procSnmp6.InGroupMembQueries = &value - case "InGroupMembResponses": - procSnmp6.InGroupMembResponses = &value - case "InGroupMembReductions": - procSnmp6.InGroupMembReductions = &value - case "InRouterSolicits": - procSnmp6.InRouterSolicits = &value - case "InRouterAdvertisements": - procSnmp6.InRouterAdvertisements = &value - case "InNeighborSolicits": - procSnmp6.InNeighborSolicits = &value - case "InNeighborAdvertisements": - procSnmp6.InNeighborAdvertisements = &value - case "InRedirects": - procSnmp6.InRedirects = &value - case 
"InMLDv2Reports": - procSnmp6.InMLDv2Reports = &value - case "OutDestUnreachs": - procSnmp6.OutDestUnreachs = &value - case "OutPktTooBigs": - procSnmp6.OutPktTooBigs = &value - case "OutTimeExcds": - procSnmp6.OutTimeExcds = &value - case "OutParmProblems": - procSnmp6.OutParmProblems = &value - case "OutEchos": - procSnmp6.OutEchos = &value - case "OutEchoReplies": - procSnmp6.OutEchoReplies = &value - case "OutGroupMembQueries": - procSnmp6.OutGroupMembQueries = &value - case "OutGroupMembResponses": - procSnmp6.OutGroupMembResponses = &value - case "OutGroupMembReductions": - procSnmp6.OutGroupMembReductions = &value - case "OutRouterSolicits": - procSnmp6.OutRouterSolicits = &value - case "OutRouterAdvertisements": - procSnmp6.OutRouterAdvertisements = &value - case "OutNeighborSolicits": - procSnmp6.OutNeighborSolicits = &value - case "OutNeighborAdvertisements": - procSnmp6.OutNeighborAdvertisements = &value - case "OutRedirects": - procSnmp6.OutRedirects = &value - case "OutMLDv2Reports": - procSnmp6.OutMLDv2Reports = &value - case "InType1": - procSnmp6.InType1 = &value - case "InType134": - procSnmp6.InType134 = &value - case "InType135": - procSnmp6.InType135 = &value - case "InType136": - procSnmp6.InType136 = &value - case "InType143": - procSnmp6.InType143 = &value - case "OutType133": - procSnmp6.OutType133 = &value - case "OutType135": - procSnmp6.OutType135 = &value - case "OutType136": - procSnmp6.OutType136 = &value - case "OutType143": - procSnmp6.OutType143 = &value - } - case "Udp6": - switch key { - case "InDatagrams": - procSnmp6.Udp6.InDatagrams = &value - case "NoPorts": - procSnmp6.Udp6.NoPorts = &value - case "InErrors": - procSnmp6.Udp6.InErrors = &value - case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = &value - case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = &value - case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = &value - case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = &value - case "IgnoredMulti": - procSnmp6.IgnoredMulti = &value - } - case "UdpLite6": - switch key { - case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = &value - case "NoPorts": - procSnmp6.UdpLite6.NoPorts = &value - case "InErrors": - procSnmp6.UdpLite6.InErrors = &value - case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = &value - case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = &value - case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = &value - case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = &value - } - } - } - } - return procSnmp6, scanner.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_stat.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index 06a8d931c9..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bytes" - "fmt" - "os" - - "github.com/prometheus/procfs/internal/util" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding -// cross-compilation. Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. -// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime int - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime int - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize uint - // Resident set size in pages. - RSS int - // Soft limit in bytes on the rss of the process. - RSSLimit uint64 - // CPU number last executed on. - Processor uint - // Real-time scheduling priority, a number in the range 1 to 99 for processes - // scheduled under a real-time policy, or 0, for non-real-time processes. - RTPriority uint - // Scheduling policy. 
- Policy uint - // Aggregated block I/O delays, measured in clock ticks (centiseconds). - DelayAcctBlkIOTicks uint64 - // Guest time of the process (time spent running a virtual CPU for a guest - // operating system), measured in clock ticks. - GuestTime int - // Guest time of the process's children, measured in clock ticks. - CGuestTime int - - proc FS -} - -// NewStat returns the current status information of the process. -// -// Deprecated: Use p.Stat() instead. -func (p Proc) NewStat() (ProcStat, error) { - return p.Stat() -} - -// Stat returns the current status information of the process. -func (p Proc) Stat() (ProcStat, error) { - data, err := util.ReadFileNoStat(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - - var ( - ignoreInt64 int64 - ignoreUint64 uint64 - - s = ProcStat{PID: p.PID, proc: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) - } - - s.Comm = string(data[l+1 : r]) - - // Check the following resources for the details about the particular stat - // fields and their data types: - // * https://man7.org/linux/man-pages/man5/proc.5.html - // * https://man7.org/linux/man-pages/man3/scanf.3.html - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignoreInt64, - &s.Starttime, - &s.VSize, - &s.RSS, - &s.RSSLimit, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreInt64, - &s.Processor, - &s.RTPriority, - &s.Policy, - &s.DelayAcctBlkIOTicks, - &s.GuestTime, - &s.CGuestTime, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() uint { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. -func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. -func (s ProcStat) StartTime() (float64, error) { - stat, err := s.proc.Stat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. -func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_status.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_status.go deleted file mode 100644 index dd8aa56885..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_status.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "math/bits" - "sort" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcStatus provides status information about the process, -// read from /proc/[pid]/status. -type ProcStatus struct { - // The process ID. - PID int - // The process name. - Name string - - // Thread group ID. - TGID int - // List of Pid namespace. - NSpids []uint64 - - // Peak virtual memory size. - VmPeak uint64 // nolint:revive - // Virtual memory size. - VmSize uint64 // nolint:revive - // Locked memory size. - VmLck uint64 // nolint:revive - // Pinned memory size. - VmPin uint64 // nolint:revive - // Peak resident set size. - VmHWM uint64 // nolint:revive - // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:revive - // Size of resident anonymous memory. - RssAnon uint64 // nolint:revive - // Size of resident file mappings. - RssFile uint64 // nolint:revive - // Size of resident shared memory. - RssShmem uint64 // nolint:revive - // Size of data segments. - VmData uint64 // nolint:revive - // Size of stack segments. - VmStk uint64 // nolint:revive - // Size of text segments. - VmExe uint64 // nolint:revive - // Shared library code size. - VmLib uint64 // nolint:revive - // Page table entries size. - VmPTE uint64 // nolint:revive - // Size of second-level page tables. - VmPMD uint64 // nolint:revive - // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:revive - // Size of hugetlb memory portions - HugetlbPages uint64 - - // Number of voluntary context switches. - VoluntaryCtxtSwitches uint64 - // Number of involuntary context switches. - NonVoluntaryCtxtSwitches uint64 - - // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]uint64 - // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]uint64 - - // CpusAllowedList: List of cpu cores processes are allowed to run on. - CpusAllowedList []uint64 -} - -// NewStatus returns the current status information of the process. 
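A brief sketch of reading these fields through NewStatus; procfs.Self is assumed from proc.go, and memory fields such as VmRSS are already converted from the file's kB unit to bytes by the parser:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // Self() is defined in proc.go, outside this diff
	if err != nil {
		panic(err)
	}
	st, err := p.NewStatus()
	if err != nil {
		panic(err)
	}
	fmt.Printf("rss=%d bytes, ctxt switches: %d voluntary / %d involuntary\n",
		st.VmRSS, st.VoluntaryCtxtSwitches, st.NonVoluntaryCtxtSwitches)
}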
-func (p Proc) NewStatus() (ProcStatus, error) { - data, err := util.ReadFileNoStat(p.path("status")) - if err != nil { - return ProcStatus{}, err - } - - s := ProcStatus{PID: p.PID} - - lines := strings.Split(string(data), "\n") - for _, line := range lines { - if !bytes.Contains([]byte(line), []byte(":")) { - continue - } - - kv := strings.SplitN(line, ":", 2) - - // removes spaces - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - // removes "kB" - v = strings.TrimSuffix(v, " kB") - - // value to int when possible - // we can skip error check here, 'cause vKBytes is not used when value is a string - vKBytes, _ := strconv.ParseUint(v, 10, 64) - // convert kB to B - vBytes := vKBytes * 1024 - - err = s.fillStatus(k, v, vKBytes, vBytes) - if err != nil { - return ProcStatus{}, err - } - } - - return s, nil -} - -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { - switch k { - case "Tgid": - s.TGID = int(vUint) - case "Name": - s.Name = vString - case "Uid": - var err error - for i, v := range strings.Split(vString, "\t") { - s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) - if err != nil { - return err - } - } - case "Gid": - var err error - for i, v := range strings.Split(vString, "\t") { - s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) - if err != nil { - return err - } - } - case "NSpid": - nspids, err := calcNSPidsList(vString) - if err != nil { - return err - } - s.NSpids = nspids - case "VmPeak": - s.VmPeak = vUintBytes - case "VmSize": - s.VmSize = vUintBytes - case "VmLck": - s.VmLck = vUintBytes - case "VmPin": - s.VmPin = vUintBytes - case "VmHWM": - s.VmHWM = vUintBytes - case "VmRSS": - s.VmRSS = vUintBytes - case "RssAnon": - s.RssAnon = vUintBytes - case "RssFile": - s.RssFile = vUintBytes - case "RssShmem": - s.RssShmem = vUintBytes - case "VmData": - s.VmData = vUintBytes - case "VmStk": - s.VmStk = vUintBytes - case "VmExe": - s.VmExe = vUintBytes - case "VmLib": - s.VmLib = vUintBytes - case "VmPTE": - s.VmPTE = vUintBytes - case "VmPMD": - s.VmPMD = vUintBytes - case "VmSwap": - s.VmSwap = vUintBytes - case "HugetlbPages": - s.HugetlbPages = vUintBytes - case "voluntary_ctxt_switches": - s.VoluntaryCtxtSwitches = vUint - case "nonvoluntary_ctxt_switches": - s.NonVoluntaryCtxtSwitches = vUint - case "Cpus_allowed_list": - s.CpusAllowedList = calcCpusAllowedList(vString) - } - - return nil -} - -// TotalCtxtSwitches returns the total context switch. 
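The fillStatus code above routes "Cpus_allowed_list" to calcCpusAllowedList; a standalone sketch of that range expansion, mirroring the implementation that follows, with a hypothetical input value:

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// expand turns a Cpus_allowed_list value such as "0-2,4" into the
// individual CPU numbers, as calcCpusAllowedList does below.
func expand(list string) []uint64 {
	var g []uint64
	for _, cpu := range strings.Split(list, ",") {
		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
			start, _ := strconv.ParseUint(l[0], 10, 64)
			end, _ := strconv.ParseUint(l[1], 10, 64)
			for i := start; i <= end; i++ {
				g = append(g, i)
			}
		} else {
			v, _ := strconv.ParseUint(l[0], 10, 64)
			g = append(g, v)
		}
	}
	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
	return g
}

func main() {
	fmt.Println(expand("0-2,4")) // [0 1 2 4]
}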
-func (s ProcStatus) TotalCtxtSwitches() uint64 { - return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches -} - -func calcCpusAllowedList(cpuString string) []uint64 { - s := strings.Split(cpuString, ",") - - var g []uint64 - - for _, cpu := range s { - // parse cpu ranges, example: 1-3=[1,2,3] - if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 { - startCPU, _ := strconv.ParseUint(l[0], 10, 64) - endCPU, _ := strconv.ParseUint(l[1], 10, 64) - - for i := startCPU; i <= endCPU; i++ { - g = append(g, i) - } - } else if len(l) == 1 { - cpu, _ := strconv.ParseUint(l[0], 10, 64) - g = append(g, cpu) - } - - } - - sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) - return g -} - -func calcNSPidsList(nspidsString string) ([]uint64, error) { - s := strings.Split(nspidsString, "\t") - var nspids []uint64 - - for _, nspid := range s { - nspid, err := strconv.ParseUint(nspid, 10, 64) - if err != nil { - return nil, err - } - nspids = append(nspids, nspid) - } - - return nspids, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/proc_sys.go b/openshift/tools/vendor/github.com/prometheus/procfs/proc_sys.go deleted file mode 100644 index 3810d1ac99..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/proc_sys.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -func sysctlToPath(sysctl string) string { - return strings.ReplaceAll(sysctl, ".", "/") -} - -func (fs FS) SysctlStrings(sysctl string) ([]string, error) { - value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) - if err != nil { - return nil, err - } - return strings.Fields(value), nil - -} - -func (fs FS) SysctlInts(sysctl string) ([]int, error) { - fields, err := fs.SysctlStrings(sysctl) - if err != nil { - return nil, err - } - - values := make([]int, len(fields)) - for i, f := range fields { - vp := util.NewValueParser(f) - values[i] = vp.Int() - if err := vp.Err(); err != nil { - return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) - } - } - return values, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/schedstat.go b/openshift/tools/vendor/github.com/prometheus/procfs/schedstat.go deleted file mode 100644 index 5f7f32dc83..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/schedstat.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "regexp" - "strconv" -) - -var ( - cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) - procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) -) - -// Schedstat contains scheduler statistics from /proc/schedstat -// -// See -// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt -// for a detailed description of what these numbers mean. -// -// Note the current kernel documentation claims some of the time units are in -// jiffies when they are actually in nanoseconds since 2.6.23 with the -// introduction of CFS. A fix to the documentation is pending. See -// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 -type Schedstat struct { - CPUs []*SchedstatCPU -} - -// SchedstatCPU contains the values from one "cpu" line. -type SchedstatCPU struct { - CPUNum string - - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// ProcSchedstat contains the values from `/proc//schedstat`. -type ProcSchedstat struct { - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// Schedstat reads data from `/proc/schedstat`. -func (fs FS) Schedstat() (*Schedstat, error) { - file, err := os.Open(fs.proc.Path("schedstat")) - if err != nil { - return nil, err - } - defer file.Close() - - stats := &Schedstat{} - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - match := cpuLineRE.FindStringSubmatch(scanner.Text()) - if match != nil { - cpu := &SchedstatCPU{} - cpu.CPUNum = match[1] - - cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) - if err != nil { - continue - } - - cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) - if err != nil { - continue - } - - cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) - if err != nil { - continue - } - - stats.CPUs = append(stats.CPUs, cpu) - } - } - - return stats, nil -} - -func parseProcSchedstat(contents string) (ProcSchedstat, error) { - var ( - stats ProcSchedstat - err error - ) - match := procLineRE.FindStringSubmatch(contents) - - if match != nil { - stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) - if err != nil { - return stats, err - } - - stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) - if err != nil { - return stats, err - } - - stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) - return stats, err - } - - return stats, errors.New("could not parse schedstat") -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/slab.go b/openshift/tools/vendor/github.com/prometheus/procfs/slab.go deleted file mode 100644 index 8611c90177..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/slab.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - slabSpace = regexp.MustCompile(`\s+`) - slabVer = regexp.MustCompile(`slabinfo -`) - slabHeader = regexp.MustCompile(`# name`) -) - -// Slab represents a slab pool in the kernel. -type Slab struct { - Name string - ObjActive int64 - ObjNum int64 - ObjSize int64 - ObjPerSlab int64 - PagesPerSlab int64 - // tunables - Limit int64 - Batch int64 - SharedFactor int64 - SlabActive int64 - SlabNum int64 - SharedAvail int64 -} - -// SlabInfo represents info for all slabs. -type SlabInfo struct { - Slabs []*Slab -} - -func shouldParseSlab(line string) bool { - if slabVer.MatchString(line) { - return false - } - if slabHeader.MatchString(line) { - return false - } - return true -} - -// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. -func parseV21SlabEntry(line string) (*Slab, error) { - // First cleanup whitespace. - l := slabSpace.ReplaceAllString(line, " ") - s := strings.Split(l, " ") - if len(s) != 16 { - return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) - } - var err error - i := &Slab{Name: s[0]} - i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) - if err != nil { - return nil, err - } - i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) - if err != nil { - return nil, err - } - i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) - if err != nil { - return nil, err - } - i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) - if err != nil { - return nil, err - } - i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) - if err != nil { - return nil, err - } - i.Limit, err = strconv.ParseInt(s[8], 10, 64) - if err != nil { - return nil, err - } - i.Batch, err = strconv.ParseInt(s[9], 10, 64) - if err != nil { - return nil, err - } - i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) - if err != nil { - return nil, err - } - i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) - if err != nil { - return nil, err - } - i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) - if err != nil { - return nil, err - } - i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) - if err != nil { - return nil, err - } - return i, nil -} - -// parseSlabInfo21 is used to parse a slabinfo 2.1 file. -func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { - scanner := bufio.NewScanner(r) - s := SlabInfo{Slabs: []*Slab{}} - for scanner.Scan() { - line := scanner.Text() - if !shouldParseSlab(line) { - continue - } - slab, err := parseV21SlabEntry(line) - if err != nil { - return s, err - } - s.Slabs = append(s.Slabs, slab) - } - return s, nil -} - -// SlabInfo reads data from `/proc/slabinfo`. -func (fs FS) SlabInfo() (SlabInfo, error) { - // TODO: Consider passing options to allow for parsing different - // slabinfo versions. However, slabinfo 2.1 has been stable since - // kernel 2.6.10 and later. 
- data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) - if err != nil { - return SlabInfo{}, err - } - - return parseSlabInfo21(bytes.NewReader(data)) -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/softirqs.go b/openshift/tools/vendor/github.com/prometheus/procfs/softirqs.go deleted file mode 100644 index 403e6ae708..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/softirqs.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Softirqs represents the softirq statistics. -type Softirqs struct { - Hi []uint64 - Timer []uint64 - NetTx []uint64 - NetRx []uint64 - Block []uint64 - IRQPoll []uint64 - Tasklet []uint64 - Sched []uint64 - HRTimer []uint64 - RCU []uint64 -} - -func (fs FS) Softirqs() (Softirqs, error) { - fileName := fs.proc.Path("softirqs") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Softirqs{}, err - } - - reader := bytes.NewReader(data) - - return parseSoftirqs(reader) -} - -func parseSoftirqs(r io.Reader) (Softirqs, error) { - var ( - softirqs = Softirqs{} - scanner = bufio.NewScanner(r) - ) - - if !scanner.Scan() { - return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) - } - - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - var err error - - // require at least one cpu - if len(parts) < 2 { - continue - } - switch parts[0] { - case "HI:": - perCPU := parts[1:] - softirqs.Hi = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) - } - } - case "TIMER:": - perCPU := parts[1:] - softirqs.Timer = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) - } - } - case "NET_TX:": - perCPU := parts[1:] - softirqs.NetTx = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) - } - } - case "NET_RX:": - perCPU := parts[1:] - softirqs.NetRx = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) - } - } - case "BLOCK:": - perCPU := parts[1:] - softirqs.Block = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q 
(BLOCK%d): %w", ErrFileParse, count, i, err) - } - } - case "IRQ_POLL:": - perCPU := parts[1:] - softirqs.IRQPoll = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) - } - } - case "TASKLET:": - perCPU := parts[1:] - softirqs.Tasklet = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) - } - } - case "SCHED:": - perCPU := parts[1:] - softirqs.Sched = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) - } - } - case "HRTIMER:": - perCPU := parts[1:] - softirqs.HRTimer = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) - } - } - case "RCU:": - perCPU := parts[1:] - softirqs.RCU = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) - } - } - } - } - - if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) - } - - return softirqs, scanner.Err() -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/stat.go b/openshift/tools/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index e36b41c18a..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. -// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading `/proc/softirqs`. 
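A short sketch of consuming the per-CPU softirq counters via the Softirqs accessor defined earlier in this hunk, assuming NewFS from fs.go:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // NewFS lives in fs.go, outside this diff
	if err != nil {
		panic(err)
	}
	sirq, err := fs.Softirqs()
	if err != nil {
		panic(err)
	}
	var timer uint64
	for _, n := range sirq.Timer { // one counter per CPU
		timer += n
	}
	fmt.Printf("TIMER softirqs across all CPUs: %d\n", timer)
}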
-type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime uint64 - // Summed up cpu statistics. - CPUTotal CPUStat - // Per-CPU statistics. - CPU map[int64]CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat -} - -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). -func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. -func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) - } - - return softIRQStat, total, nil -} - -// NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: Use fs.Stat() instead. -func NewStat() (Stat, error) { - fs, err := NewFS(fs.DefaultProcMountPoint) - if err != nil { - return Stat{}, err - } - return fs.Stat() -} - -// NewStat returns information about current cpu/process statistics. -// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: Use fs.Stat() instead. -func (fs FS) NewStat() (Stat, error) { - return fs.Stat() -} - -// Stat returns information about current cpu/process statistics. 
-// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Stat() (Stat, error) { - fileName := fs.proc.Path("stat") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Stat{}, err - } - procStat, err := parseStat(bytes.NewReader(data), fileName) - if err != nil { - return Stat{}, err - } - return procStat, nil -} - -// parseStat parses the metrics from /proc/[pid]/stat. -func parseStat(r io.Reader, fileName string) (Stat, error) { - var ( - scanner = bufio.NewScanner(r) - stat = Stat{ - CPU: make(map[int64]CPUStat), - } - err error - ) - - // Increase default scanner buffer to handle very long `intr` lines. - buf := make([]byte, 0, 8*1024) - scanner.Buffer(buf, 1024*1024) - - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least <key> <value> - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - stat.CPU[cpuID] = cpuStat - } - } - } - - if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) - } - - return stat, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/swaps.go b/openshift/tools/vendor/github.com/prometheus/procfs/swaps.go deleted file mode 100644 index 65fec834bf..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/swaps.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Swap represents an entry in /proc/swaps. -type Swap struct { - Filename string - Type string - Size int - Used int - Priority int -} - -// Swaps returns a slice of all configured swap devices on the system. -func (fs FS) Swaps() ([]*Swap, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) - if err != nil { - return nil, err - } - return parseSwaps(data) -} - -func parseSwaps(info []byte) ([]*Swap, error) { - swaps := []*Swap{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - scanner.Scan() // ignore header line - for scanner.Scan() { - swapString := scanner.Text() - parsedSwap, err := parseSwapString(swapString) - if err != nil { - return nil, err - } - swaps = append(swaps, parsedSwap) - } - - err := scanner.Err() - return swaps, err -} - -func parseSwapString(swapString string) (*Swap, error) { - var err error - - swapFields := strings.Fields(swapString) - swapLength := len(swapFields) - if swapLength < 5 { - return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) - } - - swap := &Swap{ - Filename: swapFields[0], - Type: swapFields[1], - } - - swap.Size, err = strconv.Atoi(swapFields[2]) - if err != nil { - return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) - } - swap.Used, err = strconv.Atoi(swapFields[3]) - if err != nil { - return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) - } - swap.Priority, err = strconv.Atoi(swapFields[4]) - if err != nil { - return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) - } - - return swap, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/thread.go b/openshift/tools/vendor/github.com/prometheus/procfs/thread.go deleted file mode 100644 index 80e0e947be..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/thread.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - - fsi "github.com/prometheus/procfs/internal/fs" -) - -// Provide access to /proc/PID/task/TID files, for thread specific values. Since -// such files have the same structure as /proc/PID/ ones, the data structures -// and the parsers for the latter may be reused. - -// AllThreads returns a list of all currently available threads under /proc/PID. 
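Note: a minimal sketch of how the deleted `Swaps` and `AllThreads` readers are typically used, again assuming the upstream `github.com/prometheus/procfs` package; not part of this diff:

```go
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	pfs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}

	// One Swap entry per data line of /proc/swaps (the header is skipped).
	swaps, err := pfs.Swaps()
	if err != nil {
		panic(err)
	}
	for _, s := range swaps {
		fmt.Printf("%s type=%s size=%d used=%d prio=%d\n",
			s.Filename, s.Type, s.Size, s.Used, s.Priority)
	}

	// Threads of the current process, enumerated from /proc/PID/task.
	threads, err := pfs.AllThreads(os.Getpid())
	if err != nil {
		panic(err)
	}
	fmt.Println("thread count:", len(threads))
}
```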
-func AllThreads(pid int) (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllThreads(pid) -} - -// AllThreads returns a list of all currently available threads for PID. -func (fs FS) AllThreads(pid int) (Procs, error) { - taskPath := fs.proc.Path(strconv.Itoa(pid), "task") - d, err := os.Open(taskPath) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) - } - - t := Procs{} - for _, n := range names { - tid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - - t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) - } - - return t, nil -} - -// Thread returns a process for a given PID, TID. -func (fs FS) Thread(pid, tid int) (Proc, error) { - taskPath := fs.proc.Path(strconv.Itoa(pid), "task") - if _, err := os.Stat(taskPath); err != nil { - return Proc{}, err - } - return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil -} - -// Thread returns a process for a given TID of Proc. -func (proc Proc) Thread(tid int) (Proc, error) { - tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} - if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { - return Proc{}, err - } - return Proc{PID: tid, fs: tfs}, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/ttar b/openshift/tools/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index 19ef02b8d4..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. - -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line) - line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line) - line = re.sub(r'(?<!\\)EOF', '', line) - line = re.sub(r'\\EOF', 'EOF', line) - sys.stdout.write(line) -PEF -) - -function test_environment { - if [ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -gt 1 ]; then - echo "WARNING sed unable to handle null bytes, using Python (slow)." - if ! which python >/dev/null; then - echo "ERROR Python not found. Aborting."
 - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive) - $bname -t -f <ARCHIVE> (list archive contents) - $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive) - -Options: - -C <DIR> (change directory) - -v (verbose) - --recursive-unlink (recursively delete existing directory if path - collides with file or directory to extract) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE -unset RECURSIVE_UNLINK - -while getopts :cf:-:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - -) - case $OPTARG in - recursive-unlink) - RECURSIVE_UNLINK="yes" - ;; - *) - echo -e "Error: invalid option -$OPTARG" - echo - usage 1 - ;; - esac - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes).
- echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -L "$path" ]; then - rm "$path" - elif [ -d "$path" ]; then - if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then - rm -r "$path" - else - # Safe because symlinks to directories are dealt with above - rmdir "$path" - fi - elif [ -e "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. - touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." 
- echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/vm.go b/openshift/tools/vendor/github.com/prometheus/procfs/vm.go deleted file mode 100644 index 51c49d89e8..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/vm.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// The VM interface is described at -// -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -// -// Each setting is exposed as a single file. -// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string. 
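Note: the `VM` reader deleted here exposes most settings as pointer fields, since write-only sysctls are skipped on read and stay nil. A minimal usage sketch, assuming the upstream `github.com/prometheus/procfs` package; not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	pfs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	vm, err := pfs.VM()
	if err != nil {
		panic(err)
	}

	// Nil checks are required: unreadable sysctls are silently skipped.
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
	// lowmem_reserve_ratio is the one array-valued setting.
	for i, v := range vm.LowmemReserveRatio {
		if v != nil {
			fmt.Printf("lowmem_reserve_ratio[%d] = %d\n", i, *v)
		}
	}
}
```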
-type VM struct { - AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes - BlockDump *int64 // /proc/sys/vm/block_dump - CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed - DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes - DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio - DirtyBytes *int64 // /proc/sys/vm/dirty_bytes - DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs - DirtyRatio *int64 // /proc/sys/vm/dirty_ratio - DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds - DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs - DropCaches *int64 // /proc/sys/vm/drop_caches - ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold - HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group - LaptopMode *int64 // /proc/sys/vm/laptop_mode - LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout - LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio - MaxMapCount *int64 // /proc/sys/vm/max_map_count - MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill - MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery - MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes - MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio - MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio - MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr - NrHugepages *int64 // /proc/sys/vm/nr_hugepages - NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy - NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages - NumaStat *int64 // /proc/sys/vm/numa_stat - NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order - OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks - OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task - OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes - OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory - OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio - PageCluster *int64 // /proc/sys/vm/page-cluster - PanicOnOom *int64 // /proc/sys/vm/panic_on_oom - PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction - StatInterval *int64 // /proc/sys/vm/stat_interval - Swappiness *int64 // /proc/sys/vm/swappiness - UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes - VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure - WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor - WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor - ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode -} - -// VM reads the VM statistics from the specified `proc` filesystem. 
-func (fs FS) VM() (*VM, error) { - path := fs.proc.Path("sys/vm") - file, err := os.Stat(path) - if err != nil { - return nil, err - } - if !file.Mode().IsDir() { - return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) - } - - files, err := os.ReadDir(path) - if err != nil { - return nil, err - } - - var vm VM - for _, f := range files { - if f.IsDir() { - continue - } - - name := filepath.Join(path, f.Name()) - // ignore errors on read, as there are some write only - // in /proc/sys/vm - value, err := util.SysReadFile(name) - if err != nil { - continue - } - vp := util.NewValueParser(value) - - switch f.Name() { - case "admin_reserve_kbytes": - vm.AdminReserveKbytes = vp.PInt64() - case "block_dump": - vm.BlockDump = vp.PInt64() - case "compact_unevictable_allowed": - vm.CompactUnevictableAllowed = vp.PInt64() - case "dirty_background_bytes": - vm.DirtyBackgroundBytes = vp.PInt64() - case "dirty_background_ratio": - vm.DirtyBackgroundRatio = vp.PInt64() - case "dirty_bytes": - vm.DirtyBytes = vp.PInt64() - case "dirty_expire_centisecs": - vm.DirtyExpireCentisecs = vp.PInt64() - case "dirty_ratio": - vm.DirtyRatio = vp.PInt64() - case "dirtytime_expire_seconds": - vm.DirtytimeExpireSeconds = vp.PInt64() - case "dirty_writeback_centisecs": - vm.DirtyWritebackCentisecs = vp.PInt64() - case "drop_caches": - vm.DropCaches = vp.PInt64() - case "extfrag_threshold": - vm.ExtfragThreshold = vp.PInt64() - case "hugetlb_shm_group": - vm.HugetlbShmGroup = vp.PInt64() - case "laptop_mode": - vm.LaptopMode = vp.PInt64() - case "legacy_va_layout": - vm.LegacyVaLayout = vp.PInt64() - case "lowmem_reserve_ratio": - stringSlice := strings.Fields(value) - pint64Slice := make([]*int64, 0, len(stringSlice)) - for _, value := range stringSlice { - vp := util.NewValueParser(value) - pint64Slice = append(pint64Slice, vp.PInt64()) - } - vm.LowmemReserveRatio = pint64Slice - case "max_map_count": - vm.MaxMapCount = vp.PInt64() - case "memory_failure_early_kill": - vm.MemoryFailureEarlyKill = vp.PInt64() - case "memory_failure_recovery": - vm.MemoryFailureRecovery = vp.PInt64() - case "min_free_kbytes": - vm.MinFreeKbytes = vp.PInt64() - case "min_slab_ratio": - vm.MinSlabRatio = vp.PInt64() - case "min_unmapped_ratio": - vm.MinUnmappedRatio = vp.PInt64() - case "mmap_min_addr": - vm.MmapMinAddr = vp.PInt64() - case "nr_hugepages": - vm.NrHugepages = vp.PInt64() - case "nr_hugepages_mempolicy": - vm.NrHugepagesMempolicy = vp.PInt64() - case "nr_overcommit_hugepages": - vm.NrOvercommitHugepages = vp.PInt64() - case "numa_stat": - vm.NumaStat = vp.PInt64() - case "numa_zonelist_order": - vm.NumaZonelistOrder = value - case "oom_dump_tasks": - vm.OomDumpTasks = vp.PInt64() - case "oom_kill_allocating_task": - vm.OomKillAllocatingTask = vp.PInt64() - case "overcommit_kbytes": - vm.OvercommitKbytes = vp.PInt64() - case "overcommit_memory": - vm.OvercommitMemory = vp.PInt64() - case "overcommit_ratio": - vm.OvercommitRatio = vp.PInt64() - case "page-cluster": - vm.PageCluster = vp.PInt64() - case "panic_on_oom": - vm.PanicOnOom = vp.PInt64() - case "percpu_pagelist_fraction": - vm.PercpuPagelistFraction = vp.PInt64() - case "stat_interval": - vm.StatInterval = vp.PInt64() - case "swappiness": - vm.Swappiness = vp.PInt64() - case "user_reserve_kbytes": - vm.UserReserveKbytes = vp.PInt64() - case "vfs_cache_pressure": - vm.VfsCachePressure = vp.PInt64() - case "watermark_boost_factor": - vm.WatermarkBoostFactor = vp.PInt64() - case "watermark_scale_factor": - vm.WatermarkScaleFactor = vp.PInt64() - case 
"zone_reclaim_mode": - vm.ZoneReclaimMode = vp.PInt64() - } - if err := vp.Err(); err != nil { - return nil, err - } - } - - return &vm, nil -} diff --git a/openshift/tools/vendor/github.com/prometheus/procfs/zoneinfo.go b/openshift/tools/vendor/github.com/prometheus/procfs/zoneinfo.go deleted file mode 100644 index e54d94b090..0000000000 --- a/openshift/tools/vendor/github.com/prometheus/procfs/zoneinfo.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "bytes" - "fmt" - "os" - "regexp" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Zoneinfo holds info parsed from /proc/zoneinfo. -type Zoneinfo struct { - Node string - Zone string - NrFreePages *int64 - Min *int64 - Low *int64 - High *int64 - Scanned *int64 - Spanned *int64 - Present *int64 - Managed *int64 - NrActiveAnon *int64 - NrInactiveAnon *int64 - NrIsolatedAnon *int64 - NrAnonPages *int64 - NrAnonTransparentHugepages *int64 - NrActiveFile *int64 - NrInactiveFile *int64 - NrIsolatedFile *int64 - NrFilePages *int64 - NrSlabReclaimable *int64 - NrSlabUnreclaimable *int64 - NrMlockStack *int64 - NrKernelStack *int64 - NrMapped *int64 - NrDirty *int64 - NrWriteback *int64 - NrUnevictable *int64 - NrShmem *int64 - NrDirtied *int64 - NrWritten *int64 - NumaHit *int64 - NumaMiss *int64 - NumaForeign *int64 - NumaInterleave *int64 - NumaLocal *int64 - NumaOther *int64 - Protection []*int64 -} - -var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) - -// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of -// structs containing the relevant info. 
More information available here: -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := os.ReadFile(fs.proc.Path("zoneinfo")) - if err != nil { - return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) - } - zoneinfo, err := parseZoneinfo(data) - if err != nil { - return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) - } - return zoneinfo, nil -} - -func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { - - zoneinfo := []Zoneinfo{} - - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { - var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { - - if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { - zoneinfoElement.Node = nodeZone[1] - zoneinfoElement.Zone = nodeZone[2] - continue - } - if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { - continue - } - parts := strings.Fields(strings.TrimSpace(line)) - if len(parts) < 2 { - continue - } - vp := util.NewValueParser(parts[1]) - switch parts[0] { - case "nr_free_pages": - zoneinfoElement.NrFreePages = vp.PInt64() - case "min": - zoneinfoElement.Min = vp.PInt64() - case "low": - zoneinfoElement.Low = vp.PInt64() - case "high": - zoneinfoElement.High = vp.PInt64() - case "scanned": - zoneinfoElement.Scanned = vp.PInt64() - case "spanned": - zoneinfoElement.Spanned = vp.PInt64() - case "present": - zoneinfoElement.Present = vp.PInt64() - case "managed": - zoneinfoElement.Managed = vp.PInt64() - case "nr_active_anon": - zoneinfoElement.NrActiveAnon = vp.PInt64() - case "nr_inactive_anon": - zoneinfoElement.NrInactiveAnon = vp.PInt64() - case "nr_isolated_anon": - zoneinfoElement.NrIsolatedAnon = vp.PInt64() - case "nr_anon_pages": - zoneinfoElement.NrAnonPages = vp.PInt64() - case "nr_anon_transparent_hugepages": - zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() - case "nr_active_file": - zoneinfoElement.NrActiveFile = vp.PInt64() - case "nr_inactive_file": - zoneinfoElement.NrInactiveFile = vp.PInt64() - case "nr_isolated_file": - zoneinfoElement.NrIsolatedFile = vp.PInt64() - case "nr_file_pages": - zoneinfoElement.NrFilePages = vp.PInt64() - case "nr_slab_reclaimable": - zoneinfoElement.NrSlabReclaimable = vp.PInt64() - case "nr_slab_unreclaimable": - zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() - case "nr_mlock_stack": - zoneinfoElement.NrMlockStack = vp.PInt64() - case "nr_kernel_stack": - zoneinfoElement.NrKernelStack = vp.PInt64() - case "nr_mapped": - zoneinfoElement.NrMapped = vp.PInt64() - case "nr_dirty": - zoneinfoElement.NrDirty = vp.PInt64() - case "nr_writeback": - zoneinfoElement.NrWriteback = vp.PInt64() - case "nr_unevictable": - zoneinfoElement.NrUnevictable = vp.PInt64() - case "nr_shmem": - zoneinfoElement.NrShmem = vp.PInt64() - case "nr_dirtied": - zoneinfoElement.NrDirtied = vp.PInt64() - case "nr_written": - zoneinfoElement.NrWritten = vp.PInt64() - case "numa_hit": - zoneinfoElement.NumaHit = vp.PInt64() - case "numa_miss": - zoneinfoElement.NumaMiss = vp.PInt64() - case "numa_foreign": - zoneinfoElement.NumaForeign = vp.PInt64() - case "numa_interleave": - zoneinfoElement.NumaInterleave = vp.PInt64() - case "numa_local": - zoneinfoElement.NumaLocal = vp.PInt64() - case "numa_other": - zoneinfoElement.NumaOther = vp.PInt64() - case "protection:": - protectionParts := strings.Split(line, ":") - 
protectionValues := strings.Replace(protectionParts[1], "(", "", 1) - protectionValues = strings.Replace(protectionValues, ")", "", 1) - protectionValues = strings.TrimSpace(protectionValues) - protectionStringMap := strings.Split(protectionValues, ", ") - val, err := util.ParsePInt64s(protectionStringMap) - if err == nil { - zoneinfoElement.Protection = val - } - } - - } - - zoneinfo = append(zoneinfo, zoneinfoElement) - } - return zoneinfo, nil -} diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig b/openshift/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig deleted file mode 100644 index 6f944f5406..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig +++ /dev/null @@ -1,21 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[{Makefile,*.mk}] -indent_style = tab - -[*.nix] -indent_size = 2 - -[*.go] -indent_style = tab - -[{*.yml,*.yaml}] -indent_size = 2 diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.envrc b/openshift/tools/vendor/github.com/sagikazarmark/locafero/.envrc deleted file mode 100644 index 2e0f9f5f71..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.envrc +++ /dev/null @@ -1,4 +0,0 @@ -if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" -fi -use flake . --impure diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.gitignore b/openshift/tools/vendor/github.com/sagikazarmark/locafero/.gitignore deleted file mode 100644 index 8f07e60163..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -/.devenv/ -/.direnv/ -/.task/ -/bin/ -/build/ -/tmp/ -/var/ -/vendor/ diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/openshift/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml deleted file mode 100644 index 829de2a4a0..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml +++ /dev/null @@ -1,27 +0,0 @@ -run: - timeout: 10m - -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/sagikazarmark/locafero) - goimports: - local-prefixes: github.com/sagikazarmark/locafero - misspell: - locale: US - nolintlint: - allow-leading-space: false # require machine-readable nolint directives (with no leading space) - allow-unused: false # report any unused nolint directives - require-specific: false # don't require nolint directives to be specific about which linter is being skipped - revive: - confidence: 0 - -linters: - enable: - - gci - - goimports - - misspell - - nolintlint - - revive diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/LICENSE b/openshift/tools/vendor/github.com/sagikazarmark/locafero/LICENSE deleted file mode 100644 index a70b0f2960..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2023 Márk Sági-Kazár - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/README.md b/openshift/tools/vendor/github.com/sagikazarmark/locafero/README.md deleted file mode 100644 index a48e8e9789..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Finder library for [Afero](https://github.com/spf13/afero) - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) - -**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** - -> [!WARNING] -> This is an experimental library under development. -> -> **Backwards compatibility is not guaranteed, expect breaking changes.** - -## Installation - -```shell -go get github.com/sagikazarmark/locafero -``` - -## Usage - -Check out the [package example](https://pkg.go.dev/github.com/sagikazarmark/locafero#example-package) on go.dev. - -## Development - -**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).** - -Run the test suite: - -```shell -just test -``` - -## License - -The project is licensed under the [MIT License](LICENSE). diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/file_type.go b/openshift/tools/vendor/github.com/sagikazarmark/locafero/file_type.go deleted file mode 100644 index 9a9b140233..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/file_type.go +++ /dev/null @@ -1,28 +0,0 @@ -package locafero - -import "io/fs" - -// FileType represents the kind of entries [Finder] can return. 
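Note: a minimal sketch of how this locafero `Finder` is typically driven, using afero's in-memory filesystem so the example stays self-contained; not part of the vendored sources:

```go
package main

import (
	"fmt"

	"github.com/sagikazarmark/locafero"
	"github.com/spf13/afero"
)

func main() {
	// Seed an in-memory filesystem with one matching file.
	fsys := afero.NewMemMapFs()
	_ = afero.WriteFile(fsys, "home/user/config.yaml", []byte("a: 1"), 0o644)

	finder := locafero.Finder{
		Paths: []string{"home/user"},
		Names: []string{"config.*"}, // glob syntax as defined by filepath.Match
		Type:  locafero.FileTypeFile,
	}

	results, err := finder.Find(fsys)
	if err != nil {
		panic(err)
	}
	fmt.Println(results) // e.g. [home/user/config.yaml]
}
```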
-type FileType int - -const ( - FileTypeAll FileType = iota - FileTypeFile - FileTypeDir -) - -func (ft FileType) matchFileInfo(info fs.FileInfo) bool { - switch ft { - case FileTypeAll: - return true - - case FileTypeFile: - return !info.IsDir() - - case FileTypeDir: - return info.IsDir() - - default: - return false - } -} diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/finder.go b/openshift/tools/vendor/github.com/sagikazarmark/locafero/finder.go deleted file mode 100644 index ef8d547122..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/finder.go +++ /dev/null @@ -1,165 +0,0 @@ -// Package finder looks for files and directories in an {fs.Fs} filesystem. -package locafero - -import ( - "errors" - "io/fs" - "path/filepath" - "strings" - - "github.com/sourcegraph/conc/iter" - "github.com/spf13/afero" -) - -// Finder looks for files and directories in an [afero.Fs] filesystem. -type Finder struct { - // Paths represents a list of locations that the [Finder] will search in. - // - // They are essentially the root directories or starting points for the search. - // - // Examples: - // - home/user - // - etc - Paths []string - - // Names are specific entries that the [Finder] will look for within the given Paths. - // - // It provides the capability to search for entries with depth, - // meaning it can target deeper locations within the directory structure. - // - // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns. - // - // Examples: - // - config.yaml - // - home/*/config.yaml - // - home/*/config.* - Names []string - - // Type restricts the kind of entries returned by the [Finder]. - // - // This parameter helps in differentiating and filtering out files from directories or vice versa. - Type FileType -} - -// Find looks for files and directories in an [afero.Fs] filesystem. -func (f Finder) Find(fsys afero.Fs) ([]string, error) { - // Arbitrary go routine limit (TODO: make this a parameter) - // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError() - - type searchItem struct { - path string - name string - } - - var searchItems []searchItem - - for _, searchPath := range f.Paths { - searchPath := searchPath - - for _, searchName := range f.Names { - searchName := searchName - - searchItems = append(searchItems, searchItem{searchPath, searchName}) - - // pool.Go(func() ([]string, error) { - // // If the name contains any glob character, perform a glob match - // if strings.ContainsAny(searchName, globMatch) { - // return globWalkSearch(fsys, searchPath, searchName, f.Type) - // } - // - // return statSearch(fsys, searchPath, searchName, f.Type) - // }) - } - } - - // allResults, err := pool.Wait() - // if err != nil { - // return nil, err - // } - - allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { - // If the name contains any glob character, perform a glob match - if strings.ContainsAny(item.name, globMatch) { - return globWalkSearch(fsys, item.path, item.name, f.Type) - } - - return statSearch(fsys, item.path, item.name, f.Type) - }) - if err != nil { - return nil, err - } - - var results []string - - for _, r := range allResults { - results = append(results, r...) 
- } - - // Sort results in alphabetical order for now - // sort.Strings(results) - - return results, nil -} - -func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { - var results []string - - err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip the root path - if p == searchPath { - return nil - } - - var result error - - // Stop reading subdirectories - // TODO: add depth detection here - if fileInfo.IsDir() && filepath.Dir(p) == searchPath { - result = fs.SkipDir - } - - // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { - return result - } - - match, err := filepath.Match(searchName, fileInfo.Name()) - if err != nil { - return err - } - - if match { - results = append(results, p) - } - - return result - }) - if err != nil { - return results, err - } - - return results, nil -} - -func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { - filePath := filepath.Join(searchPath, searchName) - - fileInfo, err := fsys.Stat(filePath) - if errors.Is(err, fs.ErrNotExist) { - return nil, nil - } - if err != nil { - return nil, err - } - - // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { - return nil, nil - } - - return []string{filePath}, nil -} diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/flake.lock b/openshift/tools/vendor/github.com/sagikazarmark/locafero/flake.lock deleted file mode 100644 index df2a8cceca..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/flake.lock +++ /dev/null @@ -1,472 +0,0 @@ -{ - "nodes": { - "cachix": { - "inputs": { - "devenv": "devenv_2", - "flake-compat": [ - "devenv", - "flake-compat" - ], - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" - ] - }, - "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", - "owner": "cachix", - "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "cachix", - "type": "github" - } - }, - "devenv": { - "inputs": { - "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1725907707, - "narHash": "sha256-s3pbtzZmVPHzc86WQjK7MGZMNvvw6hWnFMljEkllAfM=", - "owner": "cachix", - "repo": "devenv", - "rev": "2bbbbc468fc02257265a79652a8350651cca495a", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - "flake-compat" - ], - "nix": "nix", - "nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - "cachix", - "pre-commit-hooks" - ] - }, - "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", - "owner": "cachix", - "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "python-rewrite", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": 
"35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1725234343, - "narHash": "sha256-+ebgonl3NbiKD2UD0x4BszCZQ6sTfL4xioaM49o5B3Y=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "567b938d64d4b4112ee253b9274472dc3a346eb6", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "inputs": { - "systems": "systems_2" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1709087332, - "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "nix": { - "inputs": { - "flake-compat": "flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression_2" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": 
"github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1725233747, - "narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", - "owner": "cachix", - "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "rolling", - "repo": "devenv-nixpkgs", - "type": "github" - } - }, - "nixpkgs_3": { - "locked": { - "lastModified": 1725910328, - "narHash": "sha256-n9pCtzGZ0httmTwMuEbi5E78UQ4ZbQMr1pzi5N0LAG8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "5775c2583f1801df7b790bf7f7d710a19bac66f4", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", - "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "poetry2nix", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - 
"lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/flake.nix b/openshift/tools/vendor/github.com/sagikazarmark/locafero/flake.nix deleted file mode 100644 index 312f1ec8cf..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/flake.nix +++ /dev/null @@ -1,64 +0,0 @@ -{ - description = "Finder library for Afero"; - - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ "x86_64-linux" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_23; - }; - - packages = with pkgs; [ - just - - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - - ci_1_21 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_21; - }; - }; - - ci_1_22 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_22; - }; - }; - - ci_1_23 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_23; - }; - }; - }; - }; - }; -} diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/glob.go b/openshift/tools/vendor/github.com/sagikazarmark/locafero/glob.go deleted file mode 100644 index 00f833e99c..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/glob.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !windows - -package locafero - -const globMatch = "*?[]\\^" diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/glob_windows.go b/openshift/tools/vendor/github.com/sagikazarmark/locafero/glob_windows.go deleted file mode 100644 index 7aec2b247d..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/glob_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build windows - -package locafero - -// See [filepath.Match]: -// -// On Windows, escaping is disabled. Instead, '\\' is treated as path separator. 
-const globMatch = "*?[]^" diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/helpers.go b/openshift/tools/vendor/github.com/sagikazarmark/locafero/helpers.go deleted file mode 100644 index 05b434481f..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/helpers.go +++ /dev/null @@ -1,41 +0,0 @@ -package locafero - -import "fmt" - -// NameWithExtensions creates a list of names from a base name and a list of extensions. -// -// TODO: find a better name for this function. -func NameWithExtensions(baseName string, extensions ...string) []string { - var names []string - - if baseName == "" { - return names - } - - for _, ext := range extensions { - if ext == "" { - continue - } - - names = append(names, fmt.Sprintf("%s.%s", baseName, ext)) - } - - return names -} - -// NameWithOptionalExtensions creates a list of names from a base name and a list of extensions, -// plus it adds the base name (without any extensions) to the end of the list. -// -// TODO: find a better name for this function. -func NameWithOptionalExtensions(baseName string, extensions ...string) []string { - var names []string - - if baseName == "" { - return names - } - - names = NameWithExtensions(baseName, extensions...) - names = append(names, baseName) - - return names -} diff --git a/openshift/tools/vendor/github.com/sagikazarmark/locafero/justfile b/openshift/tools/vendor/github.com/sagikazarmark/locafero/justfile deleted file mode 100644 index 00a88850cc..0000000000 --- a/openshift/tools/vendor/github.com/sagikazarmark/locafero/justfile +++ /dev/null @@ -1,11 +0,0 @@ -default: - just --list - -test: - go test -race -v ./... - -lint: - golangci-lint run - -fmt: - golangci-lint run --fix diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/.gitignore b/openshift/tools/vendor/github.com/sirupsen/logrus/.gitignore new file mode 100644 index 0000000000..1fb13abebe --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/.gitignore @@ -0,0 +1,4 @@ +logrus +vendor + +.idea/ diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/.golangci.yml b/openshift/tools/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 0000000000..65dc285037 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. 
+ check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/.travis.yml b/openshift/tools/vendor/github.com/sirupsen/logrus/.travis.yml new file mode 100644 index 0000000000..c1dbd5a3a3 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on +go: 1.15.x +os: linux +install: + - ./travis/install.sh +script: + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/openshift/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000000..7567f61289 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,259 @@ +# 1.8.1 +Code quality: + * move magefile into its own subdir/submodule to remove the magefile dependency on logrus consumers + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. + +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for a long-standing race condition with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency on a Windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions has been added + +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix which leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add a hooks/writer sub-package whose goal is to split output to different streams depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when it is empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+ * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. + * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter + +# 1.0.6 + +This new release introduces: + * a new API WithTime which makes it easy to force the time of the log entry, + which is mostly useful for logger wrappers + * a fix reverting the immutability of the entry given as parameter to the hooks + * a new configuration field of the json formatter in order to put all the fields + in a nested dictionary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing
io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/LICENSE b/openshift/tools/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 0000000000..f090cb42f3 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/README.md b/openshift/tools/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 0000000000..d1d4a85fd7 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,515 @@ +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** In the past it has been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+ +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```text +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. 
You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. 
Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") // will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example, to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: The syslog hook also supports connecting to local syslog (e.g. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the details, please check the [syslog hook README](hooks/syslog/README.md). + +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks). + + +#### Level logging + +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Trace("Something very low level.") +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`; it will then only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `WithFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself.
For example, if your application has a global +variable `Environment`, which is a string representation of the environment, you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +func init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, line number and calling function name when printing logs; saves logs to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). Logrus message JSON formatter with a human-readable caption added. + +You can define your own formatter by implementing the `Formatter` interface, +which requires a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` for information about those fields or read the + // source of the official loggers.
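+  // entry.Data holds only the user-supplied fields (a map[string]interface{}).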
+ serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers; you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with different configs in different environments.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import ( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t *testing.T) { + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need +to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+ +Situations when locking is not needed include: + +* You have no hooks registered, or hook calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/alt_exit.go b/openshift/tools/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000000..8fd189e1cc --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,76 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers; +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to shut down gracefully. An example use case could be +// closing database connections, or sending an alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers; +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. + +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to shut down gracefully. An example use case could be +// closing database connections, or sending an alert that the application is +// closing.
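+// Because the handler is prepended, the most recently deferred handler runs first, mirroring the semantics of defer.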
+func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/appveyor.yml b/openshift/tools/vendor/github.com/sirupsen/logrus/appveyor.yml new file mode 100644 index 0000000000..df9d65c3a5 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/buffer_pool.go b/openshift/tools/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 0000000000..c7787f77cb --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,43 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +// SetBufferPool allows replacing the default logrus buffer pool +// to better meet the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/doc.go b/openshift/tools/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 0000000000..da67aba06d --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/entry.go b/openshift/tools/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 0000000000..71cdbbc35d --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,442 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry.
It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. 
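+// It returns a copy of the entry with the given time; the receiver's field map is copied and left unmodified.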
+func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name. +// There really ought to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamically get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +func (entry *Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() + } + + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + buffer = bufPool.Get() + defer func() { + newEntry.Buffer = nil + buffer.Reset() + bufPool.Put(buffer) + }() + buffer.Reset() + newEntry.Buffer = buffer + + newEntry.write() + + newEntry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here.
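+	// (PanicLevel is the lowest numeric Level, so only panic-level entries reach this branch.)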
+ if level <= PanicLevel { + panic(newEntry) + } +} + +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + +func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) 
+} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of +// fmt.Sprintln, where spaces are always added between operands regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/exported.go b/openshift/tools/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 0000000000..017c30ce67 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,270 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns.
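+// For example: logrus.WithField("animal", "walrus").Info("A walrus appears")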
+func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. 
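+// It forwards to Infof, so arguments are formatted as with fmt.Printf.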
+func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/formatter.go b/openshift/tools/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 0000000000..408883773e --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. 
If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/hooks.go b/openshift/tools/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 0000000000..3f151cdc39 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/json_formatter.go b/openshift/tools/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000..c96dc5636b --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,128 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. 
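+// Keys that are not remapped resolve to their default names (see resolve below).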
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+// JSONFormatter formats logs into parsable JSON
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	// The format to use is the same as for time.Format or time.Parse from the standard
+	// library.
+	// The standard library already provides a set of predefined formats.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// DisableHTMLEscape allows disabling HTML escaping in output
+	DisableHTMLEscape bool
+
+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+	DataKey string
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime:  "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg:   "@message",
+	// 		 FieldKeyFunc:  "@caller",
+	//    },
+	// }
+	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the json data when ReportCaller is
+	// activated. If any of the returned values is the empty string, the
+	// corresponding key will be removed from json fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+4)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+
+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		funcVal := entry.Caller.Function
+		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		}
+		if funcVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+		}
+		if fileVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+		}
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	encoder := json.NewEncoder(b)
+	encoder.SetEscapeHTML(!f.DisableHTMLEscape)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
+		return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
+	}
+
+	return b.Bytes(), nil
+}
diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/logger.go b/openshift/tools/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000000..5ff0aef6d3
--- /dev/null
+++ b/openshift/tools/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 
+1,417 @@
+package logrus
+
+import (
+	"context"
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// LogFunction For big messages, it can be more efficient to pass a function
+// and only call it if the log level is actually enabled, rather than
+// generating the log message and then checking if the level is enabled
+type LogFunction func() []interface{}
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool
+
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
+	// The buffer pool used to format the log. If it is nil, the default global
+	// buffer pool will be used.
+	BufferPool BufferPool
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//	var log = &logrus.Logger{
+//		Out: os.Stderr,
+//		Formatter: new(logrus.TextFormatter),
+//		Hooks: make(logrus.LevelHooks),
+//		Level: logrus.DebugLevel,
+//	}
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:          os.Stderr,
+		Formatter:    new(TextFormatter),
+		Hooks:        make(LevelHooks),
+		Level:        InfoLevel,
+		ExitFunc:     os.Exit,
+		ReportCaller: false,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
+	logger.entryPool.Put(entry)
+}
+
+// WithField allocates a new entry and adds a field to it.
+// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to
+// this new returned entry.
+// If you want multiple fields, use `WithFields`. 
+func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, fn()...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) 
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	logger.Log(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) TraceFn(fn LogFunction) {
+	logger.LogFn(TraceLevel, fn)
+}
+
+func (logger *Logger) DebugFn(fn LogFunction) {
+	logger.LogFn(DebugLevel, fn)
+}
+
+func (logger *Logger) InfoFn(fn LogFunction) {
+	logger.LogFn(InfoLevel, fn)
+}
+
+func (logger *Logger) PrintFn(fn LogFunction) {
+	entry := logger.newEntry()
+	entry.Print(fn()...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) WarnFn(fn LogFunction) {
+	logger.LogFn(WarnLevel, fn)
+}
+
+func (logger *Logger) WarningFn(fn LogFunction) {
+	logger.WarnFn(fn)
+}
+
+func (logger *Logger) ErrorFn(fn LogFunction) {
+	logger.LogFn(ErrorLevel, fn)
+}
+
+func (logger *Logger) FatalFn(fn LogFunction) {
+	logger.LogFn(FatalLevel, fn)
+	logger.Exit(1)
+}
+
+func (logger *Logger) PanicFn(fn LogFunction) {
+	logger.LogFn(PanicLevel, fn)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logln(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+	logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	logger.Logln(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
+// When a file is opened in append mode, it's safe to write to it
+// concurrently (for messages within 4k on Linux). In these cases the user
+// can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter. 
+func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/logrus.go b/openshift/tools/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 0000000000..2f16224cb9 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. 
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than Debug.
+	TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
+// here for consistency. Do not use. Use Logger or Entry instead. 
+type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000000..2403de9819 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 0000000000..499789984d --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 0000000000..ebdae3ec62 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 0000000000..97af92c68e --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000000..3293fb3caa --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 0000000000..f6710b3bd0 --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+	return err == nil
+}
diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 0000000000..04748b8515
--- /dev/null
+++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
+// +build linux aix zos
+// +build !js
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000000..2879eb50ea
--- /dev/null
+++ b/openshift/tools/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,27 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/sys/windows"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		handle := windows.Handle(v.Fd())
+		var mode uint32
+		if err := windows.GetConsoleMode(handle, &mode); err != nil {
+			return false
+		}
+		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+		if err := windows.SetConsoleMode(handle, mode); err != nil {
+			return false
+		}
+		return true
+	}
+	return false
+}
diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/text_formatter.go b/openshift/tools/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000000..be2c6efe5e
--- /dev/null
+++ b/openshift/tools/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,339 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
+const (
+	red    = 31
+	yellow = 33
+	blue   = 36
+	gray   = 37
+)
+
+var baseTimestamp time.Time
+
+func init() {
+	baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Force quoting of all values
+	ForceQuote bool
+
+	// DisableQuote disables quoting for all values.
+	// DisableQuote will have a lower priority than ForceQuote.
+	// If both of them are set to true, quote will be forced on all values.
+	DisableQuote bool
+
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+	EnvironmentOverrideColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed.
+	// The format to use is the same as for time.Format or time.Parse from the standard
+	// library.
+	// The standard library already provides a set of predefined formats.
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+
+	// The keys sorting function; when uninitialized it uses sort.Strings. 
+ SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + case InfoLevel: + levelColor = blue + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/openshift/tools/vendor/github.com/sirupsen/logrus/writer.go b/openshift/tools/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 0000000000..074fd4b8bd --- /dev/null +++ b/openshift/tools/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,102 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" + "strings" +) + +// Writer at INFO level. See WriterLevel for details. +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +// Writer returns an io.Writer that writes to the logger at the info log level +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that writes to the logger at the given log level +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + // Determine which log function to use based on the specified log level + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
+	go entry.writerScanner(reader, printFunc)
+
+	// Set a finalizer function to close the writer when it is garbage collected
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+// writerScanner scans the input from the reader and writes it to the logger
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+
+	// Set the buffer size to the maximum token size to avoid buffer overflows
+	scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
+
+	// Define a split function to split the input into chunks of up to 64KB
+	chunkSize := bufio.MaxScanTokenSize // 64KB
+	splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
+		if len(data) >= chunkSize {
+			return chunkSize, data[:chunkSize], nil
+		}
+
+		return bufio.ScanLines(data, atEOF)
+	}
+
+	// Use the custom split function to split the input
+	scanner.Split(splitFunc)
+
+	// Scan the input and write it to the logger using the specified print function
+	for scanner.Scan() {
+		printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
+	}
+
+	// If there was an error while scanning the input, log an error
+	if err := scanner.Err(); err != nil {
+		entry.Errorf("Error while reading from Writer: %s", err)
+	}
+
+	// Close the reader when we are done
+	reader.Close()
+}
+
+// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/.golangci.yml b/openshift/tools/vendor/github.com/sourcegraph/conc/.golangci.yml
deleted file mode 100644
index ae65a760a9..0000000000
--- a/openshift/tools/vendor/github.com/sourcegraph/conc/.golangci.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-linters:
-  disable-all: true
-  enable:
-    - errcheck
-    - godot
-    - gosimple
-    - govet
-    - ineffassign
-    - staticcheck
-    - typecheck
-    - unused
diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/LICENSE b/openshift/tools/vendor/github.com/sourcegraph/conc/LICENSE
deleted file mode 100644
index 1081f4ef4a..0000000000
--- a/openshift/tools/vendor/github.com/sourcegraph/conc/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2023 Sourcegraph
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:

-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
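The `WriterLevel` plumbing vendored in `writer.go` above is the piece consumers most often touch: it adapts the logger to anything that only knows how to write to an `io.Writer`. A minimal usage sketch follows; it is not part of the vendored sources, just an illustration built on the public logrus API shown in this diff (the import paths and level constant are the real ones, the message text is arbitrary):

```go
package main

import (
	"log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// WriterLevel returns an *io.PipeWriter; each line written to it is
	// re-emitted through the logger at the given level, picking up the
	// logger's formatter and hooks along the way.
	w := logger.WriterLevel(logrus.WarnLevel)
	// The caller owns the pipe writer and must close it when done.
	defer w.Close()

	// Route the standard library logger through logrus.
	log.SetOutput(w)
	log.Println("this line is emitted as a logrus warn entry")
}
```

Because the scanner goroutine runs asynchronously, long-running processes are the intended use; a program that exits immediately after writing may race the final flush.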
diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/README.md b/openshift/tools/vendor/github.com/sourcegraph/conc/README.md deleted file mode 100644 index 1c87c3c969..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/README.md +++ /dev/null @@ -1,464 +0,0 @@ -![conch](https://user-images.githubusercontent.com/12631702/210295964-785cc63d-d697-420c-99ff-f492eb81dec9.svg) - -# `conc`: better structured concurrency for go - -[![Go Reference](https://pkg.go.dev/badge/github.com/sourcegraph/conc.svg)](https://pkg.go.dev/github.com/sourcegraph/conc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-sourcegraph-A112FE?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAEZklEQVRoQ+2aXWgUZxSG3292sxtNN43BhBakFPyhxSujRSxiU1pr7SaGXqgUxOIEW0IFkeYighYUxAuLUlq0lrq2iCDpjWtmFVtoG6QVNOCFVShVLyxIk0DVjZLMxt3xTGTccd2ZOd/8JBHci0CY9zvnPPN+/7sCIXwKavOwAcy2QgngQiIztDSE0OwQlDPYR1ebiaH6J5kZChyfW12gRG4QVgGTBfMchMbFP9Sn5nlZL2D0JjLD6710lc+z0NfqSGTXQRQ4bX07Mq423yoBL3OSyHSvUxirMuaEvgbJWrdcvkHMoJwxYuq4INUhyuWvQa1jvdMGxAvCxJlyEC9XOBCWL04wwRzpbDoDQ7wfZJzIQLi5Eggk6DiRhZgWIAbE3NrM4A3LPT8Q7UgqAqLqTmLSHLGPkyzG/qXEczhd0q6RH+zaSBfaUoc4iQx19pIClIscrTkNZzG6gd7qMY6eC2Hqyo705ZfTf+eqJmhMzcSbYtQpOXc92ZsZjLVAL4YNUQbJ5Ttg4CQrQdGYj44Xr9m1XJCzmZusFDJOWNpHjmh5x624a2ZFtOKDVL+uNo2TuXE3bZQQZUf8gtgqP31uI94Z/rMqix+IGiRfWw3xN9dCgVx+L3WrHm4Dju6PXz/EkjuXJ6R+IGgyOE1TbZqTq9y1eo0EZo7oMo1ktPu3xjHvuiLT5AFNszUyDULtWpzE2/fEsey8O5TbWuGWwxrs5rS7nFNMWJrNh2No74s9Ec4vRNmRRzPXMP19fBMSVsGcOJ98G8N3Wl2gXcbTjbX7vUBxLaeASDQCm5Cu/0E2tvtb0Ea+BowtskFD0wvlc6Rf2M+Jx7dTu7ubFr2dnKDRaMQe2v/tcIrNB7FH0O50AcrBaApmRDVwFO31ql3pD8QW4dP0feNwl/Q+kFEtRyIGyaWXnpy1OO0qNJWHo1y6iCmAGkBb/Ru+HenDWIF2mo4r8G+tRRzoniSn2uqFLxANhe9LKHVyTbz6egk9+x5w5fK6ulSNNMhZ/Feno+GebLZV6isTTa6k5qNl5RnZ5u56Ib6SBvFzaWBBVFZzvnERWlt/Cg4l27XChLCqFyLekjhy6xJyoytgjPf7opIB8QPx7sYFiMXHPGt76m741MhCKMZfng0nBOIjmoJPsLqWHwgFpe6V6qtfcopxveR2Oy+J0ntIN/zCWkf8QNAJ7y6d8Bq4lxLc2/qJl5K7t432XwcqX5CrI34gzATWuYILQtdQPyePDK3iuOekCR3Efjhig1B1Uq5UoXEEoZX7d1q535J5S9VOeFyYyEBku5XTMXXKQTToX5Rg7OI44nbW5oKYeYK4EniMeF0YFNSmb+grhc84LyRCEP1/OurOcipCQbKxDeK2V5FcVyIDMQvsgz5gwFhcWWwKyRlvQ3gv29RwWoDYAbIofNyBxI9eDlQ+n3YgsgCWnr4MStGXQXmv9pF2La/k3OccV54JEBM4yp9EsXa/3LfO0dGPcYq0Y7DfZB8nJzZw2rppHgKgVHs8L5wvRwAAAABJRU5ErkJggg==)](https://sourcegraph.com/github.com/sourcegraph/conc) -[![Go Report Card](https://goreportcard.com/badge/github.com/sourcegraph/conc)](https://goreportcard.com/report/github.com/sourcegraph/conc) -[![codecov](https://codecov.io/gh/sourcegraph/conc/branch/main/graph/badge.svg?token=MQZTEA1QWT)](https://codecov.io/gh/sourcegraph/conc) -[![Discord](https://img.shields.io/badge/discord-chat-%235765F2)](https://discord.gg/bvXQXmtRjN) - -`conc` is your toolbelt for structured concurrency in go, making common tasks -easier and safer. 
- -```sh -go get github.com/sourcegraph/conc -``` - -# At a glance - -- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup` -- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner -- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results -- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible -- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure -- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks -- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice -- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice -- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines - -All pools are created with -[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New) -or -[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults), -then configured with methods: - -- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool -- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors -- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error -- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error -- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored - -# Goals - -The main goals of the package are: -1) Make it harder to leak goroutines -2) Handle panics gracefully -3) Make concurrent code easier to read - -## Goal #1: Make it harder to leak goroutines - -A common pain point when working with goroutines is cleaning them up. It's -really easy to fire off a `go` statement and fail to properly wait for it to -complete. - -`conc` takes the opinionated stance that all concurrency should be scoped. -That is, goroutines should have an owner and that owner should always -ensure that its owned goroutines exit properly. - -In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines -are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and -`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out -of scope. - -In some cases, you might want a spawned goroutine to outlast the scope of the -caller. In that case, you could pass a `WaitGroup` into the spawning function. - -```go -func main() { - var wg conc.WaitGroup - defer wg.Wait() - - startTheThing(&wg) -} - -func startTheThing(wg *conc.WaitGroup) { - wg.Go(func() { ... 
}) -} -``` - -For some more discussion on why scoped concurrency is nice, check out [this -blog -post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/). - -## Goal #2: Handle panics gracefully - -A frequent problem with goroutines in long-running applications is handling -panics. A goroutine spawned without a panic handler will crash the whole process -on panic. This is usually undesirable. - -However, if you do add a panic handler to a goroutine, what do you do with the -panic once you catch it? Some options: -1) Ignore it -2) Log it -3) Turn it into an error and return that to the goroutine spawner -4) Propagate the panic to the goroutine spawner - -Ignoring panics is a bad idea since panics usually mean there is actually -something wrong and someone should fix it. - -Just logging panics isn't great either because then there is no indication to the spawner -that something bad happened, and it might just continue on as normal even though your -program is in a really bad state. - -Both (3) and (4) are reasonable options, but both require the goroutine to have -an owner that can actually receive the message that something went wrong. This -is generally not true with a goroutine spawned with `go`, but in the `conc` -package, all goroutines have an owner that must collect the spawned goroutine. -In the conc package, any call to `Wait()` will panic if any of the spawned goroutines -panicked. Additionally, it decorates the panic value with a stacktrace from the child -goroutine so that you don't lose information about what caused the panic. - -Doing this all correctly every time you spawn something with `go` is not -trivial and it requires a lot of boilerplate that makes the important parts of -the code more difficult to read, so `conc` does this for you. - - - - - - - - - - -
stdlibconc
- -```go -type caughtPanicError struct { - val any - stack []byte -} - -func (e *caughtPanicError) Error() string { - return fmt.Sprintf( - "panic: %q\n%s", - e.val, - string(e.stack) - ) -} - -func main() { - done := make(chan error) - go func() { - defer func() { - if v := recover(); v != nil { - done <- &caughtPanicError{ - val: v, - stack: debug.Stack() - } - } else { - done <- nil - } - }() - doSomethingThatMightPanic() - }() - err := <-done - if err != nil { - panic(err) - } -} -``` - - -```go -func main() { - var wg conc.WaitGroup - wg.Go(doSomethingThatMightPanic) - // panics with a nice stacktrace - wg.Wait() -} -``` -
- -## Goal #3: Make concurrent code easier to read - -Doing concurrency correctly is difficult. Doing it in a way that doesn't -obfuscate what the code is actually doing is more difficult. The `conc` package -attempts to make common operations easier by abstracting as much boilerplate -complexity as possible. - -Want to run a set of concurrent tasks with a bounded set of goroutines? Use -`pool.New()`. Want to process an ordered stream of results concurrently, but -still maintain order? Try `stream.New()`. What about a concurrent map over -a slice? Take a peek at `iter.Map()`. - -Browse some examples below for some comparisons with doing these by hand. - -# Examples - -Each of these examples forgoes propagating panics for simplicity. To see -what kind of complexity that would add, check out the "Goal #2" header above. - -Spawn a set of goroutines and waiting for them to finish: - - - - - - - - - - -
stdlibconc
- -```go -func main() { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // crashes on panic! - doSomething() - }() - } - wg.Wait() -} -``` - - -```go -func main() { - var wg conc.WaitGroup - for i := 0; i < 10; i++ { - wg.Go(doSomething) - } - wg.Wait() -} -``` -
- -Process each element of a stream in a static pool of goroutines: - - - - - - - - - - -
stdlibconc
- -```go -func process(stream chan int) { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for elem := range stream { - handle(elem) - } - }() - } - wg.Wait() -} -``` - - -```go -func process(stream chan int) { - p := pool.New().WithMaxGoroutines(10) - for elem := range stream { - elem := elem - p.Go(func() { - handle(elem) - }) - } - p.Wait() -} -``` -
- -Process each element of a slice in a static pool of goroutines: - - - - - - - - - - -
stdlibconc
- -```go -func process(values []int) { - feeder := make(chan int, 8) - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for elem := range feeder { - handle(elem) - } - }() - } - - for _, value := range values { - feeder <- value - } - close(feeder) - wg.Wait() -} -``` - - -```go -func process(values []int) { - iter.ForEach(values, handle) -} -``` -
- -Concurrently map a slice: - - - - - - - - - - -
stdlibconc
- -```go -func concMap( - input []int, - f func(int) int, -) []int { - res := make([]int, len(input)) - var idx atomic.Int64 - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - for { - i := int(idx.Add(1) - 1) - if i >= len(input) { - return - } - - res[i] = f(input[i]) - } - }() - } - wg.Wait() - return res -} -``` - - -```go -func concMap( - input []int, - f func(*int) int, -) []int { - return iter.Map(input, f) -} -``` -
- -Process an ordered stream concurrently: - - - - - - - - - - - -
stdlibconc
- -```go -func mapStream( - in chan int, - out chan int, - f func(int) int, -) { - tasks := make(chan func()) - taskResults := make(chan chan int) - - // Worker goroutines - var workerWg sync.WaitGroup - for i := 0; i < 10; i++ { - workerWg.Add(1) - go func() { - defer workerWg.Done() - for task := range tasks { - task() - } - }() - } - - // Ordered reader goroutines - var readerWg sync.WaitGroup - readerWg.Add(1) - go func() { - defer readerWg.Done() - for result := range taskResults { - item := <-result - out <- item - } - }() - - // Feed the workers with tasks - for elem := range in { - resultCh := make(chan int, 1) - taskResults <- resultCh - tasks <- func() { - resultCh <- f(elem) - } - } - - // We've exhausted input. - // Wait for everything to finish - close(tasks) - workerWg.Wait() - close(taskResults) - readerWg.Wait() -} -``` - - -```go -func mapStream( - in chan int, - out chan int, - f func(int) int, -) { - s := stream.New().WithMaxGoroutines(10) - for elem := range in { - elem := elem - s.Go(func() stream.Callback { - res := f(elem) - return func() { out <- res } - }) - } - s.Wait() -} -``` -
- -# Status - -This package is currently pre-1.0. There are likely to be minor breaking -changes before a 1.0 release as we stabilize the APIs and tweak defaults. -Please open an issue if you have questions, concerns, or requests that you'd -like addressed before the 1.0 release. Currently, a 1.0 is targeted for -March 2023. diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go b/openshift/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go deleted file mode 100644 index 7087e32a8f..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.20 -// +build !go1.20 - -package multierror - -import "go.uber.org/multierr" - -var ( - Join = multierr.Combine -) diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go b/openshift/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go deleted file mode 100644 index 39cff829ac..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.20 -// +build go1.20 - -package multierror - -import "errors" - -var ( - Join = errors.Join -) diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/iter/iter.go b/openshift/tools/vendor/github.com/sourcegraph/conc/iter/iter.go deleted file mode 100644 index 124b4f9400..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/iter/iter.go +++ /dev/null @@ -1,85 +0,0 @@ -package iter - -import ( - "runtime" - "sync/atomic" - - "github.com/sourcegraph/conc" -) - -// defaultMaxGoroutines returns the default maximum number of -// goroutines to use within this package. -func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } - -// Iterator can be used to configure the behaviour of ForEach -// and ForEachIdx. The zero value is safe to use with reasonable -// defaults. -// -// Iterator is also safe for reuse and concurrent use. -type Iterator[T any] struct { - // MaxGoroutines controls the maximum number of goroutines - // to use on this Iterator's methods. - // - // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). - MaxGoroutines int -} - -// ForEach executes f in parallel over each element in input. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// ForEach always uses at most runtime.GOMAXPROCS goroutines. -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. For -// a configurable goroutine limit, use a custom Iterator. -func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } - -// ForEach executes f in parallel over each element in input, -// using up to the Iterator's configured maximum number of -// goroutines. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. -func (iter Iterator[T]) ForEach(input []T, f func(*T)) { - iter.ForEachIdx(input, func(_ int, t *T) { - f(t) - }) -} - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. 
-func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. -func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) { - if iter.MaxGoroutines == 0 { - // iter is a value receiver and is hence safe to mutate - iter.MaxGoroutines = defaultMaxGoroutines() - } - - numInput := len(input) - if iter.MaxGoroutines > numInput { - // No more concurrent tasks than the number of input items. - iter.MaxGoroutines = numInput - } - - var idx atomic.Int64 - // Create the task outside the loop to avoid extra closure allocations. - task := func() { - i := int(idx.Add(1) - 1) - for ; i < numInput; i = int(idx.Add(1) - 1) { - f(i, &input[i]) - } - } - - var wg conc.WaitGroup - for i := 0; i < iter.MaxGoroutines; i++ { - wg.Go(task) - } - wg.Wait() -} diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/iter/map.go b/openshift/tools/vendor/github.com/sourcegraph/conc/iter/map.go deleted file mode 100644 index efbe6bfaf1..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/iter/map.go +++ /dev/null @@ -1,65 +0,0 @@ -package iter - -import ( - "sync" - - "github.com/sourcegraph/conc/internal/multierror" -) - -// Mapper is an Iterator with a result type R. It can be used to configure -// the behaviour of Map and MapErr. The zero value is safe to use with -// reasonable defaults. -// -// Mapper is also safe for reuse and concurrent use. -type Mapper[T, R any] Iterator[T] - -// Map applies f to each element of input, returning the mapped result. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. -func Map[T, R any](input []T, f func(*T) R) []R { - return Mapper[T, R]{}.Map(input, f) -} - -// Map applies f to each element of input, returning the mapped result. -// -// Map uses up to the configured Mapper's maximum number of goroutines. -func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R { - res := make([]R, len(input)) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - res[i] = f(t) - }) - return res -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. -func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) { - return Mapper[T, R]{}.MapErr(input, f) -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map uses up to the configured Mapper's maximum number of goroutines. 
-func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) { - var ( - res = make([]R, len(input)) - errMux sync.Mutex - errs error - ) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - var err error - res[i], err = f(t) - if err != nil { - errMux.Lock() - // TODO: use stdlib errors once multierrors land in go 1.20 - errs = multierror.Join(errs, err) - errMux.Unlock() - } - }) - return res, errs -} diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/panics/panics.go b/openshift/tools/vendor/github.com/sourcegraph/conc/panics/panics.go deleted file mode 100644 index abbed7fa05..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/panics/panics.go +++ /dev/null @@ -1,102 +0,0 @@ -package panics - -import ( - "fmt" - "runtime" - "runtime/debug" - "sync/atomic" -) - -// Catcher is used to catch panics. You can execute a function with Try, -// which will catch any spawned panic. Try can be called any number of times, -// from any number of goroutines. Once all calls to Try have completed, you can -// get the value of the first panic (if any) with Recovered(), or you can just -// propagate the panic (re-panic) with Repanic(). -type Catcher struct { - recovered atomic.Pointer[Recovered] -} - -// Try executes f, catching any panic it might spawn. It is safe -// to call from multiple goroutines simultaneously. -func (p *Catcher) Try(f func()) { - defer p.tryRecover() - f() -} - -func (p *Catcher) tryRecover() { - if val := recover(); val != nil { - rp := NewRecovered(1, val) - p.recovered.CompareAndSwap(nil, &rp) - } -} - -// Repanic panics if any calls to Try caught a panic. It will panic with the -// value of the first panic caught, wrapped in a panics.Recovered with caller -// information. -func (p *Catcher) Repanic() { - if val := p.Recovered(); val != nil { - panic(val) - } -} - -// Recovered returns the value of the first panic caught by Try, or nil if -// no calls to Try panicked. -func (p *Catcher) Recovered() *Recovered { - return p.recovered.Load() -} - -// NewRecovered creates a panics.Recovered from a panic value and a collected -// stacktrace. The skip parameter allows the caller to skip stack frames when -// collecting the stacktrace. Calling with a skip of 0 means include the call to -// NewRecovered in the stacktrace. -func NewRecovered(skip int, value any) Recovered { - // 64 frames should be plenty - var callers [64]uintptr - n := runtime.Callers(skip+1, callers[:]) - return Recovered{ - Value: value, - Callers: callers[:n], - Stack: debug.Stack(), - } -} - -// Recovered is a panic that was caught with recover(). -type Recovered struct { - // The original value of the panic. - Value any - // The caller list as returned by runtime.Callers when the panic was - // recovered. Can be used to produce a more detailed stack information with - // runtime.CallersFrames. - Callers []uintptr - // The formatted stacktrace from the goroutine where the panic was recovered. - // Easier to use than Callers. - Stack []byte -} - -// String renders a human-readable formatting of the panic. -func (p *Recovered) String() string { - return fmt.Sprintf("panic: %v\nstacktrace:\n%s\n", p.Value, p.Stack) -} - -// AsError casts the panic into an error implementation. The implementation -// is unwrappable with the cause of the panic, if the panic was provided one. -func (p *Recovered) AsError() error { - if p == nil { - return nil - } - return &ErrRecovered{*p} -} - -// ErrRecovered wraps a panics.Recovered in an error implementation. 
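The `panics.Catcher` deleted here is the mechanism that `conc.WaitGroup` builds on below; a minimal sketch of its use, with an illustrative panic value:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/panics"
)

func main() {
	var c panics.Catcher

	// Try catches any panic spawned by f; it may be called
	// repeatedly and from multiple goroutines.
	c.Try(func() { panic("boom") })
	c.Try(func() {}) // no panic; the first recovered value is kept

	// Recovered returns the first caught panic, or nil.
	if r := c.Recovered(); r != nil {
		fmt.Println(r.AsError()) // "panic: boom" plus a stacktrace
	}
}
```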
-type ErrRecovered struct{ Recovered } - -var _ error = (*ErrRecovered)(nil) - -func (p *ErrRecovered) Error() string { return p.String() } - -func (p *ErrRecovered) Unwrap() error { - if err, ok := p.Value.(error); ok { - return err - } - return nil -} diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/panics/try.go b/openshift/tools/vendor/github.com/sourcegraph/conc/panics/try.go deleted file mode 100644 index 4ded92a1cb..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/panics/try.go +++ /dev/null @@ -1,11 +0,0 @@ -package panics - -// Try executes f, catching and returning any panic it might spawn. -// -// The recovered panic can be propagated with panic(), or handled as a normal error with -// (*panics.Recovered).AsError(). -func Try(f func()) *Recovered { - var c Catcher - c.Try(f) - return c.Recovered() -} diff --git a/openshift/tools/vendor/github.com/sourcegraph/conc/waitgroup.go b/openshift/tools/vendor/github.com/sourcegraph/conc/waitgroup.go deleted file mode 100644 index 47b1bc1a5c..0000000000 --- a/openshift/tools/vendor/github.com/sourcegraph/conc/waitgroup.go +++ /dev/null @@ -1,52 +0,0 @@ -package conc - -import ( - "sync" - - "github.com/sourcegraph/conc/panics" -) - -// NewWaitGroup creates a new WaitGroup. -func NewWaitGroup() *WaitGroup { - return &WaitGroup{} -} - -// WaitGroup is the primary building block for scoped concurrency. -// Goroutines can be spawned in the WaitGroup with the Go method, -// and calling Wait() will ensure that each of those goroutines exits -// before continuing. Any panics in a child goroutine will be caught -// and propagated to the caller of Wait(). -// -// The zero value of WaitGroup is usable, just like sync.WaitGroup. -// Also like sync.WaitGroup, it must not be copied after first use. -type WaitGroup struct { - wg sync.WaitGroup - pc panics.Catcher -} - -// Go spawns a new goroutine in the WaitGroup. -func (h *WaitGroup) Go(f func()) { - h.wg.Add(1) - go func() { - defer h.wg.Done() - h.pc.Try(f) - }() -} - -// Wait will block until all goroutines spawned with Go exit and will -// propagate any panics spawned in a child goroutine. -func (h *WaitGroup) Wait() { - h.wg.Wait() - - // Propagate a panic if we caught one from a child goroutine. - h.pc.Repanic() -} - -// WaitAndRecover will block until all goroutines spawned with Go exit and -// will return a *panics.Recovered if one of the child goroutines panics. -func (h *WaitGroup) WaitAndRecover() *panics.Recovered { - h.wg.Wait() - - // Return a recovered panic if we caught one from a child goroutine. 
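And a minimal sketch of the `conc.WaitGroup` shown above, demonstrating zero-value use and panic propagation via `WaitAndRecover`; the worker bodies are illustrative:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc"
)

func main() {
	var wg conc.WaitGroup // the zero value is ready to use

	wg.Go(func() { fmt.Println("working") })
	wg.Go(func() { panic("child failed") })

	// WaitAndRecover blocks like Wait, but hands the first child
	// panic back to the caller instead of re-panicking.
	if r := wg.WaitAndRecover(); r != nil {
		fmt.Println("recovered:", r.Value)
	}
}
```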
- return h.pc.Recovered() -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/.editorconfig b/openshift/tools/vendor/github.com/spf13/afero/.editorconfig deleted file mode 100644 index 4492e9f9fe..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.go] -indent_style = tab diff --git a/openshift/tools/vendor/github.com/spf13/afero/.gitignore b/openshift/tools/vendor/github.com/spf13/afero/.gitignore deleted file mode 100644 index 9c1d986118..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -sftpfs/file1 -sftpfs/test/ diff --git a/openshift/tools/vendor/github.com/spf13/afero/.golangci.yaml b/openshift/tools/vendor/github.com/spf13/afero/.golangci.yaml deleted file mode 100644 index 806289a250..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/.golangci.yaml +++ /dev/null @@ -1,18 +0,0 @@ -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/afero) - -linters: - disable-all: true - enable: - - gci - - gofmt - - gofumpt - - staticcheck - -issues: - exclude-dirs: - - gcsfs/internal/stiface diff --git a/openshift/tools/vendor/github.com/spf13/afero/LICENSE.txt b/openshift/tools/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e2665..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/openshift/tools/vendor/github.com/spf13/afero/README.md b/openshift/tools/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index 619af574f3..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,442 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is a filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. - -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with, while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. 
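To make the overview concrete before the feature list, here is a minimal sketch of code written against the `afero.Fs` interface and exercised with the in-memory backend; `countFiles` and the paths are hypothetical, not part of afero:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// countFiles depends only on the afero.Fs interface, so it works
// identically against the real OS or an in-memory filesystem.
func countFiles(fs afero.Fs, dir string) (int, error) {
	infos, err := afero.ReadDir(fs, dir)
	if err != nil {
		return 0, err
	}
	return len(infos), nil
}

func main() {
	fs := afero.NewMemMapFs() // swap in afero.NewOsFs() for the real disk
	afero.WriteFile(fs, "/data/a.txt", []byte("a"), 0o644)
	afero.WriteFile(fs, "/data/b.txt", []byte("b"), 0o644)

	n, err := countFiles(fs, "/data")
	fmt.Println(n, err) // 2 <nil>
}
```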
-
-
-## Afero Features
-
-* A single consistent API for accessing a variety of filesystems
-* Interoperation between a variety of file system types
-* A set of interfaces to encourage and enforce interoperability between backends
-* An atomic cross platform memory backed file system
-* Support for compositional (union) file systems by combining multiple file systems acting as one
-* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
-* A set of utility functions ported from io, ioutil & hugo to be afero aware
-* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`
-
-# Using Afero
-
-Afero is easy to use and easier to adopt.
-
-A few different ways you could use Afero:
-
-* Use the interfaces alone to define your own file system.
-* Wrapper for the OS packages.
-* Define different filesystems for different parts of your application.
-* Use Afero for mock filesystems while testing
-
-## Step 1: Install Afero
-
-First use go get to install the latest version of the library.
-
-    $ go get github.com/spf13/afero
-
-Next include Afero in your application.
-```go
-import "github.com/spf13/afero"
-```
-
-## Step 2: Declare a backend
-
-First define a package variable and set it to a pointer to a filesystem.
-```go
-var AppFs = afero.NewMemMapFs()
-
-// or
-
-var AppFs = afero.NewOsFs()
-```
-It is important to note that if you repeat the composite literal you
-will be using a completely new and isolated filesystem. In the case of
-OsFs it will still use the same underlying filesystem but will reduce
-the ability to drop in other filesystems as desired.
-
-## Step 3: Use it like you would the OS package
-
-Throughout your application use any function and method like you normally
-would.
-
-So if my application previously had:
-```go
-os.Open("/tmp/foo")
-```
-We would replace it with:
-```go
-AppFs.Open("/tmp/foo")
-```
-
-`AppFs` is the variable we defined above.
-
-
-## List of all available functions
-
-File System Methods Available:
-```go
-Chmod(name string, mode os.FileMode) : error
-Chown(name string, uid, gid int) : error
-Chtimes(name string, atime time.Time, mtime time.Time) : error
-Create(name string) : File, error
-Mkdir(name string, perm os.FileMode) : error
-MkdirAll(path string, perm os.FileMode) : error
-Name() : string
-Open(name string) : File, error
-OpenFile(name string, flag int, perm os.FileMode) : File, error
-Remove(name string) : error
-RemoveAll(path string) : error
-Rename(oldname, newname string) : error
-Stat(name string) : os.FileInfo, error
-```
-File Interfaces and Methods Available:
-```go
-io.Closer
-io.Reader
-io.ReaderAt
-io.Seeker
-io.Writer
-io.WriterAt
-
-Name() : string
-Readdir(count int) : []os.FileInfo, error
-Readdirnames(n int) : []string, error
-Stat() : os.FileInfo, error
-Sync() : error
-Truncate(size int64) : error
-WriteString(s string) : ret int, err error
-```
-In some applications it may make sense to define a new package that
-simply exports the file system variable for easy access from anywhere.
-
-## Using Afero's utility functions
-
-Afero provides a set of functions to make it easier to use the underlying file systems.
-These functions have been primarily ported from io & ioutil with some developed for Hugo.
-
-The afero utilities support all afero compatible backends.
-
-The list of utilities includes:
-
-```go
-DirExists(path string) (bool, error)
-Exists(path string) (bool, error)
-FileContainsBytes(filename string, subslice []byte) (bool, error)
-GetTempDir(subPath string) string
-IsDir(path string) (bool, error)
-IsEmpty(path string) (bool, error)
-ReadDir(dirname string) ([]os.FileInfo, error)
-ReadFile(filename string) ([]byte, error)
-SafeWriteReader(path string, r io.Reader) (err error)
-TempDir(dir, prefix string) (name string, err error)
-TempFile(dir, prefix string) (f File, err error)
-Walk(root string, walkFn filepath.WalkFunc) error
-WriteFile(filename string, data []byte, perm os.FileMode) error
-WriteReader(path string, r io.Reader) (err error)
-```
-For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
-
-They can be used in either of two ways. You can either call
-them directly, where the first parameter of each function will be the file
-system, or you can declare a new `Afero`, a custom type used to bind these
-functions as methods to a given filesystem.
-
-### Calling utilities directly
-
-```go
-fs := new(afero.MemMapFs)
-f, err := afero.TempFile(fs, "", "ioutil-test")
-```
-
-### Calling via Afero
-
-```go
-fs := afero.NewMemMapFs()
-afs := &afero.Afero{Fs: fs}
-f, err := afs.TempFile("", "ioutil-test")
-```
-
-## Using Afero for Testing
-
-There is a large benefit to using a mock filesystem for testing. It has a
-completely blank state every time it is initialized and is easily
-reproducible regardless of OS. You could create files to your heart’s content
-and the file access would be fast while also saving you from all the annoying
-issues with deleting temporary files, Windows file locking, etc. The MemMapFs
-backend is perfect for testing.
-
-* Much faster than performing I/O operations on disk
-* Avoid security and permissions issues
-* Far more control. 'rm -rf /' with confidence
-* Test setup is far easier to do
-* No test cleanup needed
-
-One way to accomplish this is to define a variable as mentioned above.
-In your application this will be set to afero.NewOsFs(); during testing you
-can set it to afero.NewMemMapFs().
-
-It wouldn't be uncommon to have each test initialize a blank slate memory
-backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
-appropriate in my application code. This approach ensures that tests are order
-independent, with no test relying on the state left by an earlier test.
-
-Then in my tests I would initialize a new MemMapFs for each test:
-```go
-func TestExist(t *testing.T) {
-	appFS := afero.NewMemMapFs()
-	// create test files and directories
-	appFS.MkdirAll("src/a", 0755)
-	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
-	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
-	name := "src/c"
-	_, err := appFS.Stat(name)
-	if os.IsNotExist(err) {
-		t.Errorf("file \"%s\" does not exist.\n", name)
-	}
-}
-```
-
-# Available Backends
-
-## Operating System Native
-
-### OsFs
-
-The first is simply a wrapper around the native OS calls. This makes it
-very easy to use as all of the calls are the same as the existing OS
-calls. It also makes it trivial to have your code use the OS during
-operation and a mock filesystem during testing or as needed.
-
-```go
-appfs := afero.NewOsFs()
-appfs.MkdirAll("src/a", 0755)
-```
-
-## Memory Backed Storage
-
-### MemMapFs
-
-Afero also provides a fully atomic memory backed filesystem, perfect for use in
-mocking and for avoiding unnecessary disk I/O when persistence isn't
-required. It is fully concurrent and will work safely within goroutines.
-
-```go
-mm := afero.NewMemMapFs()
-mm.MkdirAll("src/a", 0755)
-```
-
-#### InMemoryFile
-
-As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
-backed file implementation. This can be used in other memory backed file
-systems with ease. Plans are to add a radix tree memory stored file
-system using InMemoryFile.
-
-## Network Interfaces
-
-### SftpFs
-
-Afero has experimental support for the secure file transfer protocol (SFTP), which can
-be used to perform file operations over an encrypted channel.
-
-### GCSFs
-
-Afero has experimental support for Google Cloud Storage (GCS). You can either set the
-`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
-`NewGcsFS` to configure access to your GCS bucket.
-
-Some known limitations of the existing implementation:
-* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version.
-* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that is left for another version.
-* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.
-
-
-## Filtering Backends
-
-### BasePathFs
-
-The BasePathFs restricts all operations to a given path within an Fs.
-The file name given to operations on this Fs will be prepended with
-the base path before calling the source Fs.
-
-```go
-bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
-```
-
-### ReadOnlyFs
-
-A thin wrapper around the source Fs providing a read only view.
-
-```go
-fs := afero.NewReadOnlyFs(afero.NewOsFs())
-_, err := fs.Create("/file.txt")
-// err = syscall.EPERM
-```
-
-### RegexpFs
-
-A filtered view on file names; any file NOT matching
-the passed regexp will be treated as non-existing.
-Files not matching the regexp provided will not be created.
-Directories are not filtered.
-
-```go
-fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
-_, err := fs.Create("/file.html")
-// err = syscall.ENOENT
-```
-
-### HttpFs
-
-Afero provides an http compatible backend which can wrap any of the existing
-backends.
-
-The http package requires a slightly different version of Open, which
-returns an http.File type.
-
-Afero provides an httpFs file system which satisfies this requirement.
-Any Afero FileSystem can be used as an httpFs.
-
-```go
-httpFs := afero.NewHttpFs(afero.NewOsFs())
-fileserver := http.FileServer(httpFs.Dir("/"))
-http.Handle("/", fileserver)
-```
-
-## Composite Backends
-
-Afero provides the ability to have two (or more) filesystems act as a single
-file system.
-
-### CacheOnReadFs
-
-The CacheOnReadFs will lazily make copies of any accessed files from the base
-layer into the overlay. Subsequent reads will be pulled from the overlay
-directly, provided the request is within the cache duration of when it was
-created in the overlay.
-
-If the base filesystem is writeable, any changes to files will be
-done first to the base, then to the overlay layer. Write calls to open file
-handles like `Write()` or `Truncate()` go to the overlay first.
-
-To write files to the overlay only, you can use the overlay Fs directly (not
-via the union Fs).
-
-Files are cached in the layer for the given time.Duration; a cache duration of 0
-means "forever", meaning the file will never be re-requested from the base.
-
-A read-only base will make the overlay also read-only but still copy files
-from the base to the overlay when they're not present (or outdated) in the
-caching layer.
-
-```go
-base := afero.NewOsFs()
-layer := afero.NewMemMapFs()
-ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
-```
-
-### CopyOnWriteFs
-
-The CopyOnWriteFs is a read only base file system with a potentially
-writeable layer on top.
-
-Read operations will first look in the overlay and, if not found there, will
-serve the file from the base.
-
-Changes to the file system will only be made in the overlay.
-
-Any attempt to modify a file found only in the base will copy the file to the
-overlay layer before modification (including opening a file with a writable
-handle).
-
-Removing and renaming files present only in the base layer is not currently
-permitted. If a file is present in the base layer and the overlay, only the
-overlay will be removed/renamed.
-
-```go
-	base := afero.NewOsFs()
-	roBase := afero.NewReadOnlyFs(base)
-	ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
-
-	fh, _ := ufs.Create("/home/test/file2.txt")
-	fh.WriteString("This is a test")
-	fh.Close()
-```
-
-In this example all write operations will only occur in memory (MemMapFs),
-leaving the base filesystem (OsFs) untouched.
-
-
-## Desired/possible backends
-
-The following is a short list of possible backends we hope someone will
-implement:
-
-* SSH
-* S3
-
-# About the project
-
-## What's in the name
-
-Afero comes from the Latin roots Ad-Facere.
-
-**"Ad"** is a prefix meaning "to".
-
-**"Facere"** is a form of the root "faciō", meaning "to make or do".
-
-The literal meaning of afero is "to make" or "to do", which seems very fitting
-for a library that allows one to make files and directories and do things with them.
-
-The English word that shares the same roots as Afero is "affair". Affair shares
-the same concept, but as a noun it means "something that is made or done" or "an
-object of a particular type".
-
-It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
-Googles very well.
-
-## Release Notes
-
-See the [Releases Page](https://github.com/spf13/afero/releases).
-
-## Contributing
-
-1. Fork it
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Commit your changes (`git commit -am 'Add some feature'`)
-4. Push to the branch (`git push origin my-new-feature`)
-5. Create a new Pull Request
-
-## Contributors
-
-Names in no particular order:
-
-* [spf13](https://github.com/spf13)
-* [jaqx0r](https://github.com/jaqx0r)
-* [mbertschler](https://github.com/mbertschler)
-* [xor-gate](https://github.com/xor-gate)
-
-## License
-
-Afero is released under the Apache 2.0 license. See
-[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/openshift/tools/vendor/github.com/spf13/afero/afero.go b/openshift/tools/vendor/github.com/spf13/afero/afero.go
deleted file mode 100644
index 39f6585209..0000000000
--- a/openshift/tools/vendor/github.com/spf13/afero/afero.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright © 2014 Steve Francia <spf@spf13.com>.
-// Copyright 2013 tsuru authors. All rights reserved.
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - // Chmod changes the mode of the named file to mode. - Chmod(name string, mode os.FileMode) error - - // Chown changes the uid and gid of the named file. 
- Chown(name string, uid, gid int) error - - // Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("out of range") - ErrTooLarge = errors.New("too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/openshift/tools/vendor/github.com/spf13/afero/appveyor.yml b/openshift/tools/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index 65e20e8ca3..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,10 +0,0 @@ -# This currently does nothing. We have moved to GitHub action, but this is kept -# until spf13 has disabled this project in AppVeyor. -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - diff --git a/openshift/tools/vendor/github.com/spf13/afero/basepath.go b/openshift/tools/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 2e72793a3e..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,222 +0,0 @@ -package afero - -import ( - "io/fs" - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var ( - _ Lstater = (*BasePathFs)(nil) - _ fs.ReadDirFile = (*BasePathFile)(nil) -) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { - if rdf, ok := f.File.(fs.ReadDirFile); ok { - return rdf.ReadDir(n) - } - return readDirFile{f.File}.ReadDir(n) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. 
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chown", Path: name, Err: err} - } - return b.source.Chown(name, uid, gid) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return 
lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { - oldname, err := b.RealPath(oldname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - newname, err = b.RealPath(newname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - if linker, ok := b.source.(Linker); ok { - return linker.SymlinkIfPossible(oldname, newname) - } - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { - name, err := b.RealPath(name) - if err != nil { - return "", &os.PathError{Op: "readlink", Path: name, Err: err} - } - if reader, ok := b.source.(LinkReader); ok { - return reader.ReadlinkIfPossible(name) - } - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/cacheOnReadFs.go b/openshift/tools/vendor/github.com/spf13/afero/cacheOnReadFs.go deleted file mode 100644 index 017d344fd5..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,315 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. 
-type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { - return copyFileToLayer(u.base, u.layer, name, flag, perm) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chown(name, uid, gid) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chown(name, uid, gid) - } - if err != nil { - return err - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - 
} - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyFileToLayer(name, flag, perm); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? 
then we have to
-		// remember if the file did not exist before
-		bfh.Close()
-		return nil, err
-	}
-	return &UnionFile{Base: bfh, Layer: lfh}, nil
-}
diff --git a/openshift/tools/vendor/github.com/spf13/afero/const_bsds.go b/openshift/tools/vendor/github.com/spf13/afero/const_bsds.go
deleted file mode 100644
index 30855de572..0000000000
--- a/openshift/tools/vendor/github.com/spf13/afero/const_bsds.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright © 2016 Steve Francia <spf@spf13.com>.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly || zos
-// +build aix darwin openbsd freebsd netbsd dragonfly zos
-
-package afero
-
-import (
-	"syscall"
-)
-
-const BADFD = syscall.EBADF
diff --git a/openshift/tools/vendor/github.com/spf13/afero/const_win_unix.go b/openshift/tools/vendor/github.com/spf13/afero/const_win_unix.go
deleted file mode 100644
index 12792d21e2..0000000000
--- a/openshift/tools/vendor/github.com/spf13/afero/const_win_unix.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright © 2016 Steve Francia <spf@spf13.com>.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix && !zos
-// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix,!zos
-
-package afero
-
-import (
-	"syscall"
-)
-
-const BADFD = syscall.EBADFD
diff --git a/openshift/tools/vendor/github.com/spf13/afero/copyOnWriteFs.go b/openshift/tools/vendor/github.com/spf13/afero/copyOnWriteFs.go
deleted file mode 100644
index 184d6dd702..0000000000
--- a/openshift/tools/vendor/github.com/spf13/afero/copyOnWriteFs.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package afero
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"syscall"
-	"time"
-)
-
-var _ Lstater = (*CopyOnWriteFs)(nil)
-
-// The CopyOnWriteFs is a union filesystem: a read only base file system with
-// a possibly writeable layer on top. Changes to the file system will only
-// be made in the overlay: Changing an existing file in the base layer which
-// is not present in the overlay will copy the file to the overlay ("changing"
-// includes also calls to e.g. Chtimes(), Chmod() and Chown()).
-//
-// Reading directories is currently only supported via Open(), not OpenFile().
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { - if slayer, ok := u.layer.(Linker); ok { - return slayer.SymlinkIfPossible(oldname, newname) - } - - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { - if rlayer, ok := u.layer.(LinkReader); ok { - return rlayer.ReadlinkIfPossible(name) - } - - if rbase, ok := u.base.(LinkReader); ok { - return rbase.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. 
If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. -func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0o777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666) -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/httpFs.go b/openshift/tools/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index ac0de6d51f..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chown(name string, uid, gid int) error { - return h.source.Chown(name, uid, gid) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/internal/common/adapters.go b/openshift/tools/vendor/github.com/spf13/afero/internal/common/adapters.go deleted file mode 100644 index 60685caa54..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/internal/common/adapters.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2022 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import "io/fs" - -// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry -type FileInfoDirEntry struct { - fs.FileInfo -} - -var _ fs.DirEntry = FileInfoDirEntry{} - -func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } - -func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/openshift/tools/vendor/github.com/spf13/afero/iofs.go b/openshift/tools/vendor/github.com/spf13/afero/iofs.go deleted file mode 100644 index b13155ca4a..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/iofs.go +++ /dev/null @@ -1,297 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package afero - -import ( - "io" - "io/fs" - "os" - "path" - "sort" - "time" - - "github.com/spf13/afero/internal/common" -) - -// IOFS adopts afero.Fs to stdlib io/fs.FS -type IOFS struct { - Fs -} - -func NewIOFS(fs Fs) IOFS { - return IOFS{Fs: fs} -} - -var ( - _ fs.FS = IOFS{} - _ fs.GlobFS = IOFS{} - _ fs.ReadDirFS = IOFS{} - _ fs.ReadFileFS = IOFS{} - _ fs.StatFS = IOFS{} - _ fs.SubFS = IOFS{} -) - -func (iofs IOFS) Open(name string) (fs.File, error) { - const op = "open" - - // by convention for fs.FS implementations we should perform this check - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - file, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - // file should implement fs.ReadDirFile - if _, ok := file.(fs.ReadDirFile); !ok { - file = readDirFile{file} - } - - return file, nil -} - -func (iofs IOFS) Glob(pattern string) ([]string, error) { - const op = "glob" - - // afero.Glob does not perform this check but it's required for implementations - if _, err := path.Match(pattern, ""); err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - items, err := Glob(iofs.Fs, pattern) - if err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - return items, nil -} - -func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { - f, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - - defer f.Close() - - if rdf, ok := f.(fs.ReadDirFile); ok { - items, err := rdf.ReadDir(-1) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) - return items, nil - } - - items, err := f.Readdir(-1) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - sort.Sort(byName(items)) - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} - } - - return ret, nil -} - -func (iofs IOFS) ReadFile(name string) ([]byte, error) { - const op = "readfile" - - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - bytes, err := ReadFile(iofs.Fs, name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - return bytes, nil -} - -func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } - -func (IOFS) wrapError(op, path string, err error) error { - if _, ok := err.(*fs.PathError); ok { - return err // don't need to wrap again - } - - return &fs.PathError{ - Op: op, - Path: path, - Err: err, - } -} - -// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open -type readDirFile struct { - File -} - -var _ fs.ReadDirFile = readDirFile{} - -func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { 
- items, err := r.File.Readdir(n) - if err != nil { - return nil, err - } - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} - } - - return ret, nil -} - -// FromIOFS adopts io/fs.FS to use it as afero.Fs -// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission -// To store modifications you may use afero.CopyOnWriteFs -type FromIOFS struct { - fs.FS -} - -var _ Fs = FromIOFS{} - -func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } - -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } - -func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { - return notImplemented("mkdirall", path) -} - -func (f FromIOFS) Open(name string) (File, error) { - file, err := f.FS.Open(name) - if err != nil { - return nil, err - } - - return fromIOFSFile{File: file, name: name}, nil -} - -func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return f.Open(name) -} - -func (f FromIOFS) Remove(name string) error { - return notImplemented("remove", name) -} - -func (f FromIOFS) RemoveAll(path string) error { - return notImplemented("removeall", path) -} - -func (f FromIOFS) Rename(oldname, newname string) error { - return notImplemented("rename", oldname) -} - -func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } - -func (f FromIOFS) Name() string { return "fromiofs" } - -func (f FromIOFS) Chmod(name string, mode os.FileMode) error { - return notImplemented("chmod", name) -} - -func (f FromIOFS) Chown(name string, uid, gid int) error { - return notImplemented("chown", name) -} - -func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { - return notImplemented("chtimes", name) -} - -type fromIOFSFile struct { - fs.File - name string -} - -func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { - readerAt, ok := f.File.(io.ReaderAt) - if !ok { - return -1, notImplemented("readat", f.name) - } - - return readerAt.ReadAt(p, off) -} - -func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { - seeker, ok := f.File.(io.Seeker) - if !ok { - return -1, notImplemented("seek", f.name) - } - - return seeker.Seek(offset, whence) -} - -func (f fromIOFSFile) Write(p []byte) (n int, err error) { - return -1, notImplemented("write", f.name) -} - -func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { - return -1, notImplemented("writeat", f.name) -} - -func (f fromIOFSFile) Name() string { return f.name } - -func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(count) - if err != nil { - return nil, err - } - - ret := make([]os.FileInfo, len(entries)) - for i := range entries { - ret[i], err = entries[i].Info() - if err != nil { - return nil, err - } - } - - return ret, nil -} - -func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(n) - if err != nil { - return nil, err - } - - ret := make([]string, len(entries)) - for i := range entries { - ret[i] = entries[i].Name() - } - - return ret, nil -} - -func (f fromIOFSFile) Sync() error { return nil } - -func (f fromIOFSFile) 
Truncate(size int64) error { - return notImplemented("truncate", f.name) -} - -func (f fromIOFSFile) WriteString(s string) (ret int, err error) { - return -1, notImplemented("writestring", f.name) -} - -func notImplemented(op, path string) error { - return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/ioutil.go b/openshift/tools/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index fa6abe1eee..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. 
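The iofs.go adapters deleted in the hunk above bridge afero and the standard library in both directions: NewIOFS wraps an afero.Fs as an io/fs.FS, while FromIOFS wraps an io/fs.FS as a read-only afero.Fs. A minimal sketch of the first direction, assuming the API as vendored here (paths are illustrative):

package main

import (
	"fmt"
	"io/fs"

	"github.com/spf13/afero"
)

func main() {
	mem := afero.NewMemMapFs()
	afero.WriteFile(mem, "manifests/deployment.yaml", []byte("kind: Deployment\n"), 0o644)

	// NewIOFS adapts the afero.Fs to io/fs.FS, so stdlib helpers
	// such as fs.WalkDir work against it unchanged.
	var stdFS fs.FS = afero.NewIOFS(mem)
	_ = fs.WalkDir(stdFS, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, d.IsDir())
		return nil
	})

	// FromIOFS is the reverse adapter; it is read-only, so mutating
	// methods return fs.ErrPermission, as the deleted code above shows.
}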
-func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var ( - randNum uint32 - randmu sync.Mutex -) - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextRandom() string { - randmu.Lock() - r := randNum - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - randNum = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir, -// opens the file for reading and writing, and returns the resulting *os.File. -// The filename is generated by taking pattern and adding a random -// string to the end. If pattern includes a "*", the random string -// replaces the last "*". -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. 
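A short, hedged usage sketch of the TempFile helper documented above, run against an in-memory filesystem so it is side-effect free (the pattern string is an invented example):

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	memFs := afero.NewMemMapFs()

	// "*" in the pattern is replaced by the random string; an empty dir
	// selects the default temp directory, per the doc comment above.
	f, err := afero.TempFile(memFs, "", "manifest-*.yaml")
	if err != nil {
		log.Fatal(err)
	}
	defer memFs.Remove(f.Name()) // cleanup is the caller's responsibility
	fmt.Println("created", f.Name())
}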
-func (a Afero) TempFile(dir, pattern string) (f File, err error) { - return TempFile(a.Fs, dir, pattern) -} - -func TempFile(fs Fs, dir, pattern string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - var prefix, suffix string - if pos := strings.LastIndex(pattern, "*"); pos != -1 { - prefix, suffix = pattern[:pos], pattern[pos+1:] - } else { - prefix = pattern - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - randNum = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} - -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0o700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - randNum = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/lstater.go b/openshift/tools/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0a7..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/match.go b/openshift/tools/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index 7db4b7de6e..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.ContainsAny(path, "*?[") -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/mem/dir.go b/openshift/tools/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f45..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/mem/dirmap.go b/openshift/tools/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5b5..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/mem/file.go b/openshift/tools/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 62fe4498e1..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mem - -import ( - "bytes" - "errors" - "io" - "io/fs" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" - - "github.com/spf13/afero/internal/common" -) - -const FilePathSeparator = string(filepath.Separator) - -var _ fs.ReadDirFile = &File{} - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time - uid int - gid int -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func SetUID(f *FileData, uid int) { - f.Lock() - f.uid = uid - f.Unlock() -} - -func SetGID(f *FileData, gid int) { - f.Lock() - f.gid = gid - f.Unlock() -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -// Implements fs.ReadDirFile -func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { - fi, err := f.Readdir(n) - if err != nil { - return nil, err - } - di := make([]fs.DirEntry, len(fi)) - for i, f := range fi { - di[i] = common.FileInfoDirEntry{FileInfo: f} - } - return di, nil -} - -func (f *File) Read(b []byte) (n int, err error) { - 
f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - prev := atomic.LoadInt64(&f.at) - atomic.StoreInt64(&f.at, off) - n, err = f.Read(b) - atomic.StoreInt64(&f.at, prev) - return -} - -func (f *File) Truncate(size int64) error { - if f.closed { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - f.fileData.Lock() - defer f.fileData.Unlock() - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed { - return 0, ErrFileClosed - } - switch whence { - case io.SeekStart: - atomic.StoreInt64(&f.at, offset) - case io.SeekCurrent: - atomic.AddInt64(&f.at, offset) - case io.SeekEnd: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.closed { - return 0, ErrFileClosed - } - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) 
- } - setModTime(f.fileData, time.Now()) - - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} - -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} - -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} - -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("out of range") - ErrTooLarge = errors.New("too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/openshift/tools/vendor/github.com/spf13/afero/memmap.go b/openshift/tools/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index ed92f5649d..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "io" - "log" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? 
- root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0o755) - m.data[FilePathSeparator] = root - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file, 0) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) findDescendants(name string) []*mem.FileData { - fData := m.getData() - descendants := make([]*mem.FileData, 0, len(fData)) - for p, dFile := range fData { - if strings.HasPrefix(p, name+FilePathSeparator) { - descendants = append(descendants, dFile) - } - } - - sort.Slice(descendants, func(i, j int) bool { - cur := len(strings.Split(descendants[i].Name(), FilePathSeparator)) - next := len(strings.Split(descendants[j].Name(), FilePathSeparator)) - return cur < next - }) - - return descendants -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, perm) - if err != nil { - // log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - // log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. - i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - perm &= chmodBits - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - // Dobule check that it doesn't exist. 
- if _, ok := m.getData()[name]; ok { - m.mu.Unlock() - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - m.mu.Unlock() - - return m.setFileMode(name, perm|os.ModeDir) -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - perm &= chmodBits - chmod := false - file, err := m.openWrite(name) - if err == nil && (flag&os.O_EXCL > 0) { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} - } - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, io.SeekEnd) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - return file, m.setFileMode(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p := range m.getData() { - if p == path || strings.HasPrefix(p, path+FilePathSeparator) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - err := m.unRegisterWithParent(oldname) - 
if err != nil { - return err - } - - fileData := m.getData()[oldname] - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - - err = m.renameDescendants(oldname, newname) - if err != nil { - return err - } - - delete(m.getData(), oldname) - - m.registerWithParent(fileData, 0) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) renameDescendants(oldname, newname string) error { - descendants := m.findDescendants(oldname) - removes := make([]string, 0, len(descendants)) - for _, desc := range descendants { - descNewName := strings.Replace(desc.Name(), oldname, newname, 1) - err := m.unRegisterWithParent(desc.Name()) - if err != nil { - return err - } - - removes = append(removes, desc.Name()) - mem.ChangeFileName(desc, descNewName) - m.getData()[descNewName] = desc - - m.registerWithParent(desc, 0) - } - for _, r := range removes { - delete(m.getData(), r) - } - - return nil -} - -func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fileInfo, err := m.Stat(name) - return fileInfo, false, err -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - mode &= chmodBits - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits - - mode = prevOtherBits | mode - return m.setFileMode(name, mode) -} - -func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chown(name string, uid, gid int) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} - } - - mem.SetUID(f, uid) - mem.SetGID(f, gid) - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/os.go b/openshift/tools/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index f1366321ec..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). -type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chown(name string, uid, gid int) error { - return os.Chown(name, uid, gid) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} - -func (OsFs) SymlinkIfPossible(oldname, newname string) error { - return os.Symlink(oldname, newname) -} - -func (OsFs) ReadlinkIfPossible(name string) (string, error) { - return os.Readlink(name) -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/path.go b/openshift/tools/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f6b..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. -// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
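A brief sketch of the Walk helper documented above, again over an in-memory filesystem (paths are invented for illustration); as with filepath.Walk, returning filepath.SkipDir from the callback prunes a subtree:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	memFs := afero.NewMemMapFs()
	afero.WriteFile(memFs, "/a/b.txt", []byte("hi"), 0o644)
	afero.WriteFile(memFs, "/a/c.txt", []byte("ho"), 0o644)

	// Lexical-order traversal over any afero.Fs, mirroring filepath.Walk;
	// symbolic links are not followed.
	_ = afero.Walk(memFs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
}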
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/readonlyfs.go b/openshift/tools/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index bd8f9264dd..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,96 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { - if srdr, ok := r.source.(LinkReader); ok { - return srdr.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/regexpfs.go b/openshift/tools/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 218f3b235b..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,223 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
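A hedged sketch of the filtering behavior the doc comment above describes, using the public NewRegexpFs constructor (file names and the pattern are invented for the example):

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	src := afero.NewMemMapFs()
	afero.WriteFile(src, "/deploy.yaml", []byte("kind: Deployment\n"), 0o644)
	afero.WriteFile(src, "/notes.txt", []byte("scratch\n"), 0o644)

	// Only file names matching the pattern are visible; everything else
	// surfaces as ENOENT, per the doc comment above. Directories pass through.
	yamlOnly := afero.NewRegexpFs(src, regexp.MustCompile(`\.yaml$`))

	if _, err := yamlOnly.Stat("/deploy.yaml"); err == nil {
		fmt.Println("deploy.yaml visible")
	}
	if _, err := yamlOnly.Stat("/notes.txt"); err != nil {
		fmt.Println("notes.txt filtered:", err)
	}
}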
-type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Chown(name string, uid, gid int) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chown(name, uid, gid) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - if err != nil { - return nil, err - } - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { 
- var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/symlink.go b/openshift/tools/vendor/github.com/spf13/afero/symlink.go deleted file mode 100644 index aa6ae125b6..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/symlink.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" -) - -// Symlinker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It indicates support for 3 symlink related interfaces that implement the -// behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink -type Symlinker interface { - Lstater - Linker - LinkReader -} - -// Linker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, -// or the filesystem otherwise supports Symlink's. -type Linker interface { - SymlinkIfPossible(oldname, newname string) error -} - -// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system -// does not support Symlink's either directly or through its delegated filesystem. -// As expressed by support for the Linker interface. -var ErrNoSymlink = errors.New("symlink not supported") - -// LinkReader is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -type LinkReader interface { - ReadlinkIfPossible(name string) (string, error) -} - -// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system -// does not support the readlink operation either directly or through its delegated filesystem. -// As expressed by support for the LinkReader interface. 
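The RegexpFs removed here filters files (never directories) by name; a small sketch of the behavior its doc comment describes, with illustrative file names:

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/notes.txt", []byte("keep"), 0o644)
	afero.WriteFile(base, "/image.png", []byte{0xff}, 0o644)

	// Only names matching the regexp are visible; everything else gets ENOENT.
	txtOnly := afero.NewRegexpFs(base, regexp.MustCompile(`\.txt$`))

	if _, err := txtOnly.Stat("/notes.txt"); err == nil {
		fmt.Println("notes.txt visible")
	}
	if _, err := txtOnly.Stat("/image.png"); err != nil {
		fmt.Println("image.png filtered:", err)
	}
}
```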
-var ErrNoReadlink = errors.New("readlink not supported") diff --git a/openshift/tools/vendor/github.com/spf13/afero/unionFile.go b/openshift/tools/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index 62dd6c93c8..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,330 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. -// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), io.SeekStart) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? - _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. 
-// It takes the FileInfo slices from the layer and the base and returns a -// single view. -type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - files := make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) - } - files := f.files[f.off:] - - if c <= 0 { - return files, nil - } - - if len(files) == 0 { - return nil, io.EOF - } - - if c > len(files) { - c = len(files) - } - - defer func() { f.off += c }() - return files[:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyFile(base Fs, layer Fs, name string, bfh File) error { - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME? 
- if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - return copyFile(base, layer, name, bfh) -} - -func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error { - bfh, err := base.OpenFile(name, flag, perm) - if err != nil { - return err - } - defer bfh.Close() - - return copyFile(base, layer, name, bfh) -} diff --git a/openshift/tools/vendor/github.com/spf13/afero/util.go b/openshift/tools/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 9e4cba2746..0000000000 --- a/openshift/tools/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/runes" - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. -const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. 
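WriteReader, deleted above, creates any missing parent directories before copying the reader's contents; a minimal usage sketch (the path is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// Parent directories under /out/data are created on demand.
	if err := afero.WriteReader(fs, "/out/data/report.txt", strings.NewReader("hello")); err != nil {
		panic(err)
	}

	b, _ := afero.ReadFile(fs, "/out/data/report.txt")
	fmt.Println(string(b)) // hello
}
```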
-func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0o777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. -func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. 
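FileContainsBytes above is backed by readerContainsAny (defined next), which scans in buffers of four times the longest needle and shifts by half a buffer so matches spanning chunk boundaries are still caught; a usage sketch with an illustrative file:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/log.txt", []byte("level=error msg=boom"), 0o644)

	found, err := afero.FileContainsBytes(fs, "/log.txt", []byte("error"))
	if err != nil {
		panic(err)
	}
	fmt.Println(found) // true
}
```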
-func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - if err != nil { - return false, err - } - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. 
-func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/openshift/tools/vendor/github.com/spf13/cast/.gitignore b/openshift/tools/vendor/github.com/spf13/cast/.gitignore deleted file mode 100644 index 53053a8ac5..0000000000 --- a/openshift/tools/vendor/github.com/spf13/cast/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.bench diff --git a/openshift/tools/vendor/github.com/spf13/cast/LICENSE b/openshift/tools/vendor/github.com/spf13/cast/LICENSE deleted file mode 100644 index 4527efb9c0..0000000000 --- a/openshift/tools/vendor/github.com/spf13/cast/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/openshift/tools/vendor/github.com/spf13/cast/Makefile b/openshift/tools/vendor/github.com/spf13/cast/Makefile deleted file mode 100644 index f01a5dbb6e..0000000000 --- a/openshift/tools/vendor/github.com/spf13/cast/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2) - -.PHONY: check fmt lint test test-race vet test-cover-html help -.DEFAULT_GOAL := help - -check: test-race fmt vet lint ## Run tests and linters - -test: ## Run tests - go test ./... - -test-race: ## Run tests with race detector - go test -race ./... - -fmt: ## Run gofmt linter -ifeq "$(GOVERSION)" "12" - @for d in `go list` ; do \ - if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \ - echo "^ improperly formatted go files" && echo && exit 1; \ - fi \ - done -endif - -lint: ## Run golint linter - @for d in `go list` ; do \ - if [ "`golint $$d | tee /dev/stderr`" ]; then \ - echo "^ golint errors!" && echo && exit 1; \ - fi \ - done - -vet: ## Run go vet linter - @if [ "`go vet | tee /dev/stderr`" ]; then \ - echo "^ go vet errors!" 
&& echo && exit 1; \ - fi - -test-cover-html: ## Generate test coverage report - go test -coverprofile=coverage.out -covermode=count - go tool cover -func=coverage.out - -help: - @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/openshift/tools/vendor/github.com/spf13/cast/README.md b/openshift/tools/vendor/github.com/spf13/cast/README.md deleted file mode 100644 index 1be666a456..0000000000 --- a/openshift/tools/vendor/github.com/spf13/cast/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# cast - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/test.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/test.yaml) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) - -Easy and safe casting from one type to another in Go - -Don’t Panic! ... Cast - -## What is Cast? - -Cast is a library to convert between different go types in a consistent and easy way. - -Cast provides simple functions to easily convert a number to a string, an -interface into a bool, etc. Cast does this intelligently when an obvious -conversion is possible. It doesn’t make any attempts to guess what you meant, -for example you can only convert a string to an int when it is a string -representation of an int such as “8”. Cast was developed for use in -[Hugo](https://gohugo.io), a website engine which uses YAML, TOML or JSON -for meta data. - -## Why use Cast? - -When working with dynamic data in Go you often need to cast or convert the data -from one type into another. Cast goes beyond just using type assertion (though -it uses that when possible) to provide a very straightforward and convenient -library. - -If you are working with interfaces to handle things like dynamic content -you’ll need an easy way to convert an interface into a given type. This -is the library for you. - -If you are taking in data from YAML, TOML or JSON or other formats which lack -full types, then Cast is the library for you. - -## Usage - -Cast provides a handful of To_____ methods. These methods will always return -the desired type. **If input is provided that will not convert to that type, the -0 or nil value for that type will be returned**. - -Cast also provides identical methods To_____E. These return the same result as -the To_____ methods, plus an additional error which tells you if it successfully -converted. Using these methods you can tell the difference between when the -input matched the zero value or when the conversion failed and the zero value -was returned. - -The following examples are merely a sample of what is available. Please review -the code for a complete set. 
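A short sketch of the distinction the paragraph above draws, since `To_____` alone cannot tell a failed conversion from a genuine zero:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// ToInt swallows failures and returns the zero value...
	fmt.Println(cast.ToInt("not a number")) // 0
	fmt.Println(cast.ToInt("0"))            // 0, indistinguishable from failure

	// ...while ToIntE reports whether the conversion actually succeeded.
	if _, err := cast.ToIntE("not a number"); err != nil {
		fmt.Println("failed:", err)
	}
	if n, err := cast.ToIntE("0"); err == nil {
		fmt.Println("real zero:", n)
	}
}
```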
- -### Example ‘ToString’: - - cast.ToString("mayonegg") // "mayonegg" - cast.ToString(8) // "8" - cast.ToString(8.31) // "8.31" - cast.ToString([]byte("one time")) // "one time" - cast.ToString(nil) // "" - - var foo interface{} = "one more time" - cast.ToString(foo) // "one more time" - - -### Example ‘ToInt’: - - cast.ToInt(8) // 8 - cast.ToInt(8.31) // 8 - cast.ToInt("8") // 8 - cast.ToInt(true) // 1 - cast.ToInt(false) // 0 - - var eight interface{} = 8 - cast.ToInt(eight) // 8 - cast.ToInt(nil) // 0 diff --git a/openshift/tools/vendor/github.com/spf13/cast/cast.go b/openshift/tools/vendor/github.com/spf13/cast/cast.go deleted file mode 100644 index 0cfe9418de..0000000000 --- a/openshift/tools/vendor/github.com/spf13/cast/cast.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Package cast provides easy and safe casting in Go. -package cast - -import "time" - -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v -} - -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. -func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. 
-func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} - -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} - -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) - return v -} diff --git a/openshift/tools/vendor/github.com/spf13/cast/caste.go b/openshift/tools/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index 4181a2e758..0000000000 --- a/openshift/tools/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1510 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -type float64EProvider interface { - Float64() (float64, error) -} - -type float64Provider interface { - Float64() float64 -} - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. -func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case json.Number: - s, err1 := ToInt64E(v) - if err1 != nil { - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } - return time.Unix(s, 0), nil - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. 
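ToTimeE above treats numeric inputs as Unix seconds and tries strings against a list of known layouts; a sketch of both paths (the timezone choice is illustrative, and LoadLocation needs tzdata available):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// Numeric inputs are interpreted as Unix seconds.
	t1, _ := cast.ToTimeE(int64(0))
	fmt.Println(t1.UTC()) // 1970-01-01 00:00:00 +0000 UTC

	// Strings are matched against known layouts, UTC by default.
	t2, err := cast.ToTimeE("2006-01-02T15:04:05Z")
	fmt.Println(t2, err)

	// ToTimeInDefaultLocationE pins zone-less inputs to a chosen location.
	loc, _ := time.LoadLocation("America/New_York")
	t3, _ := cast.ToTimeInDefaultLocationE("2006-01-02 15:04:05", loc)
	fmt.Println(t3.Location()) // America/New_York
}
```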
-func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - case float64EProvider: - var v float64 - v, err = s.Float64() - d = time.Duration(v) - return - case float64Provider: - d = time.Duration(s.Float64()) - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - return b != 0, nil - case int64: - return b != 0, nil - case int32: - return b != 0, nil - case int16: - return b != 0, nil - case int8: - return b != 0, nil - case uint: - return b != 0, nil - case uint64: - return b != 0, nil - case uint32: - return b != 0, nil - case uint16: - return b != 0, nil - case uint8: - return b != 0, nil - case float64: - return b != 0, nil - case float32: - return b != 0, nil - case time.Duration: - return b != 0, nil - case string: - return strconv.ParseBool(i.(string)) - case json.Number: - v, err := ToInt64E(b) - if err == nil { - return v != 0, nil - } - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. -func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float64(intv), nil - } - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64Provider: - return s.Float64(), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. 
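One easy-to-miss consequence of the ToDurationE string branch above: a bare number has "ns" appended before time.ParseDuration runs, so unit-less inputs mean nanoseconds:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Strings with a unit go through time.ParseDuration unchanged...
	fmt.Println(cast.ToDuration("150ms")) // 150ms

	// ...but a bare numeric string is read as nanoseconds.
	fmt.Println(cast.ToDuration("150")) // 150ns

	// Plain integers are likewise nanosecond counts.
	fmt.Println(cast.ToDuration(150)) // 150ns
}
```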
-func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float32(intv), nil - } - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64Provider: - return float32(s.Float64()), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int64(intv), nil - } - - switch s := i.(type) { - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToInt64E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. -func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int32(intv), nil - } - - switch s := i.(type) { - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case json.Number: - return ToInt32E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. 
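The string branches of the integer converters above call strconv.ParseInt with base 0, so Go's literal prefixes are honored, and trimZeroDecimal strips an all-zero fractional part first; a sketch assuming this vendored behavior:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Base 0 honors standard integer prefixes.
	fmt.Println(cast.ToInt("0x10")) // 16
	fmt.Println(cast.ToInt("010"))  // 8 (octal)

	// A trailing all-zero decimal part is trimmed before parsing...
	fmt.Println(cast.ToInt("8.00")) // 8

	// ...but a real fractional string does not convert.
	_, err := cast.ToIntE("8.31")
	fmt.Println(err != nil) // true
}
```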
-func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int16(intv), nil - } - - switch s := i.(type) { - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case json.Number: - return ToInt16E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int8(intv), nil - } - - switch s := i.(type) { - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case json.Number: - return ToInt8E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. -func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return intv, nil - } - - switch s := i.(type) { - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToIntE(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. 
-func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - case json.Number: - return ToUintE(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. -func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint64(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - case json.Number: - return ToUint64E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. 
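Every signed-to-unsigned branch above guards with errNegativeNotAllowed rather than letting the value wrap; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// The error-returning variant rejects negatives outright...
	_, err := cast.ToUintE(-1)
	fmt.Println(err) // unable to cast negative value

	// ...and the lossy variant falls back to the zero value.
	fmt.Println(cast.ToUint(-1)) // 0

	fmt.Println(cast.ToUint64("42")) // 42
}
```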
-func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint32(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - case json.Number: - return ToUint32E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. -func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint16(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - case json.Number: - return ToUint16E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. 
-func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint8(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - case json.Number: - return ToUint8E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - errorType := reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. 
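The indirect helpers above mean pointers are dereferenced before conversion, and ToString recognizes fmt.Stringer and error implementations; a sketch:

```go
package main

import (
	"fmt"
	"net"

	"github.com/spf13/cast"
)

func main() {
	// Pointers are followed down to the underlying value.
	n := 8
	fmt.Println(cast.ToInt(&n)) // 8

	// net.IP implements fmt.Stringer, so ToString uses its String method.
	ip := net.ParseIP("127.0.0.1")
	fmt.Println(cast.ToString(ip)) // 127.0.0.1
}
```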
-func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case json.Number: - return s.String(), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - m := map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
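ToStringMapStringE above normalizes several map shapes key-by-key and, notably, decodes a plain string input as JSON; a sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Heterogeneous map keys and values are stringified one by one.
	m := map[interface{}]interface{}{"port": 8080, "host": "localhost"}
	fmt.Println(cast.ToStringMapString(m)) // map[host:localhost port:8080]

	// A string input is treated as a JSON object.
	fmt.Println(cast.ToStringMapString(`{"a":"b"}`)) // map[a:b]
}
```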
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - m := map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - m := map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - m := map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. 
-func ToStringMapIntE(i interface{}) (map[string]int, error) { - m := map[string]int{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt(val) - } - return m, nil - case map[string]int: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToIntE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - m := map[string]int64{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt64(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt64(val) - } - return m, nil - case map[string]int64: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToInt64E(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. 
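// (Editor's aside, not upstream text.) Two representative conversions from
// the cases below: a plain string is split on whitespace via strings.Fields,
// and numeric slices are stringified element-wise:
//
//	ToStringSliceE("a b c")     // []string{"a", "b", "c"}, nil
//	ToStringSliceE([]int{1, 2}) // []string{"1", "2"}, nil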
-func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
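// (Editor's aside, not upstream text.) Illustratively, with the layouts in
// timeFormats below:
//
//	StringToDateInDefaultLocation("2022-01-02 15:04:05", time.UTC)
//	// parses a timezone-less layout and places the result in UTC;
//	// passing a nil location falls back to time.Local.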
-func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, -} - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} - -// toInt returns the int value of v if v or v's underlying type -// is an int. -// Note that this will return false for int64 etc. types. 
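// (Editor's aside, not upstream text.) For instance, toInt(time.Wednesday)
// returns (3, true), while toInt(int64(7)) returns (0, false), because the
// dynamic type must be exactly int, time.Weekday, or time.Month.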
-func toInt(v interface{}) (int, bool) { - switch v := v.(type) { - case int: - return v, true - case time.Weekday: - return int(v), true - case time.Month: - return int(v), true - default: - return 0, false - } -} - -func trimZeroDecimal(s string) string { - var foundZero bool - for i := len(s); i > 0; i-- { - switch s[i-1] { - case '.': - if foundZero { - return s[:i-1] - } - case '0': - foundZero = true - default: - return s - } - } - return s -} diff --git a/openshift/tools/vendor/github.com/spf13/cast/timeformattype_string.go b/openshift/tools/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82ce..0000000000 --- a/openshift/tools/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. - -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/.editorconfig b/openshift/tools/vendor/github.com/spf13/pflag/.editorconfig deleted file mode 100644 index 4492e9f9fe..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.go] -indent_style = tab diff --git a/openshift/tools/vendor/github.com/spf13/pflag/.gitignore b/openshift/tools/vendor/github.com/spf13/pflag/.gitignore deleted file mode 100644 index c3da290134..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea/* - diff --git a/openshift/tools/vendor/github.com/spf13/pflag/.golangci.yaml b/openshift/tools/vendor/github.com/spf13/pflag/.golangci.yaml deleted file mode 100644 index b274f24845..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/.golangci.yaml +++ /dev/null @@ -1,4 +0,0 @@ -linters: - disable-all: true - enable: - - nolintlint diff --git a/openshift/tools/vendor/github.com/spf13/pflag/.travis.yml b/openshift/tools/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index 00d04cb9b0..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -sudo: false - -language: go - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get golang.org/x/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... 
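(Editor's note for reviewers: the hunks above remove the vendored copy of `github.com/spf13/cast`. As a hedged, untested sketch that is not part of this patch, the public API implemented by the deleted `caste.go` is typically consumed as below. The non-`E` wrappers such as `cast.ToString` belong to the same upstream package but their definitions are not shown in these hunks; the sample values are made up.)

``` go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Plain conversions never fail; the ...E variants report an error
	// instead of silently returning a zero value.
	fmt.Println(cast.ToString(8.31))         // "8.31"
	fmt.Println(cast.ToStringSlice("a b c")) // [a b c], split on whitespace

	// Map conversions accept Go maps or JSON-encoded strings.
	m, err := cast.ToStringMapStringE(`{"region": "us-south"}`)
	fmt.Println(m, err) // map[region:us-south] <nil>

	// StringToDate tries the package's ordered list of layouts.
	t, err := cast.StringToDate("2006-01-02T15:04:05")
	fmt.Println(t, err)

	// Incompatible input surfaces as an error from the ...E form.
	_, err = cast.ToIntE("not-a-number")
	fmt.Println(err)
}
```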
diff --git a/openshift/tools/vendor/github.com/spf13/pflag/LICENSE b/openshift/tools/vendor/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea1..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/spf13/pflag/README.md b/openshift/tools/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index 7eacc5bdbe..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. 
-Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helper functions available to get the value stored in a Flag if you have a FlagSet but find -it difficult to keep up with all of the pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. 
For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". 
-```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. - -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. 
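(Editor's addition, not upstream README text: a single hedged sketch tying together the features described above, namely shorthands, count flags, durations, and NoOptDefVal. It is untested, and the program name `prog` is hypothetical.)

``` go
package main

import (
	"fmt"
	"time"

	flag "github.com/spf13/pflag"
)

func main() {
	var verbosity int
	name := flag.StringP("name", "n", "world", "who to greet")
	timeout := flag.Duration("timeout", 30*time.Second, "request timeout")
	flag.CountVarP(&verbosity, "verbose", "v", "increase verbosity (repeatable)")
	flag.Lookup("name").NoOptDefVal = "anonymous" // bare --name means "anonymous"

	flag.Parse()

	// e.g. `prog -vvv --name --timeout=1m30s extra` prints:
	//   hello anonymous (verbosity 3, timeout 1m30s)
	//   positional args: [extra]
	fmt.Printf("hello %s (verbosity %d, timeout %s)\n", *name, verbosity, *timeout)
	fmt.Println("positional args:", flag.Args())
}
```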
- -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/openshift/tools/vendor/github.com/spf13/pflag/bool.go b/openshift/tools/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index c4c5c0bfda..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import "strconv" - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -func boolConv(sval string) (interface{}, error) { - return strconv.ParseBool(sval) -} - -// GetBool return the bool value of a flag with the given name -func (f *FlagSet) GetBool(name string) (bool, error) { - val, err := f.getFlagType(name, "bool", boolConv) - if err != nil { - return false, err - } - return val.(bool), nil -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - return f.BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. 
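// (Editor's aside, not upstream text.) Illustratively:
//
//	force := pflag.BoolP("force", "f", false, "skip confirmation")
//	// after pflag.Parse(), both --force and -f set *force to true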
-func BoolP(name, shorthand string, value bool, usage string) *bool { - b := CommandLine.BoolP(name, shorthand, value, usage) - return b -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/bool_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/bool_slice.go deleted file mode 100644 index 3731370d6a..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/bool_slice.go +++ /dev/null @@ -1,185 +0,0 @@ -package pflag - -import ( - "io" - "strconv" - "strings" -) - -// -- boolSlice Value -type boolSliceValue struct { - value *[]bool - changed bool -} - -func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { - bsv := new(boolSliceValue) - bsv.value = p - *bsv.value = val - return bsv -} - -// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. -// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. -func (s *boolSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse boolean values into slice - out := make([]bool, 0, len(boolStrSlice)) - for _, boolStr := range boolStrSlice { - b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) - if err != nil { - return err - } - out = append(out, b) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *boolSliceValue) Type() string { - return "boolSlice" -} - -// String defines a "native" format for this boolean slice flag value. -func (s *boolSliceValue) String() string { - - boolStrSlice := make([]string, len(*s.value)) - for i, b := range *s.value { - boolStrSlice[i] = strconv.FormatBool(b) - } - - out, _ := writeAsCSV(boolStrSlice) - - return "[" + out + "]" -} - -func (s *boolSliceValue) fromString(val string) (bool, error) { - return strconv.ParseBool(val) -} - -func (s *boolSliceValue) toString(val bool) string { - return strconv.FormatBool(val) -} - -func (s *boolSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *boolSliceValue) Replace(val []string) error { - out := make([]bool, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *boolSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func boolSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []bool{}, nil - } - ss := strings.Split(val, ",") - out := make([]bool, len(ss)) - for i, t := range ss { - var err error - out[i], err = strconv.ParseBool(t) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetBoolSlice returns the []bool value of a flag with the given name. 
-func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { - val, err := f.getFlagType(name, "boolSlice", boolSliceConv) - if err != nil { - return []bool{}, err - } - return val.([]bool), nil -} - -// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, "", value, usage) - return &p -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func BoolSlice(name string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, "", value, usage) -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/bytes.go b/openshift/tools/vendor/github.com/spf13/pflag/bytes.go deleted file mode 100644 index 67d5304570..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/bytes.go +++ /dev/null @@ -1,209 +0,0 @@ -package pflag - -import ( - "encoding/base64" - "encoding/hex" - "fmt" - "strings" -) - -// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded -type bytesHexValue []byte - -// String implements pflag.Value.String. -func (bytesHex bytesHexValue) String() string { - return fmt.Sprintf("%X", []byte(bytesHex)) -} - -// Set implements pflag.Value.Set. -func (bytesHex *bytesHexValue) Set(value string) error { - bin, err := hex.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesHex = bin - - return nil -} - -// Type implements pflag.Value.Type. 
-func (*bytesHexValue) Type() string { - return "bytesHex" -} - -func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue { - *p = val - return (*bytesHexValue)(p) -} - -func bytesHexConv(sval string) (interface{}, error) { - - bin, err := hex.DecodeString(sval) - - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesHex return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesHex(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesHex", bytesHexConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesHexVar(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, "", value, usage) - return p -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, shorthand, value, usage) - return p -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesHex(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, "", value, usage) -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, shorthand, value, usage) -} - -// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded -type bytesBase64Value []byte - -// String implements pflag.Value.String. -func (bytesBase64 bytesBase64Value) String() string { - return base64.StdEncoding.EncodeToString([]byte(bytesBase64)) -} - -// Set implements pflag.Value.Set. 
-func (bytesBase64 *bytesBase64Value) Set(value string) error { - bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesBase64 = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesBase64Value) Type() string { - return "bytesBase64" -} - -func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value { - *p = val - return (*bytesBase64Value)(p) -} - -func bytesBase64ValueConv(sval string) (interface{}, error) { - - bin, err := base64.StdEncoding.DecodeString(sval) - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesBase64 return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, "", value, usage) - return p -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, shorthand, value, usage) - return p -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesBase64(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, "", value, usage) -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. 
-func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/count.go b/openshift/tools/vendor/github.com/spf13/pflag/count.go deleted file mode 100644 index a0b2679f71..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/count.go +++ /dev/null @@ -1,96 +0,0 @@ -package pflag - -import "strconv" - -// -- count Value -type countValue int - -func newCountValue(val int, p *int) *countValue { - *p = val - return (*countValue)(p) -} - -func (i *countValue) Set(s string) error { - // "+1" means that no specific value was passed, so increment - if s == "+1" { - *i = countValue(*i + 1) - return nil - } - v, err := strconv.ParseInt(s, 0, 0) - *i = countValue(v) - return err -} - -func (i *countValue) Type() string { - return "count" -} - -func (i *countValue) String() string { return strconv.Itoa(int(*i)) } - -func countConv(sval string) (interface{}, error) { - i, err := strconv.Atoi(sval) - if err != nil { - return nil, err - } - return i, nil -} - -// GetCount return the int value of a flag with the given name -func (f *FlagSet) GetCount(name string) (int, error) { - val, err := f.getFlagType(name, "count", countConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// CountVar defines a count flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) CountVar(p *int, name string, usage string) { - f.CountVarP(p, name, "", usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. -func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { - flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "+1" -} - -// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set -func CountVar(p *int, name string, usage string) { - CommandLine.CountVar(p, name, usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. -func CountVarP(p *int, name, shorthand string, usage string) { - CommandLine.CountVarP(p, name, shorthand, usage) -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) Count(name string, usage string) *int { - p := new(int) - f.CountVarP(p, name, "", usage) - return p -} - -// CountP is like Count only takes a shorthand for the flag name. -func (f *FlagSet) CountP(name, shorthand string, usage string) *int { - p := new(int) - f.CountVarP(p, name, shorthand, usage) - return p -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func Count(name string, usage string) *int { - return CommandLine.CountP(name, "", usage) -} - -// CountP is like Count only takes a shorthand for the flag name. 
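// (Editor's aside, not upstream text.) Because CountVarP sets the flag's
// NoOptDefVal to "+1", repeated occurrences accumulate, e.g.:
//
//	v := pflag.CountP("verbose", "v", "verbosity level")
//	// `-vvv` on the command line leaves *v == 3 after pflag.Parse()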
-func CountP(name, shorthand string, usage string) *int { - return CommandLine.CountP(name, shorthand, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/duration.go b/openshift/tools/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index e9debef88e..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "time" -) - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -func durationConv(sval string) (interface{}, error) { - return time.ParseDuration(sval) -} - -// GetDuration return the duration value of a flag with the given name -func (f *FlagSet) GetDuration(name string) (time.Duration, error) { - val, err := f.getFlagType(name, "duration", durationConv) - if err != nil { - return 0, err - } - return val.(time.Duration), nil -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. 
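// (Editor's aside, not upstream text.) Any input valid for
// time.ParseDuration works, e.g.:
//
//	timeout := pflag.Duration("timeout", 30*time.Second, "request timeout")
//	// `--timeout=1m30s` leaves *timeout == 90 * time.Second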
-func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/duration_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/duration_slice.go deleted file mode 100644 index badadda53f..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/duration_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strings" - "time" -) - -// -- durationSlice Value -type durationSliceValue struct { - value *[]time.Duration - changed bool -} - -func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { - dsv := new(durationSliceValue) - dsv.value = p - *dsv.value = val - return dsv -} - -func (s *durationSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *durationSliceValue) Type() string { - return "durationSlice" -} - -func (s *durationSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%s", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *durationSliceValue) fromString(val string) (time.Duration, error) { - return time.ParseDuration(val) -} - -func (s *durationSliceValue) toString(val time.Duration) string { - return fmt.Sprintf("%s", val) -} - -func (s *durationSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *durationSliceValue) Replace(val []string) error { - out := make([]time.Duration, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *durationSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func durationSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []time.Duration{}, nil - } - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetDurationSlice returns the []time.Duration value of a flag with the given name -func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { - val, err := f.getFlagType(name, "durationSlice", durationSliceConv) - if err != nil { - return []time.Duration{}, err - } - return val.([]time.Duration), nil -} - -// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. -// The argument p points to a []time.Duration variable in which to store the value of the flag. 
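// (Editor's aside, not upstream text.) Per Set above, the comma-separated
// form parses each element with time.ParseDuration, and repeating the flag
// appends, e.g. `--intervals=500ms,2s --intervals=1m` yields [500ms 2s 1m].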
-func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. -// The argument p points to a duration[] variable in which to store the value of the flag. -func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, "", value, usage) - return &p -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, "", value, usage) -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/flag.go b/openshift/tools/vendor/github.com/spf13/pflag/flag.go deleted file mode 100644 index 7c058de374..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,1246 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the GNU extensions to the POSIX recommendations -for command-line options. See -http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -Usage: - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. 
- - import flag "github.com/spf13/pflag" - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") -If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - flag.Parse() -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") - } - flag.VarP(&flagval, "varname", "v", "help message") -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -Command line flag syntax: - --flag // boolean flags only - --flag=x - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - goflag "flag" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. 
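// (Editor's aside, not upstream text.) Illustratively:
//
//	fs := pflag.NewFlagSet("tool", pflag.ContinueOnError)
//	// fs.Parse(os.Args[1:]) then returns an error instead of exiting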
-type ErrorHandling int - -const ( - // ContinueOnError will return an err from Parse() if an error is found - ContinueOnError ErrorHandling = iota - // ExitOnError will call os.Exit(2) if an error is found when parsing - ExitOnError - // PanicOnError will panic() if an error is found when parsing flags - PanicOnError -) - -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { - // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags - UnknownFlags bool -} - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - // SortFlags is used to indicate, if user wants to have sorted flags in - // help/usage messages. - SortFlags bool - - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist - - name string - parsed bool - actual map[NormalizedName]*Flag - orderedActual []*Flag - sortedActual []*Flag - formal map[NormalizedName]*Flag - orderedFormal []*Flag - sortedFormal []*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - errorHandling ErrorHandling - output io.Writer // nil means stderr; use Output() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName - - addedGoFlagSets []*goflag.FlagSet -} - -// A Flag represents the state of a flag. -type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string // default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text - ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomple code -} - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// SliceValue is a secondary interface to all flags which hold a list -// of values. This allows full control over the value of list flags, -// and avoids complicated marshalling and unmarshalling to csv. -type SliceValue interface { - // Append adds the specified value to the end of the flag value list. - Append(string) error - // Replace will fully overwrite any data currently in the flag value list. - Replace([]string) error - // GetSlice returns the flag value list as an array of strings. - GetSlice() []string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order. 
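The Value interface above is the extension point for custom flag types, and SliceValue layers list semantics on top of it. A hedged sketch of a custom implementation; the hexValue type is hypothetical, invented only to show the three required methods:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"

    	"github.com/spf13/pflag"
    )

    // hexValue is a hypothetical type satisfying pflag.Value.
    type hexValue uint64

    func (h *hexValue) Set(s string) error {
    	v, err := strconv.ParseUint(strings.TrimPrefix(s, "0x"), 16, 64)
    	*h = hexValue(v)
    	return err
    }

    func (h *hexValue) String() string { return "0x" + strconv.FormatUint(uint64(*h), 16) }
    func (h *hexValue) Type() string   { return "hex" } // shown in usage text

    func main() {
    	var mask hexValue
    	pflag.Var(&mask, "mask", "bit mask, e.g. --mask=0xff")
    	pflag.Parse()
    	fmt.Println("mask =", mask.String())
    }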
-func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -// SetNormalizeFunc allows you to add a function which can translate flag names. -// Flags added to the FlagSet will be translated and then when anything tries to -// look up the flag that will also be translated. So it would be possible to create -// a flag named "getURL" and have it translated to "geturl". A user could then pass -// "--getUrl" which may also be translated to "geturl" and everything will work. -func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - f.sortedFormal = f.sortedFormal[:0] - for fname, flag := range f.formal { - nname := f.normalizeFlagName(flag.Name) - if fname == nname { - continue - } - flag.Name = string(nname) - delete(f.formal, fname) - f.formal[nname] = flag - if _, set := f.actual[fname]; set { - delete(f.actual, fname) - f.actual[nname] = flag - } - } -} - -// GetNormalizeFunc returns the previously set NormalizeFunc of a function which -// does no translation, if not set previously. -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -// Output returns the destination for usage and error messages. os.Stderr is returned if -// output was not set or was set to nil. -func (f *FlagSet) Output() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// Name returns the name of the flag set. -func (f *FlagSet) Name() string { - return f.name -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - if len(f.formal) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.formal) != len(f.sortedFormal) { - f.sortedFormal = sortFlags(f.formal) - } - flags = f.sortedFormal - } else { - flags = f.orderedFormal - } - - for _, flag := range flags { - fn(flag) - } -} - -// HasFlags returns a bool to indicate if the FlagSet has any flags defined. -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// that are not hidden. -func (f *FlagSet) HasAvailableFlags() bool { - for _, flag := range f.formal { - if !flag.Hidden { - return true - } - } - return false -} - -// VisitAll visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. 
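SetNormalizeFunc above renames both already-defined flags and later lookups, which is what lets two spellings resolve to one flag. A small sketch under that assumption; the "demo" set and flag names are invented:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	fs.Bool("my-flag", false, "example flag")

    	// Fold '_' into '-' so --my_flag and --my-flag name the same flag.
    	fs.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
    		return pflag.NormalizedName(strings.Replace(name, "_", "-", -1))
    	})

    	_ = fs.Parse([]string{"--my_flag"})
    	fmt.Println(fs.Changed("my-flag")) // true
    }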
-func (f *FlagSet) Visit(fn func(*Flag)) { - if len(f.actual) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.actual) != len(f.sortedActual) { - f.sortedActual = sortFlags(f.actual) - } - flags = f.sortedActual - } else { - flags = f.orderedActual - } - - for _, flag := range flags { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -// It panics, if len(name) > 1. -func (f *FlagSet) ShorthandLookup(name string) *Flag { - if name == "" { - return nil - } - if len(name) > 1 { - msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.Output(), msg) - panic(msg) - } - c := name[0] - return f.shorthands[c] -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// func to return a given type for a given flag name -func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { - flag := f.Lookup(name) - if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) - return nil, err - } - - if flag.Value.Type() != ftype { - err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) - return nil, err - } - - sval := flag.Value.String() - result, err := convFunc(sval) - if err != nil { - return nil, err - } - return result, nil -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. -func (f *FlagSet) ArgsLenAtDash() int { - return f.argsLenAtDash -} - -// MarkDeprecated indicated that a flag is deprecated in your program. It will -// continue to function but will not show up in help or usage messages. Using -// this flag will also print the given usageMessage. -func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.Deprecated = usageMessage - flag.Hidden = true - return nil -} - -// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your -// program. It will continue to function but will not show up in help or usage -// messages. Using this flag will also print the given usageMessage. -func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.ShorthandDeprecated = usageMessage - return nil -} - -// MarkHidden sets a flag to 'hidden' in your program. It will continue to -// function but will not show up in help or usage messages. 
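MarkDeprecated above combines two effects: it hides the flag and prints the message whenever the flag is still used. Visit, by contrast with VisitAll, only sees flags that were actually set. An illustrative sketch (flag names invented):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	fs.Bool("legacy", false, "old switch")
    	fs.Bool("modern", false, "new switch")

    	// Hides --legacy from usage text; parsing it warns on stderr:
    	// "Flag --legacy has been deprecated, use --modern instead"
    	if err := fs.MarkDeprecated("legacy", "use --modern instead"); err != nil {
    		panic(err)
    	}

    	_ = fs.Parse([]string{"--legacy"})

    	// Visit sees only flags that were actually set.
    	fs.Visit(func(fl *pflag.Flag) { fmt.Println("set:", fl.Name) }) // set: legacy
    }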
-func (f *FlagSet) MarkHidden(name string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Hidden = true - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -func ShorthandLookup(name string) *Flag { - return CommandLine.ShorthandLookup(name) -} - -// Set sets the value of the named flag. -func (f *FlagSet) Set(name, value string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - - err := flag.Value.Set(value) - if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) - } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) - } - - if !flag.Changed { - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - f.orderedActual = append(f.orderedActual, flag) - - flag.Changed = true - } - - if flag.Deprecated != "" { - fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. -// This is sometimes used by spf13/cobra programs which want to generate additional -// bash completion information. -func (f *FlagSet) SetAnnotation(name, key string, values []string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[key] = values - return nil -} - -// Changed returns true if the flag was explicitly set during Parse() and false -// otherwise -func (f *FlagSet) Changed(name string) bool { - flag := f.Lookup(name) - // If a flag doesn't exist, it wasn't changed.... - if flag == nil { - return false - } - return flag.Changed -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (f *FlagSet) PrintDefaults() { - usages := f.FlagUsages() - fmt.Fprint(f.Output(), usages) -} - -// defaultIsZeroValue returns true if the default value for this flag represents -// a zero value. 
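Note from Set above that a programmatic Set goes through the same "actual"/Changed bookkeeping as command-line parsing, so Changed cannot distinguish the two. A short sketch (names invented):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	endpoint := fs.String("endpoint", "http://localhost", "API endpoint")

    	// Set records the flag as changed, exactly as Parse would.
    	if err := fs.Set("endpoint", "https://example.com"); err != nil {
    		panic(err)
    	}
    	fmt.Println(*endpoint, fs.Changed("endpoint")) // https://example.com true
    }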
-func (f *Flag) defaultIsZeroValue() bool {
-	switch f.Value.(type) {
-	case boolFlag:
-		return f.DefValue == "false"
-	case *durationValue:
-		// Beginning in Go 1.7, duration zero values are "0s"
-		return f.DefValue == "0" || f.DefValue == "0s"
-	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
-		return f.DefValue == "0"
-	case *stringValue:
-		return f.DefValue == ""
-	case *ipValue, *ipMaskValue, *ipNetValue:
-		return f.DefValue == ""
-	case *intSliceValue, *stringSliceValue, *stringArrayValue:
-		return f.DefValue == "[]"
-	default:
-		switch f.Value.String() {
-		case "false":
-			return true
-		case "<nil>":
-			return true
-		case "":
-			return true
-		case "0":
-			return true
-		}
-		return false
-	}
-}
-
-// UnquoteUsage extracts a back-quoted name from the usage
-// string for a flag and returns it and the un-quoted usage.
-// Given "a `name` to show" it returns ("name", "a name to show").
-// If there are no back quotes, the name is an educated guess of the
-// type of the flag's value, or the empty string if the flag is boolean.
-func UnquoteUsage(flag *Flag) (name string, usage string) {
-	// Look for a back-quoted name, but avoid the strings package.
-	usage = flag.Usage
-	for i := 0; i < len(usage); i++ {
-		if usage[i] == '`' {
-			for j := i + 1; j < len(usage); j++ {
-				if usage[j] == '`' {
-					name = usage[i+1 : j]
-					usage = usage[:i] + name + usage[j+1:]
-					return name, usage
-				}
-			}
-			break // Only one back quote; use type name.
-		}
-	}
-
-	name = flag.Value.Type()
-	switch name {
-	case "bool":
-		name = ""
-	case "float64":
-		name = "float"
-	case "int64":
-		name = "int"
-	case "uint64":
-		name = "uint"
-	case "stringSlice":
-		name = "strings"
-	case "intSlice":
-		name = "ints"
-	case "uintSlice":
-		name = "uints"
-	case "boolSlice":
-		name = "bools"
-	}
-
-	return
-}
-
-// Splits the string `s` on whitespace into an initial substring up to
-// `i` runes in length and the remainder. Will go `slop` over `i` if
-// that encompasses the entire string (which allows the caller to
-// avoid short orphan words on the final line).
-func wrapN(i, slop int, s string) (string, string) {
-	if i+slop > len(s) {
-		return s, ""
-	}
-
-	w := strings.LastIndexAny(s[:i], " \t\n")
-	if w <= 0 {
-		return s, ""
-	}
-	nlPos := strings.LastIndex(s[:i], "\n")
-	if nlPos > 0 && nlPos < w {
-		return s[:nlPos], s[nlPos+1:]
-	}
-	return s[:w], s[w+1:]
-}
-
-// Wraps the string `s` to a maximum width `w` with leading indent
-// `i`. The first line is not indented (this is assumed to be done by
-// caller). Pass `w` == 0 to do no wrapping
-func wrap(i, w int, s string) string {
-	if w == 0 {
-		return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
-	}
-
-	// space between indent i and end of line width w into which
-	// we should wrap the text.
-	wrap := w - i
-
-	var r, l string
-
-	// Not enough space for sensible wrapping. Wrap as a block on
-	// the next line instead.
-	if wrap < 24 {
-		i = 16
-		wrap = w - i
-		r += "\n" + strings.Repeat(" ", i)
-	}
-	// If still not enough space then don't even try to wrap.
-	if wrap < 24 {
-		return strings.Replace(s, "\n", r, -1)
-	}
-
-	// Try to avoid short orphan words on the final line, by
-	// allowing wrapN to go a bit over if that would fit in the
-	// remainder of the line.
- slop := 5 - wrap = wrap - slop - - // Handle first line, which is indented by the caller (or the - // special case above) - l, s = wrapN(wrap, slop, s) - r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1) - - // Now wrap the rest - for s != "" { - var t string - - t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - return r - -} - -// FlagUsagesWrapped returns a string containing the usage information -// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no -// wrapping) -func (f *FlagSet) FlagUsagesWrapped(cols int) string { - buf := new(bytes.Buffer) - - lines := make([]string, 0, len(f.formal)) - - maxlen := 0 - f.VisitAll(func(flag *Flag) { - if flag.Hidden { - return - } - - line := "" - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) - } else { - line = fmt.Sprintf(" --%s", flag.Name) - } - - varname, usage := UnquoteUsage(flag) - if varname != "" { - line += " " + varname - } - if flag.NoOptDefVal != "" { - switch flag.Value.Type() { - case "string": - line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": - if flag.NoOptDefVal != "true" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - case "count": - if flag.NoOptDefVal != "+1" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - default: - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - } - - // This special character will be replaced with spacing once the - // correct alignment is calculated - line += "\x00" - if len(line) > maxlen { - maxlen = len(line) - } - - line += usage - if !flag.defaultIsZeroValue() { - if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) - } else { - line += fmt.Sprintf(" (default %s)", flag.DefValue) - } - } - if len(flag.Deprecated) != 0 { - line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated) - } - - lines = append(lines, line) - }) - - for _, line := range lines { - sidx := strings.Index(line, "\x00") - spacing := strings.Repeat(" ", maxlen-sidx) - // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) - } - - return buf.String() -} - -// FlagUsages returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { - return f.FlagUsagesWrapped(0) -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -// By default it prints a simple header and calls PrintDefaults; for details about the -// format of the output and how to control it, see the documentation for PrintDefaults. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. 
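UnquoteUsage's back-quote convention and FlagUsagesWrapped above are how help text gets its placeholders and wrapping. A small sketch, with invented flag names, of both:

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	fs.StringP("output", "o", "", "write the report to `path`")

    	// The back-quoted `path` becomes the placeholder in usage text.
    	name, usage := pflag.UnquoteUsage(fs.Lookup("output"))
    	fmt.Println(name)  // path
    	fmt.Println(usage) // write the report to path

    	// Wrap the generated usage block at 60 columns.
    	fmt.Print(fs.FlagUsagesWrapped(60))
    }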
-func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// VarPF is like VarP, but returns the flag created -func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) - return flag -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - f.VarPF(value, name, shorthand, usage) -} - -// AddFlag will add the flag to the FlagSet -func (f *FlagSet) AddFlag(flag *Flag) { - normalizedFlagName := f.normalizeFlagName(flag.Name) - - _, alreadyThere := f.formal[normalizedFlagName] - if alreadyThere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.Output(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - f.orderedFormal = append(f.orderedFormal, flag) - - if flag.Shorthand == "" { - return - } - if len(flag.Shorthand) > 1 { - msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.Output(), msg) - panic(msg) - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - used, alreadyThere := f.shorthands[c] - if alreadyThere { - msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.Output(), msg) - panic(msg) - } - f.shorthands[c] = flag -} - -// AddFlagSet adds one FlagSet to another. If a flag is already present in f -// the flag from newSet will be ignored. 
-func (f *FlagSet) AddFlagSet(newSet *FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(flag *Flag) { - if f.Lookup(flag.Name) == nil { - f.AddFlag(flag) - } - }) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) - f.usage() - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) -func stripUnknownFlagValue(args []string) []string { - if len(args) == 0 { - //--unknown - return args - } - - first := args[0] - if len(first) > 0 && first[0] == '-' { - //--unknown --next-flag ... - return args - } - - //--unknown arg ... (args will be arg ...) - if len(args) > 1 { - return args[1:] - } - return nil -} - -func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, exists := f.formal[f.normalizeFlagName(name)] - - if !exists { - switch { - case name == "help": - f.usage() - return a, ErrHelp - case f.ParseErrorsWhitelist.UnknownFlags: - // --unknown=unknownval arg ... 
- // we do not want to lose arg in this case - if len(split) >= 2 { - return a, nil - } - - return stripUnknownFlagValue(a), nil - default: - err = f.failf("unknown flag: --%s", name) - return - } - } - - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if flag.NoOptDefVal != "" { - // '--flag' (arg was optional) - value = flag.NoOptDefVal - } else if len(a) > 0 { - // '--flag arg' - value = a[0] - a = a[1:] - } else { - // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) - return - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { - outArgs = args - - if strings.HasPrefix(shorthands, "test.") { - return - } - - outShorts = shorthands[1:] - c := shorthands[0] - - flag, exists := f.shorthands[c] - if !exists { - switch { - case c == 'h': - f.usage() - err = ErrHelp - return - case f.ParseErrorsWhitelist.UnknownFlags: - // '-f=arg arg ...' - // we do not want to lose arg in this case - if len(shorthands) > 2 && shorthands[1] == '=' { - outShorts = "" - return - } - - outArgs = stripUnknownFlagValue(outArgs) - return - default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - } - - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - // '-f=arg' - value = shorthands[2:] - outShorts = "" - } else if flag.NoOptDefVal != "" { - // '-f' (arg was optional) - value = flag.NoOptDefVal - } else if len(shorthands) > 1 { - // '-farg' - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - // '-f arg' - value = args[0] - outArgs = args[1:] - } else { - // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - - if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - shorthands := s[1:] - - // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) - return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.argsLenAtDash = len(f.args) - f.args = append(f.args, args...) - break - } - args, err = f.parseLongArg(s, args, fn) - } else { - args, err = f.parseShortArg(s, args, fn) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. 
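Two behaviors implemented in the parsing code above are easy to miss: NoOptDefVal makes the flag's argument optional (so a following word is no longer consumed), and ParseErrorsWhitelist.UnknownFlags routes unknown flags through stripUnknownFlagValue instead of failing. A sketch under those assumptions (flag names invented):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	level := fs.String("level", "info", "log level")

    	// Bare --level now means "debug". --level=warn still works, but
    	// "--level warn" would leave warn as a positional argument,
    	// per the branch order in parseLongArg above.
    	fs.Lookup("level").NoOptDefVal = "debug"

    	// Tolerate flags this set does not define.
    	fs.ParseErrorsWhitelist.UnknownFlags = true

    	_ = fs.Parse([]string{"--level", "--unknown", "value"})
    	fmt.Println(*level, fs.Args()) // debug []
    }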
-func (f *FlagSet) Parse(arguments []string) error { - if f.addedGoFlagSets != nil { - for _, goFlagSet := range f.addedGoFlagSets { - goFlagSet.Parse(nil) - } - } - f.parsed = true - - if len(arguments) < 0 { - return nil - } - - f.args = make([]string, 0, len(arguments)) - - set := func(flag *Flag, value string) error { - return f.Set(flag.Name, value) - } - - err := f.parseArgs(arguments, set) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - fmt.Println(err) - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -type parseFunc func(flag *Flag, value string) error - -// ParseAll parses flag definitions from the argument list, which should not -// include the command name. The arguments for fn are flag and value. Must be -// called after all flags in the FlagSet are defined and before flags are -// accessed by the program. The return value will be ErrHelp if -help was set -// but not defined. -func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - - err := f.parseArgs(arguments, fn) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. -// The arguments for fn are flag and value. Must be called after all flags are -// defined and before flags are accessed by the program. -func ParseAll(fn func(flag *Flag, value string) error) { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.ParseAll(os.Args[1:], fn) -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name, -// error handling property and SortFlags set to true. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - argsLenAtDash: -1, - interspersed: true, - SortFlags: true, - } - return f -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. 
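As the doc comment and Parse above note, independent FlagSets are the building block for subcommands, and ContinueOnError makes Parse return the error rather than exiting. An illustrative sketch (the "serve" subcommand is invented):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/spf13/pflag"
    )

    func main() {
    	// An independent FlagSet, e.g. for a "serve" subcommand; a real
    	// CLI would dispatch on os.Args[1] and parse os.Args[2:].
    	serve := pflag.NewFlagSet("serve", pflag.ContinueOnError)
    	port := serve.Int("port", 8080, "listen port")

    	// ContinueOnError: the caller decides how to fail.
    	if err := serve.Parse(os.Args[1:]); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(2)
    	}
    	fmt.Println("listening on port", *port)
    }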
-func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling - f.argsLenAtDash = -1 -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/float32.go b/openshift/tools/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index a243f81f7f..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } - -func float32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseFloat(sval, 32) - if err != nil { - return 0, err - } - return float32(v), nil -} - -// GetFloat32 return the float32 value of a flag with the given name -func (f *FlagSet) GetFloat32(name string) (float32, error) { - val, err := f.getFlagType(name, "float32", float32Conv) - if err != nil { - return 0, err - } - return val.(float32), nil -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
-func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/float32_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/float32_slice.go deleted file mode 100644 index caa352741a..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/float32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float32Slice Value -type float32SliceValue struct { - value *[]float32 - changed bool -} - -func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { - isv := new(float32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return err - } - out[i] = float32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *float32SliceValue) Type() string { - return "float32Slice" -} - -func (s *float32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float32SliceValue) fromString(val string) (float32, error) { - t64, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(t64), nil -} - -func (s *float32SliceValue) toString(val float32) string { - return fmt.Sprintf("%f", val) -} - -func (s *float32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float32SliceValue) Replace(val []string) error { - out := make([]float32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float32{}, nil - } - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return nil, err - } - out[i] = float32(temp64) - - } - return out, nil -} - -// GetFloat32Slice return the []float32 value of a flag with the given name -func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { - val, err := f.getFlagType(name, "float32Slice", float32SliceConv) - if err != nil { - return []float32{}, err - } - return val.([]float32), nil -} - -// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. -// The argument p points to a []float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
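The GetFloat32/GetFloat32Slice accessors above all funnel through getFlagType (flag.go), which checks the registered type string before converting, so asking for the wrong type returns an error rather than panicking. A short sketch (flag names invented):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	fs.Float32("ratio", 0.5, "blend ratio")
    	_ = fs.Parse([]string{"--ratio=0.75"})

    	r, err := fs.GetFloat32("ratio")
    	fmt.Println(r, err) // 0.75 <nil>

    	// Type mismatch is caught by getFlagType's type-string check.
    	if _, err := fs.GetInt("ratio"); err != nil {
    		fmt.Println(err) // trying to get int value of flag of type float32
    	}
    }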
-func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. -// The argument p points to a float32[] variable in which to store the value of the flag. -func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func Float32Slice(name string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, "", value, usage) -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/float64.go b/openshift/tools/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 04b5492a7d..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } - -func float64Conv(sval string) (interface{}, error) { - return strconv.ParseFloat(sval, 64) -} - -// GetFloat64 return the float64 value of a flag with the given name -func (f *FlagSet) GetFloat64(name string) (float64, error) { - val, err := f.getFlagType(name, "float64", float64Conv) - if err != nil { - return 0, err - } - return val.(float64), nil -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. 
-func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/float64_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/float64_slice.go deleted file mode 100644 index 85bf3073d5..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/float64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float64Slice Value -type float64SliceValue struct { - value *[]float64 - changed bool -} - -func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { - isv := new(float64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *float64SliceValue) Type() string { - return "float64Slice" -} - -func (s *float64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float64SliceValue) fromString(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -func (s *float64SliceValue) toString(val float64) string { - return fmt.Sprintf("%f", val) -} - -func (s *float64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float64SliceValue) Replace(val []string) error { - out := make([]float64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float64{}, nil - } - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetFloat64Slice return the []float64 value of a flag with the given name -func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { - val, err := f.getFlagType(name, "float64Slice", float64SliceConv) - if err != nil { - return []float64{}, err - } - return val.([]float64), nil -} - -// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. -// The argument p points to a []float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. -// The argument p points to a float64[] variable in which to store the value of the flag. -func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. 
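The "changed" bookkeeping in Set above gives slice flags their accumulate-on-repeat behavior: the first occurrence replaces the default, later ones append. SliceValue (flag.go) additionally allows wholesale replacement. A sketch under those assumptions (flag names invented):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	weights := fs.Float64Slice("weights", []float64{1}, "comma-separated weights")

    	// First use replaces the default [1], the second appends.
    	_ = fs.Parse([]string{"--weights=0.5,0.25", "--weights=0.125"})
    	fmt.Println(*weights) // [0.5 0.25 0.125]

    	// SliceValue allows overwriting the accumulated list.
    	if sv, ok := fs.Lookup("weights").Value.(pflag.SliceValue); ok {
    		_ = sv.Replace([]string{"2", "4"})
    	}
    	fmt.Println(*weights) // [2 4]
    }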
-func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. -func Float64Slice(name string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, "", value, usage) -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/golangflag.go b/openshift/tools/vendor/github.com/spf13/pflag/golangflag.go deleted file mode 100644 index d3dd72b7fe..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/golangflag.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - goflag "flag" - "reflect" - "strings" -) - -// flagValueWrapper implements pflag.Value around a flag.Value. The main -// difference here is the addition of the Type method that returns a string -// name of the type. As this is generally unknown, we approximate that with -// reflection. -type flagValueWrapper struct { - inner goflag.Value - flagType string -} - -// We are just copying the boolFlag interface out of goflag as that is what -// they use to decide if a flag should get "true" when no arg is given. -type goBoolFlag interface { - goflag.Value - IsBoolFlag() bool -} - -func wrapFlagValue(v goflag.Value) Value { - // If the flag.Value happens to also be a pflag.Value, just use it directly. - if pv, ok := v.(Value); ok { - return pv - } - - pv := &flagValueWrapper{ - inner: v, - } - - t := reflect.TypeOf(v) - if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { - t = t.Elem() - } - - pv.flagType = strings.TrimSuffix(t.Name(), "Value") - return pv -} - -func (v *flagValueWrapper) String() string { - return v.inner.String() -} - -func (v *flagValueWrapper) Set(s string) error { - return v.inner.Set(s) -} - -func (v *flagValueWrapper) Type() string { - return v.flagType -} - -// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag -// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei -// with both `-v` and `--v` in flags. If the golang flag was more than a single -// character (ex: `verbose`) it will only be accessible via `--verbose` -func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { - // Remember the default value as a string; it won't change. 
- flag := &Flag{ - Name: goflag.Name, - Usage: goflag.Usage, - Value: wrapFlagValue(goflag.Value), - // Looks like golang flags don't set DefValue correctly :-( - //DefValue: goflag.DefValue, - DefValue: goflag.Value.String(), - } - // Ex: if the golang flag was -v, allow both -v and --v to work - if len(flag.Name) == 1 { - flag.Shorthand = flag.Name - } - if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { - flag.NoOptDefVal = "true" - } - return flag -} - -// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet -func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { - if f.Lookup(goflag.Name) != nil { - return - } - newflag := PFlagFromGoFlag(goflag) - f.AddFlag(newflag) -} - -// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet -func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(goflag *goflag.Flag) { - f.AddGoFlag(goflag) - }) - if f.addedGoFlagSets == nil { - f.addedGoFlagSets = make([]*goflag.FlagSet, 0) - } - f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int.go b/openshift/tools/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index 1474b89df6..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return strconv.Itoa(int(*i)) } - -func intConv(sval string) (interface{}, error) { - return strconv.Atoi(sval) -} - -// GetInt return the int value of a flag with the given name -func (f *FlagSet) GetInt(name string) (int, error) { - val, err := f.getFlagType(name, "int", intConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
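Stepping back to the golangflag.go shim deleted above: AddGoFlagSet wraps stdlib flags via PFlagFromGoFlag and records the set so that FlagSet.Parse can later mark it parsed, which keeps the standard library from complaining. A sketch of the interop (the log-level flag is invented):

    package main

    import (
    	goflag "flag"
    	"fmt"

    	"github.com/spf13/pflag"
    )

    // A hypothetical flag registered with the standard library's package.
    var logLevel = goflag.Int("log-level", 0, "verbosity")

    func main() {
    	// Fold the stdlib flags into pflag's default set, then parse
    	// everything together with pflag's --flag syntax.
    	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
    	pflag.Parse()
    	fmt.Println("log-level =", *logLevel)
    }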
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. -func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int16.go b/openshift/tools/vendor/github.com/spf13/pflag/int16.go deleted file mode 100644 index f1a01d05e6..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int16 Value -type int16Value int16 - -func newInt16Value(val int16, p *int16) *int16Value { - *p = val - return (*int16Value)(p) -} - -func (i *int16Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 16) - *i = int16Value(v) - return err -} - -func (i *int16Value) Type() string { - return "int16" -} - -func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 16) - if err != nil { - return 0, err - } - return int16(v), nil -} - -// GetInt16 returns the int16 value of a flag with the given name -func (f *FlagSet) GetInt16(name string) (int16, error) { - val, err := f.getFlagType(name, "int16", int16Conv) - if err != nil { - return 0, err - } - return val.(int16), nil -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func Int16Var(p *int16, name string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, "", value, usage) - return p -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, shorthand, value, usage) - return p -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func Int16(name string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, "", value, usage) -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. -func Int16P(name, shorthand string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int32.go b/openshift/tools/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 9b95944f0f..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 32) - if err != nil { - return 0, err - } - return int32(v), nil -} - -// GetInt32 return the int32 value of a flag with the given name -func (f *FlagSet) GetInt32(name string) (int32, error) { - val, err := f.getFlagType(name, "int32", int32Conv) - if err != nil { - return 0, err - } - return val.(int32), nil -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int32_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/int32_slice.go deleted file mode 100644 index ff128ff06d..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int32Slice Value -type int32SliceValue struct { - value *[]int32 - changed bool -} - -func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { - isv := new(int32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return err - } - out[i] = int32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *int32SliceValue) Type() string { - return "int32Slice" -} - -func (s *int32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int32SliceValue) fromString(val string) (int32, error) { - t64, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(t64), nil -} - -func (s *int32SliceValue) toString(val int32) string { - return fmt.Sprintf("%d", val) -} - -func (s *int32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int32SliceValue) Replace(val []string) error { - out := make([]int32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int32{}, nil - } - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return nil, err - } - out[i] = int32(temp64) - - } - return out, nil -} - -// GetInt32Slice return the []int32 value of a flag with the given name -func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { - val, err := f.getFlagType(name, "int32Slice", int32SliceConv) - if err != nil { - return []int32{}, err - } - return val.([]int32), nil -} 
- -// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. -// The argument p points to a []int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. -// The argument p points to a int32[] variable in which to store the value of the flag. -func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func Int32Slice(name string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, "", value, usage) -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. 
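Worth noting from int32SliceValue.Set above: the first use of the flag replaces the default, and every later use appends. A minimal sketch assuming the vendored API:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	nums := fs.Int32Slice("nums", []int32{7}, "comma-separated int32 values")

	// The first --nums discards the default {7}; the second appends.
	_ = fs.Parse([]string{"--nums=1,2", "--nums=3"})
	fmt.Println(*nums) // [1 2 3]
}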
-func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int64.go b/openshift/tools/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0026d781d9..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int64Conv(sval string) (interface{}, error) { - return strconv.ParseInt(sval, 0, 64) -} - -// GetInt64 return the int64 value of a flag with the given name -func (f *FlagSet) GetInt64(name string) (int64, error) { - val, err := f.getFlagType(name, "int64", int64Conv) - if err != nil { - return 0, err - } - return val.(int64), nil -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. 
-func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int64_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/int64_slice.go deleted file mode 100644 index 25464638f3..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int64Slice Value -type int64SliceValue struct { - value *[]int64 - changed bool -} - -func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { - isv := new(int64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *int64SliceValue) Type() string { - return "int64Slice" -} - -func (s *int64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int64SliceValue) fromString(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -func (s *int64SliceValue) toString(val int64) string { - return fmt.Sprintf("%d", val) -} - -func (s *int64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int64SliceValue) Replace(val []string) error { - out := make([]int64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int64{}, nil - } - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetInt64Slice return the []int64 value of a flag with the given name -func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { - val, err := f.getFlagType(name, "int64Slice", int64SliceConv) - if err != nil { - return []int64{}, err - } - return val.([]int64), nil -} - -// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. -// The argument p points to a []int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. 
-// The argument p points to a int64[] variable in which to store the value of the flag. -func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func Int64Slice(name string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, "", value, usage) -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int8.go b/openshift/tools/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index 4da92228e6..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 8) - if err != nil { - return 0, err - } - return int8(v), nil -} - -// GetInt8 return the int8 value of a flag with the given name -func (f *FlagSet) GetInt8(name string) (int8, error) { - val, err := f.getFlagType(name, "int8", int8Conv) - if err != nil { - return 0, err - } - return val.(int8), nil -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. 
-func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/int_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/int_slice.go deleted file mode 100644 index e71c39d91a..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/int_slice.go +++ /dev/null @@ -1,158 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- intSlice Value -type intSliceValue struct { - value *[]int - changed bool -} - -func newIntSliceValue(val []int, p *[]int) *intSliceValue { - isv := new(intSliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *intSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *intSliceValue) Type() string { - return "intSlice" -} - -func (s *intSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *intSliceValue) Append(val string) error { - i, err := strconv.Atoi(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *intSliceValue) Replace(val []string) error { - out := make([]int, len(val)) - for i, d := range val { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *intSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = strconv.Itoa(d) - } - return out -} - -func intSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int{}, nil - } - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetIntSlice return the []int value of a flag with the given name -func (f *FlagSet) GetIntSlice(name string) ([]int, error) { - val, err := f.getFlagType(name, "intSlice", intSliceConv) - if err != nil { - return []int{}, err - } - return val.([]int), nil -} - -// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. -// The argument p points to a []int variable in which to store the value of the flag. -func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSliceVar defines a int[] flag with specified name, default value, and usage string. -// The argument p points to a int[] variable in which to store the value of the flag. -func IntSliceVar(p *[]int, name string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, "", value, usage) - return &p -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. 
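The Append, Replace, and GetSlice methods above are what satisfy pflag's SliceValue interface (the interface itself is outside this hunk, so treat that as an assumption about the wider library), which lets slice flags be inspected and mutated after parsing. Sketch:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	nums := fs.IntSlice("nums", nil, "comma-separated ints")
	_ = fs.Parse([]string{"--nums=1,2"})

	// intSliceValue implements Append/Replace/GetSlice, so the flag's
	// value can be edited through the SliceValue interface.
	if sv, ok := fs.Lookup("nums").Value.(pflag.SliceValue); ok {
		_ = sv.Append("3")
		fmt.Println(sv.GetSlice(), *nums) // [1 2 3] [1 2 3]
	}
}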
-func IntSlice(name string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, "", value, usage) -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func IntSliceP(name, shorthand string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/ip.go b/openshift/tools/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index 06b8bcb572..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,97 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - if s == "" { - return nil - } - ip := net.ParseIP(strings.TrimSpace(s)) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -func ipConv(sval string) (interface{}, error) { - ip := net.ParseIP(sval) - if ip != nil { - return ip, nil - } - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) -} - -// GetIP return the net.IP value of a flag with the given name -func (f *FlagSet) GetIP(name string) (net.IP, error) { - val, err := f.getFlagType(name, "ip", ipConv) - if err != nil { - return nil, err - } - return val.(net.IP), nil -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. 
-func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/ip_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/ip_slice.go deleted file mode 100644 index 775faae4fd..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/ip_slice.go +++ /dev/null @@ -1,186 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipSlice Value -type ipSliceValue struct { - value *[]net.IP - changed bool -} - -func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { - ipsv := new(ipSliceValue) - ipsv.value = p - *ipsv.value = val - return ipsv -} - -// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. -// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. -func (s *ipSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IP, 0, len(ipStrSlice)) - for _, ipStr := range ipStrSlice { - ip := net.ParseIP(strings.TrimSpace(ipStr)) - if ip == nil { - return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) - } - out = append(out, ip) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipSliceValue) Type() string { - return "ipSlice" -} - -// String defines a "native" format for this net.IP slice flag value. 
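As the Set method above shows, IP slice flags strip quote characters, read the argument as CSV, and trim whitespace around each entry before net.ParseIP. A sketch with a hypothetical flag name:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ips := fs.IPSlice("allow", nil, "comma-separated allowed IPs")

	// Entries are CSV-split and whitespace-trimmed; an unparsable entry fails Parse.
	_ = fs.Parse([]string{"--allow=192.168.0.1, 10.0.0.2"})
	fmt.Println(*ips) // [192.168.0.1 10.0.0.2]
}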
-func (s *ipSliceValue) String() string { - - ipStrSlice := make([]string, len(*s.value)) - for i, ip := range *s.value { - ipStrSlice[i] = ip.String() - } - - out, _ := writeAsCSV(ipStrSlice) - - return "[" + out + "]" -} - -func (s *ipSliceValue) fromString(val string) (net.IP, error) { - return net.ParseIP(strings.TrimSpace(val)), nil -} - -func (s *ipSliceValue) toString(val net.IP) string { - return val.String() -} - -func (s *ipSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *ipSliceValue) Replace(val []string) error { - out := make([]net.IP, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *ipSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func ipSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IP{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IP, len(ss)) - for i, sval := range ss { - ip := net.ParseIP(strings.TrimSpace(sval)) - if ip == nil { - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) - } - out[i] = ip - } - return out, nil -} - -// GetIPSlice returns the []net.IP value of a flag with the given name -func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { - val, err := f.getFlagType(name, "ipSlice", ipSliceConv) - if err != nil { - return []net.IP{}, err - } - return val.([]net.IP), nil -} - -// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of that flag. -func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. -func IPSlice(name string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, "", value, usage) -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/ipmask.go b/openshift/tools/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 5bd44bd21d..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,122 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strconv" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. -func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - if len(s) != 8 { - return nil - } - // net.IPMask.String() actually outputs things like ffffff00 - // so write a horrible parser for that as well :-( - m := []int{} - for i := 0; i < 4; i++ { - b := "0x" + s[2*i:2*i+2] - d, err := strconv.ParseInt(b, 0, 0) - if err != nil { - return nil - } - m = append(m, int(d)) - } - s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) - mask = net.ParseIP(s) - if mask == nil { - return nil - } - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -func parseIPv4Mask(sval string) (interface{}, error) { - mask := ParseIPv4Mask(sval) - if mask == nil { - return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) - } - return mask, nil -} - -// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name -func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { - val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) - if err != nil { - return nil, err - } - return val.(net.IPMask), nil -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. 
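ParseIPv4Mask above deliberately accepts both the dotted-quad form and the 8-hex-digit form that net.IPMask.String() produces; on my reading of the fallback parser, the two calls below yield the same mask:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Dotted form and the ffffff00-style hex form parse to the same mask.
	m1 := pflag.ParseIPv4Mask("255.255.255.0")
	m2 := pflag.ParseIPv4Mask("ffffff00")
	fmt.Println(m1.String(), m2.String()) // ffffff00 ffffff00
}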
-func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/ipnet.go b/openshift/tools/vendor/github.com/spf13/pflag/ipnet.go deleted file mode 100644 index e2c1b8bcd5..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/ipnet.go +++ /dev/null @@ -1,98 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// IPNet adapts net.IPNet for use as a flag. -type ipNetValue net.IPNet - -func (ipnet ipNetValue) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *ipNetValue) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = ipNetValue(*n) - return nil -} - -func (*ipNetValue) Type() string { - return "ipNet" -} - -func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { - *p = val - return (*ipNetValue)(p) -} - -func ipNetConv(sval string) (interface{}, error) { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err == nil { - return *n, nil - } - return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) -} - -// GetIPNet return the net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { - val, err := f.getFlagType(name, "ipNet", ipNetConv) - if err != nil { - return net.IPNet{}, err - } - return val.(net.IPNet), nil -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
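Since ipNetValue.Set goes through net.ParseCIDR and keeps only the returned network, host bits in the argument are dropped. A minimal sketch, flag name hypothetical:

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	cidr := fs.IPNet("pod-cidr", net.IPNet{}, "pod network CIDR")

	// net.ParseCIDR returns the network, so 10.1.2.3/16 becomes 10.1.0.0/16.
	_ = fs.Parse([]string{"--pod-cidr=10.1.2.3/16"})
	fmt.Println(cidr.String()) // 10.1.0.0/16
}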
-func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, "", value, usage) - return p -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, shorthand, value, usage) - return p -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func IPNet(name string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, "", value, usage) -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/ipnet_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/ipnet_slice.go deleted file mode 100644 index 6b541aa879..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/ipnet_slice.go +++ /dev/null @@ -1,147 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipNetSlice Value -type ipNetSliceValue struct { - value *[]net.IPNet - changed bool -} - -func newIPNetSliceValue(val []net.IPNet, p *[]net.IPNet) *ipNetSliceValue { - ipnsv := new(ipNetSliceValue) - ipnsv.value = p - *ipnsv.value = val - return ipnsv -} - -// Set converts, and assigns, the comma-separated IPNet argument string representation as the []net.IPNet value of this flag. -// If Set is called on a flag that already has a []net.IPNet assigned, the newly converted values will be appended. -func (s *ipNetSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipNetStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IPNet, 0, len(ipNetStrSlice)) - for _, ipNetStr := range ipNetStrSlice { - _, n, err := net.ParseCIDR(strings.TrimSpace(ipNetStr)) - if err != nil { - return fmt.Errorf("invalid string being converted to CIDR: %s", ipNetStr) - } - out = append(out, *n) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipNetSliceValue) Type() string { - return "ipNetSlice" -} - -// String defines a "native" format for this net.IPNet slice flag value. -func (s *ipNetSliceValue) String() string { - - ipNetStrSlice := make([]string, len(*s.value)) - for i, n := range *s.value { - ipNetStrSlice[i] = n.String() - } - - out, _ := writeAsCSV(ipNetStrSlice) - return "[" + out + "]" -} - -func ipNetSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IPNet{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IPNet, len(ss)) - for i, sval := range ss { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err != nil { - return nil, fmt.Errorf("invalid string being converted to CIDR: %s", sval) - } - out[i] = *n - } - return out, nil -} - -// GetIPNetSlice returns the []net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNetSlice(name string) ([]net.IPNet, error) { - val, err := f.getFlagType(name, "ipNetSlice", ipNetSliceConv) - if err != nil { - return []net.IPNet{}, err - } - return val.([]net.IPNet), nil -} - -// IPNetSliceVar defines a ipNetSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) { - f.VarP(newIPNetSliceValue(value, p), name, "", usage) -} - -// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) { - f.VarP(newIPNetSliceValue(value, p), name, shorthand, usage) -} - -// IPNetSliceVar defines a []net.IPNet flag with specified name, default value, and usage string. -// The argument p points to a []net.IPNet variable in which to store the value of the flag. -func IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) { - CommandLine.VarP(newIPNetSliceValue(value, p), name, "", usage) -} - -// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) { - CommandLine.VarP(newIPNetSliceValue(value, p), name, shorthand, usage) -} - -// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of a []net.IPNet variable that stores the value of that flag. -func (f *FlagSet) IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet { - p := []net.IPNet{} - f.IPNetSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet { - p := []net.IPNet{} - f.IPNetSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. 
-func IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet { - return CommandLine.IPNetSliceP(name, "", value, usage) -} - -// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash. -func IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet { - return CommandLine.IPNetSliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/string.go b/openshift/tools/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 04e0a26ff7..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,80 +0,0 @@ -package pflag - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return string(*s) } - -func stringConv(sval string) (interface{}, error) { - return sval, nil -} - -// GetString return the string value of a flag with the given name -func (f *FlagSet) GetString(name string) (string, error) { - val, err := f.getFlagType(name, "string", stringConv) - if err != nil { - return "", err - } - return val.(string), nil -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. 
-func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/string_array.go b/openshift/tools/vendor/github.com/spf13/pflag/string_array.go deleted file mode 100644 index d1ff0a96ba..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/string_array.go +++ /dev/null @@ -1,125 +0,0 @@ -package pflag - -// -- stringArray Value -type stringArrayValue struct { - value *[]string - changed bool -} - -func newStringArrayValue(val []string, p *[]string) *stringArrayValue { - ssv := new(stringArrayValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func (s *stringArrayValue) Set(val string) error { - if !s.changed { - *s.value = []string{val} - s.changed = true - } else { - *s.value = append(*s.value, val) - } - return nil -} - -func (s *stringArrayValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringArrayValue) Replace(val []string) error { - out := make([]string, len(val)) - for i, d := range val { - out[i] = d - } - *s.value = out - return nil -} - -func (s *stringArrayValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = d - } - return out -} - -func (s *stringArrayValue) Type() string { - return "stringArray" -} - -func (s *stringArrayValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringArrayConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a array with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringArray return the []string value of a flag with the given name -func (f *FlagSet) GetStringArray(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringArray", stringArrayConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArrayVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. 
-func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, "", value, usage) - return &p -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArray(name string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, "", value, usage) -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func StringArrayP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/string_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/string_slice.go deleted file mode 100644 index 3cb2e69dba..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/string_slice.go +++ /dev/null @@ -1,163 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "strings" -) - -// -- stringSlice Value -type stringSliceValue struct { - value *[]string - changed bool -} - -func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - ssv := new(stringSliceValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -func writeAsCSV(vals []string) (string, error) { - b := &bytes.Buffer{} - w := csv.NewWriter(b) - err := w.Write(vals) - if err != nil { - return "", err - } - w.Flush() - return strings.TrimSuffix(b.String(), "\n"), nil -} - -func (s *stringSliceValue) Set(val string) error { - v, err := readAsCSV(val) - if err != nil { - return err - } - if !s.changed { - *s.value = v - } else { - *s.value = append(*s.value, v...) 
- } - s.changed = true - return nil -} - -func (s *stringSliceValue) Type() string { - return "stringSlice" -} - -func (s *stringSliceValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func (s *stringSliceValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringSliceValue) Replace(val []string) error { - *s.value = val - return nil -} - -func (s *stringSliceValue) GetSlice() []string { - return *s.value -} - -func stringSliceConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a slice with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringSlice return the []string value of a flag with the given name -func (f *FlagSet) GetStringSlice(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringSlice", stringSliceConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSliceVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, "", value, usage) - return &p -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. 
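The doc comments in the two files deleted above spell out the one behavioral difference between the slice flavors: `StringSlice` applies CSV splitting to each occurrence of the flag, while `StringArray` keeps each occurrence verbatim. A minimal sketch of that contrast, assuming a standalone program and hypothetical flag names `--ss` and `--sa`:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

	// StringSlice splits each occurrence on commas (CSV rules).
	ss := fs.StringSlice("ss", nil, "comma-separated values")
	// StringArray stores each occurrence as-is.
	sa := fs.StringArray("sa", nil, "verbatim values")

	_ = fs.Parse([]string{"--ss=v1,v2", "--ss=v3", "--sa=v1,v2", "--sa=v3"})

	fmt.Println(*ss) // [v1 v2 v3]
	fmt.Println(*sa) // [v1,v2 v3]
}
```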
-func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSlice(name string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, "", value, usage) -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func StringSliceP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/string_to_int.go b/openshift/tools/vendor/github.com/spf13/pflag/string_to_int.go deleted file mode 100644 index 5ceda3965d..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/string_to_int.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt Value -type stringToIntValue struct { - value *map[string]int - changed bool -} - -func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { - ssv := new(stringToIntValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToIntValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToIntValue) Type() string { - return "stringToInt" -} - -func (s *stringToIntValue) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.Itoa(v)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToIntConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt return the map[string]int value of a flag with the given name -func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { - val, err := f.getFlagType(name, "stringToInt", stringToIntConv) - if err != nil { - return map[string]int{}, err - } - return val.(map[string]int), nil -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the values of the multiple flags. 
-// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, "", value, usage) - return &p -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt(name string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, "", value, usage) -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
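The `Set` method in the file deleted above parses the `a=1,b=2` format and, when the flag is repeated, merges the new pairs into the existing map with later keys winning. A small sketch of that merge behavior, using a hypothetical `--counts` flag:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

	// Accepts the key=value,key=value format handled by Set.
	counts := fs.StringToInt("counts", map[string]int{}, "named counters")

	if err := fs.Parse([]string{"--counts=a=1,b=2", "--counts=b=3"}); err != nil {
		panic(err)
	}

	// Repeated occurrences merge into the map; duplicate keys are overwritten.
	fmt.Println((*counts)["a"], (*counts)["b"]) // 1 3
}
```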
-func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/string_to_int64.go b/openshift/tools/vendor/github.com/spf13/pflag/string_to_int64.go deleted file mode 100644 index a807a04a0b..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/string_to_int64.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt64 Value -type stringToInt64Value struct { - value *map[string]int64 - changed bool -} - -func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { - ssv := new(stringToInt64Value) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToInt64Value) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToInt64Value) Type() string { - return "stringToInt64" -} - -func (s *stringToInt64Value) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.FormatInt(v, 10)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToInt64Conv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int64{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt64 return the map[string]int64 value of a flag with the given name -func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { - val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) - if err != nil { - return map[string]int64{}, err - } - return val.(map[string]int64), nil -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. -// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. 
-// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, "", value, usage) - return &p -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, "", value, usage) -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
-func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/string_to_string.go b/openshift/tools/vendor/github.com/spf13/pflag/string_to_string.go deleted file mode 100644 index 890a01afc0..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/string_to_string.go +++ /dev/null @@ -1,160 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "fmt" - "strings" -) - -// -- stringToString Value -type stringToStringValue struct { - value *map[string]string - changed bool -} - -func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { - ssv := new(stringToStringValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToStringValue) Set(val string) error { - var ss []string - n := strings.Count(val, "=") - switch n { - case 0: - return fmt.Errorf("%s must be formatted as key=value", val) - case 1: - ss = append(ss, strings.Trim(val, `"`)) - default: - r := csv.NewReader(strings.NewReader(val)) - var err error - ss, err = r.Read() - if err != nil { - return err - } - } - - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToStringValue) Type() string { - return "stringToString" -} - -func (s *stringToStringValue) String() string { - records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { - records = append(records, k+"="+v) - } - - var buf bytes.Buffer - w := csv.NewWriter(&buf) - if err := w.Write(records); err != nil { - panic(err) - } - w.Flush() - return "[" + strings.TrimSpace(buf.String()) + "]" -} - -func stringToStringConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]string{}, nil - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil, err - } - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - return out, nil -} - -// GetStringToString return the map[string]string value of a flag with the given name -func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { - val, err := f.getFlagType(name, "stringToString", stringToStringConv) - if err != nil { - return map[string]string{}, err - } - return val.(map[string]string), nil -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. -func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, "", value, usage) - return &p -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToString(name string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, "", value, usage) -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. 
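The `Set` method in the stringToString file deleted above counts `=` signs to decide how to split: with more than one, the value is read as CSV into several pairs; with exactly one, the whole value is treated as a single pair, commas included. A sketch of both cases, with a hypothetical `--labels` flag:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	labels := fs.StringToString("labels", map[string]string{}, "key=value pairs")

	// Two '=' signs: the value is parsed as CSV and split into two pairs.
	// One '=' sign: the whole value is a single pair, commas and all.
	_ = fs.Parse([]string{"--labels=env=prod,team=infra", "--labels=note=a,b"})

	fmt.Println((*labels)["env"], (*labels)["team"], (*labels)["note"]) // prod infra a,b
}
```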
-func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/uint.go b/openshift/tools/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index dcbc2b758c..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uintConv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 0) - if err != nil { - return 0, err - } - return uint(v), nil -} - -// GetUint return the uint value of a flag with the given name -func (f *FlagSet) GetUint(name string) (uint, error) { - val, err := f.getFlagType(name, "uint", uintConv) - if err != nil { - return 0, err - } - return val.(uint), nil -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
-func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/uint16.go b/openshift/tools/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index 7e9914eddd..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 16) - if err != nil { - return 0, err - } - return uint16(v), nil -} - -// GetUint16 return the uint16 value of a flag with the given name -func (f *FlagSet) GetUint16(name string) (uint16, error) { - val, err := f.getFlagType(name, "uint16", uint16Conv) - if err != nil { - return 0, err - } - return val.(uint16), nil -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/uint32.go b/openshift/tools/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index d8024539bf..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint32 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} - -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 32) - if err != nil { - return 0, err - } - return uint32(v), nil -} - -// GetUint32 return the uint32 value of a flag with the given name -func (f *FlagSet) GetUint32(name string) (uint32, error) { - val, err := f.getFlagType(name, "uint32", uint32Conv) - if err != nil { - return 0, err - } - return val.(uint32), nil -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/uint64.go b/openshift/tools/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index f62240f2ce..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint64Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 64) - if err != nil { - return 0, err - } - return uint64(v), nil -} - -// GetUint64 return the uint64 value of a flag with the given name -func (f *FlagSet) GetUint64(name string) (uint64, error) { - val, err := f.getFlagType(name, "uint64", uint64Conv) - if err != nil { - return 0, err - } - return val.(uint64), nil -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/uint8.go b/openshift/tools/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index bb0e83c1f6..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 8) - if err != nil { - return 0, err - } - return uint8(v), nil -} - -// GetUint8 return the uint8 value of a flag with the given name -func (f *FlagSet) GetUint8(name string) (uint8, error) { - val, err := f.getFlagType(name, "uint8", uint8Conv) - if err != nil { - return 0, err - } - return val.(uint8), nil -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. 
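Each sized variant deleted above calls `strconv.ParseUint` with base 0 and the matching bit size, so hex and octal prefixes parse and out-of-range values fail at parse time rather than silently truncating. A sketch of both effects with a hypothetical `--port` flag:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	port := fs.Uint16("port", 8080, "listen port")

	// Base-0 parsing: decimal, 0x hex, and 0-prefixed octal are all accepted.
	if err := fs.Parse([]string{"--port=0x1F90"}); err != nil {
		panic(err)
	}
	fmt.Println(*port) // 8080

	// Values outside the 16-bit range are rejected by ParseUint's bitSize check;
	// with ContinueOnError the error is returned (and echoed to stderr).
	fs2 := pflag.NewFlagSet("demo2", pflag.ContinueOnError)
	fs2.Uint16("port", 0, "listen port")
	fmt.Println(fs2.Parse([]string{"--port=70000"}) != nil) // true
}
```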
-func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/pflag/uint_slice.go b/openshift/tools/vendor/github.com/spf13/pflag/uint_slice.go deleted file mode 100644 index 5fa924835e..0000000000 --- a/openshift/tools/vendor/github.com/spf13/pflag/uint_slice.go +++ /dev/null @@ -1,168 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- uintSlice Value -type uintSliceValue struct { - value *[]uint - changed bool -} - -func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { - uisv := new(uintSliceValue) - uisv.value = p - *uisv.value = val - return uisv -} - -func (s *uintSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return err - } - out[i] = uint(u) - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *uintSliceValue) Type() string { - return "uintSlice" -} - -func (s *uintSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *uintSliceValue) fromString(val string) (uint, error) { - t, err := strconv.ParseUint(val, 10, 0) - if err != nil { - return 0, err - } - return uint(t), nil -} - -func (s *uintSliceValue) toString(val uint) string { - return fmt.Sprintf("%d", val) -} - -func (s *uintSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *uintSliceValue) Replace(val []string) error { - out := make([]uint, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *uintSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func uintSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []uint{}, nil - } - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return nil, err - } - out[i] = uint(u) - } - return out, nil -} - -// GetUintSlice returns the []uint value of a flag with the given name. -func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { - val, err := f.getFlagType(name, "uintSlice", uintSliceConv) - if err != nil { - return []uint{}, err - } - return val.([]uint), nil -} - -// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. -// The argument p points to a []uint variable in which to store the value of the flag. -func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. 
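The `Append`, `Replace`, and `GetSlice` methods in the uintSlice file deleted above satisfy pflag's exported `SliceValue` interface (my reading of the upstream API; the interface name itself does not appear in this diff), which lets slice-valued flags be extended generically after parsing. A sketch with a hypothetical `--ids` flag:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ids := fs.UintSlice("ids", []uint{}, "numeric ids")

	_ = fs.Parse([]string{"--ids=1,2", "--ids=3"})

	// uintSliceValue implements pflag.SliceValue, so the parsed flag can be
	// appended to without knowing its concrete element type.
	if sv, ok := fs.Lookup("ids").Value.(pflag.SliceValue); ok {
		_ = sv.Append("4")
	}
	fmt.Println(*ids) // [1 2 3 4]
}
```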
-func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. -// The argument p points to a uint[] variable in which to store the value of the flag. -func UintSliceVar(p *[]uint, name string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, "", value, usage) - return &p -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func UintSlice(name string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, "", value, usage) -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, shorthand, value, usage) -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/.editorconfig b/openshift/tools/vendor/github.com/spf13/viper/.editorconfig deleted file mode 100644 index 1f664d13a5..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/.editorconfig +++ /dev/null @@ -1,18 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.go] -indent_style = tab - -[{Makefile,*.mk}] -indent_style = tab - -[*.nix] -indent_size = 2 diff --git a/openshift/tools/vendor/github.com/spf13/viper/.envrc b/openshift/tools/vendor/github.com/spf13/viper/.envrc deleted file mode 100644 index 2e0f9f5f71..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/.envrc +++ /dev/null @@ -1,4 +0,0 @@ -if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" -fi -use flake . 
--impure diff --git a/openshift/tools/vendor/github.com/spf13/viper/.gitignore b/openshift/tools/vendor/github.com/spf13/viper/.gitignore deleted file mode 100644 index f1bbd42803..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -/.devenv/ -/.direnv/ -/.idea/ -/.pre-commit-config.yaml -/bin/ -/build/ -/var/ -/vendor/ diff --git a/openshift/tools/vendor/github.com/spf13/viper/.golangci.yaml b/openshift/tools/vendor/github.com/spf13/viper/.golangci.yaml deleted file mode 100644 index 474f41633c..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/.golangci.yaml +++ /dev/null @@ -1,105 +0,0 @@ -run: - timeout: 5m - -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/viper) - gocritic: - # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage. - enabled-tags: - - diagnostic - - experimental - - opinionated - - style - disabled-checks: - - importShadow - - unnamedResult - goimports: - local-prefixes: github.com/spf13/viper - -linters: - disable-all: true - enable: - - bodyclose - - dogsled - - dupl - - durationcheck - - exhaustive - - gci - - gocritic - - godot - - gofmt - - gofumpt - - goimports - - gomoddirectives - - goprintffuncname - - govet - - importas - - ineffassign - - makezero - - misspell - - nakedret - - nilerr - - noctx - - nolintlint - - prealloc - - predeclared - - revive - - rowserrcheck - - sqlclosecheck - - staticcheck - - stylecheck - - tparallel - - typecheck - - unconvert - - unparam - - unused - - wastedassign - - whitespace - - # fixme - # - cyclop - # - errcheck - # - errorlint - # - exhaustivestruct - # - forbidigo - # - forcetypeassert - # - gochecknoglobals - # - gochecknoinits - # - gocognit - # - goconst - # - gocyclo - # - gosec - # - gosimple - # - ifshort - # - lll - # - nlreturn - # - paralleltest - # - scopelint - # - thelper - # - wrapcheck - - # unused - # - depguard - # - goheader - # - gomodguard - - # deprecated - # - deadcode - # - structcheck - # - varcheck - - # don't enable: - # - asciicheck - # - funlen - # - godox - # - goerr113 - # - gomnd - # - interfacer - # - maligned - # - nestif - # - testpackage - # - wsl diff --git a/openshift/tools/vendor/github.com/spf13/viper/.yamlignore b/openshift/tools/vendor/github.com/spf13/viper/.yamlignore deleted file mode 100644 index c04c4dead1..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/.yamlignore +++ /dev/null @@ -1,2 +0,0 @@ -# TODO: FIXME -/.github/ diff --git a/openshift/tools/vendor/github.com/spf13/viper/.yamllint.yaml b/openshift/tools/vendor/github.com/spf13/viper/.yamllint.yaml deleted file mode 100644 index bac19ce18b..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/.yamllint.yaml +++ /dev/null @@ -1,6 +0,0 @@ -ignore-from-file: [.gitignore, .yamlignore] - -extends: default - -rules: - line-length: disable diff --git a/openshift/tools/vendor/github.com/spf13/viper/LICENSE b/openshift/tools/vendor/github.com/spf13/viper/LICENSE deleted file mode 100644 index 4527efb9c0..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/openshift/tools/vendor/github.com/spf13/viper/Makefile b/openshift/tools/vendor/github.com/spf13/viper/Makefile deleted file mode 100644 index a77b9c81c1..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/Makefile +++ /dev/null @@ -1,87 +0,0 @@ -# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html - -OS = $(shell uname | tr A-Z a-z) -export PATH := $(abspath bin/):${PATH} - -# Build variables -BUILD_DIR ?= build -export CGO_ENABLED ?= 0 -export GOOS = $(shell go env GOOS) -ifeq (${VERBOSE}, 1) -ifeq ($(filter -v,${GOARGS}),) - GOARGS += -v -endif -TEST_FORMAT = short-verbose -endif - -# Dependency versions -GOTESTSUM_VERSION = 1.9.0 -GOLANGCI_VERSION = 1.53.3 - -# Add the ability to override some variables -# Use with care --include override.mk - -.PHONY: clear -clear: ## Clear the working area and the project - rm -rf bin/ - -.PHONY: check -check: test lint ## Run tests and linters - - -TEST_PKGS ?= ./... -.PHONY: test -test: TEST_FORMAT ?= short -test: SHELL = /bin/bash -test: export CGO_ENABLED=1 -test: bin/gotestsum ## Run tests - @mkdir -p ${BUILD_DIR} - bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...) - -.PHONY: lint -lint: lint-go lint-yaml -lint: ## Run linters - -.PHONY: lint-go -lint-go: - golangci-lint run $(if ${CI},--out-format github-actions,) - -.PHONY: lint-yaml -lint-yaml: - yamllint $(if ${CI},-f github,) --no-warnings . 
- -.PHONY: fmt -fmt: ## Format code - golangci-lint run --fix - -deps: bin/golangci-lint bin/gotestsum yamllint -deps: ## Install dependencies - -bin/gotestsum: - @mkdir -p bin - curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum && chmod +x ./bin/gotestsum - -bin/golangci-lint: - @mkdir -p bin - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash -s -- v${GOLANGCI_VERSION} - -.PHONY: yamllint -yamllint: - pip3 install --user yamllint - -# Add custom targets here --include custom.mk - -.PHONY: list -list: ## List all make targets - @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort - -.PHONY: help -.DEFAULT_GOAL := help -help: - @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -# Variable outputting/exporting rules -var-%: ; @echo $($*) -varexport-%: ; @echo $*=$($*) diff --git a/openshift/tools/vendor/github.com/spf13/viper/README.md b/openshift/tools/vendor/github.com/spf13/viper/README.md deleted file mode 100644 index 769a5d900d..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/README.md +++ /dev/null @@ -1,931 +0,0 @@ -> ## Viper v2 feedback -> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9 -> -> **Thank you!** - -![viper logo](https://github.com/user-attachments/assets/acae9193-2974-41f3-808d-2d433f5ada5e) - - -[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) -[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) -[![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) - -**Go configuration with fangs!** - -Many Go projects are built using Viper including: - -* [Hugo](http://gohugo.io) -* [EMC RexRay](http://rexray.readthedocs.org/en/stable/) -* [Imgur’s Incus](https://github.com/Imgur/incus) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -* [Docker Notary](https://github.com/docker/Notary) -* [BloomApi](https://www.bloomapi.com/) -* [doctl](https://github.com/digitalocean/doctl) -* [Clairctl](https://github.com/jgsqware/clairctl) -* [Mercure](https://mercure.rocks) -* [Meshery](https://github.com/meshery/meshery) -* [Bearer](https://github.com/bearer/bearer) -* [Coder](https://github.com/coder/coder) -* [Vitess](https://vitess.io/) - - -## Install - -```shell -go get 
github.com/spf13/viper -``` - -**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies. - - -## What is Viper? - -Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors). -It is designed to work within an application, and can handle all types of configuration needs -and formats. It supports: - -* setting defaults -* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files -* live watching and re-reading of config files (optional) -* reading from environment variables -* reading from remote config systems (etcd or Consul), and watching changes -* reading from command line flags -* reading from buffer -* setting explicit values - -Viper can be thought of as a registry for all of your applications configuration needs. - - -## Why Viper? - -When building a modern application, you don’t want to worry about -configuration file formats; you want to focus on building awesome software. -Viper is here to help with that. - -Viper does the following for you: - -1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats. -2. Provide a mechanism to set default values for your different configuration options. -3. Provide a mechanism to set override values for options specified through command line flags. -4. Provide an alias system to easily rename parameters without breaking existing code. -5. Make it easy to tell the difference between when a user has provided a command line or config file which is the same as the default. - -Viper uses the following precedence order. Each item takes precedence over the item below it: - - * explicit call to `Set` - * flag - * env - * config - * key/value store - * default - -**Important:** Viper configuration keys are case insensitive. -There are ongoing discussions about making that optional. - - -## Putting Values into Viper - -### Establishing Defaults - -A good configuration system will support default values. A default value is not -required for a key, but it’s useful in the event that a key hasn't been set via -config file, environment variable, remote configuration or flag. - -Examples: - -```go -viper.SetDefault("ContentDir", "content") -viper.SetDefault("LayoutDir", "layouts") -viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"}) -``` - -### Reading Config Files - -Viper requires minimal configuration so it knows where to look for config files. -Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but -currently a single Viper instance only supports a single configuration file. -Viper does not default to any configuration search paths leaving defaults decision -to an application. - -Here is an example of how to use Viper to search for and read a configuration file. -None of the specific paths are required, but at least one path should be provided -where a configuration file is expected. 
- -```go -viper.SetConfigName("config") // name of config file (without extension) -viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name -viper.AddConfigPath("/etc/appname/") // path to look for the config file in -viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths -viper.AddConfigPath(".") // optionally look for config in the working directory -err := viper.ReadInConfig() // Find and read the config file -if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("fatal error config file: %w", err)) -} -``` - -You can handle the specific case where no config file is found like this: - -```go -if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } -} - -// Config file found and successfully parsed -``` - -*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. For those configuration files that lie in the home of the user without any extension like `.bashrc` - -### Writing Config Files - -Reading from config files is useful, but at times you want to store all modifications made at run time. -For that, a bunch of commands are available, each with its own purpose: - -* WriteConfig - writes the current viper configuration to the predefined path, if exists. Errors if no predefined path. Will overwrite the current config file, if it exists. -* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path. Will not overwrite the current config file, if it exists. -* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists. -* SafeWriteConfigAs - writes the current viper configuration to the given filepath. Will not overwrite the given file, if it exists. - -As a rule of the thumb, everything marked with safe won't overwrite any file, but just create if not existent, whilst the default behavior is to create or truncate. - -A small examples section: - -```go -viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName' -viper.SafeWriteConfig() -viper.WriteConfigAs("/path/to/my/.config") -viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written -viper.SafeWriteConfigAs("/path/to/my/.other_config") -``` - -### Watching and re-reading config files - -Viper supports the ability to have your application live read a config file while running. - -Gone are the days of needing to restart a server to have a config take effect, -viper powered applications can read an update to a config file while running and -not miss a beat. - -Simply tell the viper instance to watchConfig. -Optionally you can provide a function for Viper to run each time a change occurs. - -**Make sure you add all of the configPaths prior to calling `WatchConfig()`** - -```go -viper.OnConfigChange(func(e fsnotify.Event) { - fmt.Println("Config file changed:", e.Name) -}) -viper.WatchConfig() -``` - -### Reading Config from io.Reader - -Viper predefines many configuration sources such as files, environment -variables, flags, and remote K/V store, but you are not bound to them. You can -also implement your own required configuration source and feed it to viper. 
-
-```go
-viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
-
-// any approach to require this configuration into your program.
-var yamlExample = []byte(`
-Hacker: true
-name: steve
-hobbies:
-- skateboarding
-- snowboarding
-- go
-clothing:
-  jacket: leather
-  trousers: denim
-age: 35
-eyes: brown
-beard: true
-`)
-
-viper.ReadConfig(bytes.NewBuffer(yamlExample))
-
-viper.Get("name") // this would be "steve"
-```
-
-### Setting Overrides
-
-These could be from a command line flag, or from your own application logic.
-
-```go
-viper.Set("Verbose", true)
-viper.Set("LogFile", LogFile)
-viper.Set("host.port", 5899) // set subset
-```
-
-### Registering and Using Aliases
-
-Aliases permit a single value to be referenced by multiple keys.
-
-```go
-viper.RegisterAlias("loud", "Verbose")
-
-viper.Set("verbose", true) // same result as next line
-viper.Set("loud", true)    // same result as prior line
-
-viper.GetBool("loud")    // true
-viper.GetBool("verbose") // true
-```
-
-### Working with Environment Variables
-
-Viper has full support for environment variables. This enables 12-factor
-applications out of the box. There are five methods that exist to aid working
-with ENV:
-
- * `AutomaticEnv()`
- * `BindEnv(string...) : error`
- * `SetEnvPrefix(string)`
- * `SetEnvKeyReplacer(string...) *strings.Replacer`
- * `AllowEmptyEnv(bool)`
-
-_When working with ENV variables, it’s important to recognize that Viper
-treats ENV variables as case sensitive._
-
-Viper provides a mechanism to try to ensure that ENV variables are unique. By
-using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from
-the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
-prefix.
-
-`BindEnv` takes one or more parameters. The first parameter is the key name, the
-rest are the names of the environment variables to bind to this key. If more than
-one is provided, they will take precedence in the specified order. The name of
-the environment variable is case sensitive. If the ENV variable name is not provided, then
-Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter),
-it **does not** automatically add the prefix. For example, if the second parameter is "id",
-Viper will look for the ENV variable "ID".
-
-One important thing to recognize when working with ENV variables is that the
-value will be read each time it is accessed. Viper does not fix the value when
-`BindEnv` is called.
-
-`AutomaticEnv` is a powerful helper, especially when combined with
-`SetEnvPrefix`. When called, Viper will check for an environment variable any
-time a `viper.Get` request is made, applying the following rule: it checks for
-an environment variable with a name matching the key uppercased and
-prefixed with the `EnvPrefix`, if one is set.
-
-`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
-keys to an extent. This is useful if you want to use `-` or something in your
-`Get()` calls, but want your environmental variables to use `_` delimiters. An
-example of using it can be found in `viper_test.go`.
-
-Alternatively, you can use `EnvKeyReplacer` with the `NewWithOptions` factory function.
-Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface, allowing you to write custom string replacing logic.
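-
-For example, a minimal sketch (the `app` prefix and `api.timeout` key are arbitrary):
-
-```go
-// Rewrite "." and "-" in keys to "_" when looking up environment variables,
-// so Get("api.timeout") checks the APP_API_TIMEOUT variable.
-v := viper.NewWithOptions(viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")))
-v.SetEnvPrefix("app")
-v.AutomaticEnv()
-
-os.Setenv("APP_API_TIMEOUT", "30s") // typically set outside the app
-
-v.GetDuration("api.timeout") // 30 * time.Second
-```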
-By default, empty environment variables are considered unset and will fall back to
-the next configuration source. To treat empty environment variables as set, use
-the `AllowEmptyEnv` method.
-
-#### Env example
-
-```go
-SetEnvPrefix("spf") // will be uppercased automatically
-BindEnv("id")
-
-os.Setenv("SPF_ID", "13") // typically done outside of the app
-
-id := Get("id") // 13
-```
-
-### Working with Flags
-
-Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
-as used in the [Cobra](https://github.com/spf13/cobra) library.
-
-Like `BindEnv`, the value is not set when the binding method is called, but when
-it is accessed. This means you can bind as early as you want, even in an
-`init()` function.
-
-For individual flags, the `BindPFlag()` method provides this functionality.
-
-Example:
-
-```go
-serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
-viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
-```
-
-You can also bind an existing set of pflags (pflag.FlagSet):
-
-Example:
-
-```go
-pflag.Int("flagname", 1234, "help message for flagname")
-
-pflag.Parse()
-viper.BindPFlags(pflag.CommandLine)
-
-i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
-```
-
-The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
-the use of other packages that use the [flag](https://golang.org/pkg/flag/)
-package from the standard library. The pflag package can handle the flags
-defined for the flag package by importing these flags. This is accomplished
-by calling a convenience function provided by the pflag package called
-AddGoFlagSet().
-
-Example:
-
-```go
-package main
-
-import (
-    "flag"
-
-    "github.com/spf13/pflag"
-    "github.com/spf13/viper"
-)
-
-func main() {
-
-    // using standard library "flag" package
-    flag.Int("flagname", 1234, "help message for flagname")
-
-    pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
-    pflag.Parse()
-    viper.BindPFlags(pflag.CommandLine)
-
-    i := viper.GetInt("flagname") // retrieve value from viper
-
-    // ...
-}
-```
-
-#### Flag interfaces
-
-Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
-
-`FlagValue` represents a single flag. This is a very simple example of how to implement this interface:
-
-```go
-type myFlag struct{}
-
-func (f myFlag) HasChanged() bool    { return false }
-func (f myFlag) Name() string        { return "my-flag-name" }
-func (f myFlag) ValueString() string { return "my-flag-value" }
-func (f myFlag) ValueType() string   { return "string" }
-```
-
-Once your flag implements this interface, you can simply tell Viper to bind it:
-
-```go
-viper.BindFlagValue("my-flag-name", myFlag{})
-```
-
-`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface:
-
-```go
-type myFlagSet struct {
-    flags []myFlag
-}
-
-func (f myFlagSet) VisitAll(fn func(viper.FlagValue)) {
-    for _, flag := range f.flags { // note: iterate over the receiver's flags
-        fn(flag)
-    }
-}
-```
-
-Once your flag set implements this interface, you can simply tell Viper to bind it:
-
-```go
-fSet := myFlagSet{
-    flags: []myFlag{myFlag{}, myFlag{}},
-}
-viper.BindFlagValues("my-flags", fSet)
-```
-
-### Remote Key/Value Store Support
-
-To enable remote support in Viper, do a blank import of the `viper/remote`
-package:
-
-`import _ "github.com/spf13/viper/remote"`
-
-Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path
-in a Key/Value store such as etcd or Consul. These values take precedence over
-default values, but are overridden by configuration values retrieved from disk,
-flags, or environment variables.
-
-Viper supports multiple hosts.
-To use, pass a list of endpoints separated by `;`. For example: `http://127.0.0.1:4001;http://127.0.0.1:4002`.
-
-Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve
-configuration from the K/V store, which means that you can store your
-configuration values encrypted and have them automatically decrypted if you have
-the correct gpg keyring. Encryption is optional.
-
-You can use remote configuration in conjunction with local configuration, or
-independently of it.
-
-`crypt` has a command-line helper that you can use to put configurations in your
-K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
-
-```bash
-$ go install github.com/sagikazarmark/crypt/bin/crypt@latest
-$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
-```
-
-Confirm that your value was set:
-
-```bash
-$ crypt get -plaintext /config/hugo.json
-```
-
-See the `crypt` documentation for examples of how to set encrypted values, or
-how to use Consul.
-
-### Remote Key/Value Store Example - Unencrypted
-
-#### etcd
-```go
-viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.json")
-viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
-err := viper.ReadRemoteConfig()
-```
-
-#### etcd3
-```go
-viper.AddRemoteProvider("etcd3", "http://127.0.0.1:4001", "/config/hugo.json")
-viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
-err := viper.ReadRemoteConfig()
-```
-
-#### Consul
-
-You need to set a key in Consul's key/value store with a JSON value containing your desired config.
-For example, create a Consul key/value store key `MY_CONSUL_KEY` with the value:
-
-```json
-{
-    "port": 8080,
-    "hostname": "myhostname.com"
-}
-```
-
-```go
-viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY")
-viper.SetConfigType("json") // Need to explicitly set this to json
-err := viper.ReadRemoteConfig()
-
-fmt.Println(viper.Get("port"))     // 8080
-fmt.Println(viper.Get("hostname")) // myhostname.com
-```
-
-#### Firestore
-
-```go
-viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document")
-viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml"
-err := viper.ReadRemoteConfig()
-```
-
-Of course, you're also allowed to use `AddSecureRemoteProvider`.
-
-#### NATS
-
-```go
-viper.AddRemoteProvider("nats", "nats://127.0.0.1:4222", "myapp.config")
-viper.SetConfigType("json")
-err := viper.ReadRemoteConfig()
-```
-
-### Remote Key/Value Store Example - Encrypted
-
-```go
-viper.AddSecureRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.json", "/etc/secrets/mykeyring.gpg")
-viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
-err := viper.ReadRemoteConfig()
-```
-
-### Watching Changes in etcd - Unencrypted
-
-```go
-// alternatively, you can create a new viper instance.
-var runtime_viper = viper.New()
-
-runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
-runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
-
-// read from remote config the first time.
-err := runtime_viper.ReadRemoteConfig()
-
-// unmarshal config
-runtime_viper.Unmarshal(&runtime_conf)
-
-// open a goroutine to watch remote changes forever
-go func() {
-    for {
-        time.Sleep(time.Second * 5) // delay after each request
-
-        // currently, only tested with etcd support
-        err := runtime_viper.WatchRemoteConfig()
-        if err != nil {
-            log.Errorf("unable to read remote config: %v", err)
-            continue
-        }
-
-        // unmarshal new config into our runtime config struct. you can also use a channel
-        // to implement a signal to notify the system of the changes
-        runtime_viper.Unmarshal(&runtime_conf)
-    }
-}()
-```
-
-## Getting Values From Viper
-
-In Viper, there are a few ways to get a value depending on the value’s type.
-The following functions and methods exist:
-
- * `Get(key string) : any`
- * `GetBool(key string) : bool`
- * `GetFloat64(key string) : float64`
- * `GetInt(key string) : int`
- * `GetIntSlice(key string) : []int`
- * `GetString(key string) : string`
 * `GetStringMap(key string) : map[string]any`
- * `GetStringMapString(key string) : map[string]string`
- * `GetStringSlice(key string) : []string`
- * `GetTime(key string) : time.Time`
- * `GetDuration(key string) : time.Duration`
- * `IsSet(key string) : bool`
- * `AllSettings() : map[string]any`
-
-One important thing to recognize is that each Get function will return a zero
-value if it’s not found. To check if a given key exists, the `IsSet()` method
-has been provided.
-
-The zero value will also be returned if the value is set, but fails to parse
-as the requested type.
-
-Example:
-
-```go
-viper.GetString("logfile") // case-insensitive Setting & Getting
-if viper.GetBool("verbose") {
-    fmt.Println("verbose enabled")
-}
-```
-
-### Accessing nested keys
-
-The accessor methods also accept formatted paths to deeply nested keys. For
-example, if the following JSON file is loaded:
-
-```json
-{
-    "host": {
-        "address": "localhost",
-        "port": 5799
-    },
-    "datastore": {
-        "metric": {
-            "host": "127.0.0.1",
-            "port": 3099
-        },
-        "warehouse": {
-            "host": "198.0.0.1",
-            "port": 2112
-        }
-    }
-}
-```
-
-Viper can access a nested field by passing a `.` delimited path of keys:
-
-```go
-GetString("datastore.metric.host") // (returns "127.0.0.1")
-```
-
-This obeys the precedence rules established above; the search for the path
-will cascade through the remaining configuration registries until found.
-
-For example, given this configuration file, both `datastore.metric.host` and
-`datastore.metric.port` are already defined (and may be overridden). If in addition
-`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
-
-However, if `datastore.metric` was overridden (by a flag, an environment variable,
-the `Set()` method, …) with an immediate value, then all sub-keys of
-`datastore.metric` become undefined; they are “shadowed” by the higher-priority
-configuration level.
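-
-A minimal sketch of this shadowing, using the JSON file above:
-
-```go
-viper.GetString("datastore.metric.host") // "127.0.0.1" (from the config file)
-
-viper.Set("datastore.metric", "overridden") // override the parent key with an immediate value
-
-viper.GetString("datastore.metric.host") // "" (the sub-key is now shadowed)
-```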
-Viper can access array indices by using numbers in the path. For example:
-
-```jsonc
-{
-    "host": {
-        "address": "localhost",
-        "ports": [
-            5799,
-            6029
-        ]
-    },
-    "datastore": {
-        "metric": {
-            "host": "127.0.0.1",
-            "port": 3099
-        },
-        "warehouse": {
-            "host": "198.0.0.1",
-            "port": 2112
-        }
-    }
-}
-
-GetInt("host.ports.1") // returns 6029
-```
-
-Lastly, if there exists a key that matches the delimited key path, its value
-will be returned instead. E.g.
-
-```jsonc
-{
-    "datastore.metric.host": "0.0.0.0",
-    "host": {
-        "address": "localhost",
-        "port": 5799
-    },
-    "datastore": {
-        "metric": {
-            "host": "127.0.0.1",
-            "port": 3099
-        },
-        "warehouse": {
-            "host": "198.0.0.1",
-            "port": 2112
-        }
-    }
-}
-
-GetString("datastore.metric.host") // returns "0.0.0.0"
-```
-
-### Extracting a sub-tree
-
-When developing reusable modules, it's often useful to extract a subset of the configuration
-and pass it to a module. This way the module can be instantiated more than once, with different configurations.
-
-For example, an application might use multiple different cache stores for different purposes:
-
-```yaml
-cache:
-  cache1:
-    max-items: 100
-    item-size: 64
-  cache2:
-    max-items: 200
-    item-size: 80
-```
-
-We could pass the cache name to a module (e.g. `NewCache("cache1")`),
-but it would require awkward concatenation for accessing config keys and would be less separated from the global config.
-
-So instead of doing that, let's pass a Viper instance to the constructor that represents a subset of the configuration:
-
-```go
-cache1Config := viper.Sub("cache.cache1")
-if cache1Config == nil { // Sub returns nil if the key cannot be found
-    panic("cache configuration not found")
-}
-
-cache1 := NewCache(cache1Config)
-```
-
-**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found.
-
-Internally, the `NewCache` function can address `max-items` and `item-size` keys directly:
-
-```go
-func NewCache(v *viper.Viper) *Cache {
-    return &Cache{
-        MaxItems: v.GetInt("max-items"),
-        ItemSize: v.GetInt("item-size"),
-    }
-}
-```
-
-The resulting code is easy to test, since it's decoupled from the main config structure,
-and easier to reuse (for the same reason).
-
-
-### Unmarshaling
-
-You also have the option of unmarshaling all or a specific value to a struct, map,
-etc.
-
-There are two methods to do this:
-
- * `Unmarshal(rawVal any) : error`
- * `UnmarshalKey(key string, rawVal any) : error`
-
-Example:
-
-```go
-type config struct {
-    Port    int
-    Name    string
-    PathMap string `mapstructure:"path_map"`
-}
-
-var C config
-
-err := viper.Unmarshal(&C)
-if err != nil {
-    t.Fatalf("unable to decode into struct, %v", err)
-}
-```
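-
-`UnmarshalKey` works the same way for a single subtree; a minimal sketch, assuming the loaded config has a `server` section with `enabled` and `token` keys:
-
-```go
-type serverConfig struct {
-    Enabled bool
-    Token   string
-}
-
-var sc serverConfig
-
-// Decode only the "server" subtree instead of the whole configuration.
-if err := viper.UnmarshalKey("server", &sc); err != nil {
-    log.Fatalf("unable to decode server config: %v", err)
-}
-```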
-
-If you want to unmarshal configuration where the keys themselves contain dots (the default key delimiter),
-you have to change the delimiter:
-
-```go
-v := viper.NewWithOptions(viper.KeyDelimiter("::"))
-
-v.SetDefault("chart::values", map[string]any{
-    "ingress": map[string]any{
-        "annotations": map[string]any{
-            "traefik.frontend.rule.type":                 "PathPrefix",
-            "traefik.ingress.kubernetes.io/ssl-redirect": "true",
-        },
-    },
-})
-
-type config struct {
-    Chart struct {
-        Values map[string]any
-    }
-}
-
-var C config
-
-v.Unmarshal(&C)
-```
-
-Viper also supports unmarshaling into embedded structs:
-
-```go
-/*
-Example config:
-
-module:
-    enabled: true
-    token: 89h3f98hbwf987h3f98wenf89ehf
-*/
-type config struct {
-    Module struct {
-        Enabled bool
-
-        moduleConfig `mapstructure:",squash"`
-    }
-}
-
-// moduleConfig could be in a module specific package
-type moduleConfig struct {
-    Token string
-}
-
-var C config
-
-err := viper.Unmarshal(&C)
-if err != nil {
-    t.Fatalf("unable to decode into struct, %v", err)
-}
-```
-
-Viper uses [github.com/go-viper/mapstructure](https://github.com/go-viper/mapstructure) under the hood for unmarshaling values, which uses `mapstructure` tags by default.
-
-### Decoding custom formats
-
-A frequently requested feature for Viper is adding more value formats and decoders.
-For example, parsing character (dot, comma, semicolon, etc.) separated strings into slices.
-
-This is already available in Viper using mapstructure decode hooks.
-
-Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/).
-
-### Marshalling to string
-
-You may need to marshal all the settings held in viper into a string rather than write them to a file.
-You can use your favorite format's marshaller with the config returned by `AllSettings()`.
-
-```go
-import (
-    yaml "gopkg.in/yaml.v2"
-    // ...
-)
-
-func yamlStringSettings() string {
-    c := viper.AllSettings()
-    bs, err := yaml.Marshal(c)
-    if err != nil {
-        log.Fatalf("unable to marshal config to YAML: %v", err)
-    }
-    return string(bs)
-}
-```
-
-## Viper or Vipers?
-
-Viper comes with a global instance (singleton) out of the box.
-
-Although it makes setting up configuration easy,
-using it is generally discouraged as it makes testing harder and can lead to unexpected behavior.
-
-The best practice is to initialize a Viper instance and pass that around when necessary.
-
-The global instance _MAY_ be deprecated in the future.
-See [#1855](https://github.com/spf13/viper/issues/1855) for more details.
-
-### Working with multiple vipers
-
-You can also create many different vipers for use in your application. Each will
-have its own unique set of configurations and values. Each can read from a
-different config file, key value store, etc. All of the functions that the viper
-package supports are mirrored as methods on a viper.
-
-Example:
-
-```go
-x := viper.New()
-y := viper.New()
-
-x.SetDefault("ContentDir", "content")
-y.SetDefault("ContentDir", "foobar")
-
-//...
-```
-
-When working with multiple vipers, it is up to the user to keep track of the
-different vipers.
-
-
-## Q & A
-
-### Why is it called “Viper”?
-
-A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
-to [Cobra](https://github.com/spf13/cobra). While both can operate completely
-independently, together they make a powerful pair to handle much of your
-application foundation needs.
-
-### Why is it called “Cobra”?
-
-Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
-
-### Does Viper support case sensitive keys?
-
-**tl;dr:** No.
-
-Viper merges configuration from various sources, many of which are either case insensitive or use different casing than the rest of the sources (e.g. env vars).
-In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive.
-
-There have been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much.
-
-You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9
-
-### Is it safe to concurrently read and write to a viper?
-
-No, you will need to synchronize access to the viper yourself (for example, by using the `sync` package). Concurrent reads and writes can cause a panic.
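-
-A minimal sketch of such synchronization, wrapping a viper instance in a `sync.RWMutex`:
-
-```go
-type safeConfig struct {
-    mu sync.RWMutex
-    v  *viper.Viper
-}
-
-func (c *safeConfig) Set(key string, value any) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    c.v.Set(key, value)
-}
-
-func (c *safeConfig) GetString(key string) string {
-    c.mu.RLock()
-    defer c.mu.RUnlock()
-    return c.v.GetString(key)
-}
-```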
-
-## Troubleshooting
-
-See [TROUBLESHOOTING.md](TROUBLESHOOTING.md).
-
-## Development
-
-**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).**
-
-_Alternatively, install [Go](https://go.dev/dl/) on your computer then run `make deps` to install the rest of the dependencies._
-
-Run the test suite:
-
-```shell
-make test
-```
-
-Run linters:
-
-```shell
-make lint # pass -j option to run them in parallel
-```
-
-Some linter violations can automatically be fixed:
-
-```shell
-make fmt
-```
-
-## License
-
-The project is licensed under the [MIT License](LICENSE).
diff --git a/openshift/tools/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/openshift/tools/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
deleted file mode 100644
index b68993d412..0000000000
--- a/openshift/tools/vendor/github.com/spf13/viper/TROUBLESHOOTING.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Troubleshooting
-
-## Unmarshaling doesn't work
-
-The most common reason for this issue is improper use of struct tags (e.g. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values, which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags.
-
-## Cannot find package
-
-Viper installation seems to fail a lot lately with the following (or a similar) error:
-
-```
-cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of:
-/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT)
-/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH)
-```
-
-As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`.
-Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies.
-While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch).
-
-The solution is easy: switch to using Go Modules.
-Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that.
-
-**tl;dr:** `export GO111MODULE=on`
-
-## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file
-
-This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740).
-
-Potential solutions are:
-
-1. Quoting values resolved as boolean
-1. Upgrading to YAML v3 (for the time being this is possible by passing the `viper_yaml3` tag to your build)
diff --git a/openshift/tools/vendor/github.com/spf13/viper/UPDATES.md b/openshift/tools/vendor/github.com/spf13/viper/UPDATES.md
deleted file mode 100644
index ccf413ed7e..0000000000
--- a/openshift/tools/vendor/github.com/spf13/viper/UPDATES.md
+++ /dev/null
@@ -1,126 +0,0 @@
-# Update Log
-
-**This document details any major updates required to use new features or improvements in Viper.**
-
-## v1.20.x
-
-### New file searching API
-
-Viper now includes a new file searching API that allows users to customize how Viper looks for config files.
-
-Viper accepts a custom [`Finder`](https://pkg.go.dev/github.com/spf13/viper#Finder) interface implementation:
-
-```go
-// Finder looks for files and directories in an [afero.Fs] filesystem.
-type Finder interface {
-    Find(fsys afero.Fs) ([]string, error)
-}
-```
-
-It is supposed to return a list of paths to config files.
-
-The default implementation uses [github.com/sagikazarmark/locafero](https://github.com/sagikazarmark/locafero) under the hood.
-
-You can supply your own implementation using `WithFinder`:
-
-```go
-v := viper.NewWithOptions(
-    viper.WithFinder(&MyFinder{}),
-)
-```
-
-For more information, check out the [Finder examples](https://pkg.go.dev/github.com/spf13/viper#Finder)
-and the [documentation](https://pkg.go.dev/github.com/sagikazarmark/locafero) for the locafero package.
-
-### New encoding API
-
-Viper now allows customizing the encoding layer by providing an API for encoding and decoding configuration data:
-
-```go
-// Encoder encodes Viper's internal data structures into a byte representation.
-// It's primarily used for encoding a map[string]any into a file format.
-type Encoder interface {
-    Encode(v map[string]any) ([]byte, error)
-}
-
-// Decoder decodes the contents of a byte slice into Viper's internal data structures.
-// It's primarily used for decoding contents of a file into a map[string]any.
-type Decoder interface {
-    Decode(b []byte, v map[string]any) error
-}
-
-// Codec combines [Encoder] and [Decoder] interfaces.
-type Codec interface { - Encoder - Decoder -} -``` - -By default, Viper includes the following codecs: - -- JSON -- TOML -- YAML -- Dotenv - -The rest of the codecs are moved to [github.com/go-viper/encoding](https://github.com/go-viper/encoding) - -Customizing the encoding layer is possible by providing a custom registry of codecs: - -- [Encoder](https://pkg.go.dev/github.com/spf13/viper#Encoder) -> [EncoderRegistry](https://pkg.go.dev/github.com/spf13/viper#EncoderRegistry) -- [Decoder](https://pkg.go.dev/github.com/spf13/viper#Decoder) -> [DecoderRegistry](https://pkg.go.dev/github.com/spf13/viper#DecoderRegistry) -- [Codec](https://pkg.go.dev/github.com/spf13/viper#Codec) -> [CodecRegistry](https://pkg.go.dev/github.com/spf13/viper#CodecRegistry) - -You can supply the registry of codecs to Viper using the appropriate `With*Registry` function: - -```go -codecRegistry := viper.NewCodecRegistry() - -codecRegistry.RegisterCodec("myformat", &MyCodec{}) - -v := viper.NewWithOptions( - viper.WithCodecRegistry(codecRegistry), -) -``` - -### BREAKING: HCL, Java properties, INI removed from core - -In order to reduce third-party dependencies, Viper dropped support for the following formats from the core: - -- HCL -- Java properties -- INI - -You can still use these formats though by importing them from [github.com/go-viper/encoding](https://github.com/go-viper/encoding): - -```go -import ( - "github.com/go-viper/encoding/hcl" - "github.com/go-viper/encoding/javaproperties" - "github.com/go-viper/encoding/ini" -) - -codecRegistry := viper.NewCodecRegistry() - -{ - codec := hcl.Codec{} - - codecRegistry.RegisterCodec("hcl", codec) - codecRegistry.RegisterCodec("tfvars", codec) - -} - -{ - codec := &javaproperties.Codec{} - - codecRegistry.RegisterCodec("properties", codec) - codecRegistry.RegisterCodec("props", codec) - codecRegistry.RegisterCodec("prop", codec) -} - -codecRegistry.RegisterCodec("ini", ini.Codec{}) - -v := viper.NewWithOptions( - viper.WithCodecRegistry(codecRegistry), -) -``` diff --git a/openshift/tools/vendor/github.com/spf13/viper/encoding.go b/openshift/tools/vendor/github.com/spf13/viper/encoding.go deleted file mode 100644 index a7da55860e..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/encoding.go +++ /dev/null @@ -1,181 +0,0 @@ -package viper - -import ( - "errors" - "strings" - "sync" - - "github.com/spf13/viper/internal/encoding/dotenv" - "github.com/spf13/viper/internal/encoding/json" - "github.com/spf13/viper/internal/encoding/toml" - "github.com/spf13/viper/internal/encoding/yaml" -) - -// Encoder encodes Viper's internal data structures into a byte representation. -// It's primarily used for encoding a map[string]any into a file format. -type Encoder interface { - Encode(v map[string]any) ([]byte, error) -} - -// Decoder decodes the contents of a byte slice into Viper's internal data structures. -// It's primarily used for decoding contents of a file into a map[string]any. -type Decoder interface { - Decode(b []byte, v map[string]any) error -} - -// Codec combines [Encoder] and [Decoder] interfaces. -type Codec interface { - Encoder - Decoder -} - -// TODO: consider adding specific errors for not found scenarios - -// EncoderRegistry returns an [Encoder] for a given format. -// -// Format is case-insensitive. -// -// [EncoderRegistry] returns an error if no [Encoder] is registered for the format. -type EncoderRegistry interface { - Encoder(format string) (Encoder, error) -} - -// DecoderRegistry returns an [Decoder] for a given format. 
-// -// Format is case-insensitive. -// -// [DecoderRegistry] returns an error if no [Decoder] is registered for the format. -type DecoderRegistry interface { - Decoder(format string) (Decoder, error) -} - -// [CodecRegistry] combines [EncoderRegistry] and [DecoderRegistry] interfaces. -type CodecRegistry interface { - EncoderRegistry - DecoderRegistry -} - -// WithEncoderRegistry sets a custom [EncoderRegistry]. -func WithEncoderRegistry(r EncoderRegistry) Option { - return optionFunc(func(v *Viper) { - if r == nil { - return - } - - v.encoderRegistry = r - }) -} - -// WithDecoderRegistry sets a custom [DecoderRegistry]. -func WithDecoderRegistry(r DecoderRegistry) Option { - return optionFunc(func(v *Viper) { - if r == nil { - return - } - - v.decoderRegistry = r - }) -} - -// WithCodecRegistry sets a custom [EncoderRegistry] and [DecoderRegistry]. -func WithCodecRegistry(r CodecRegistry) Option { - return optionFunc(func(v *Viper) { - if r == nil { - return - } - - v.encoderRegistry = r - v.decoderRegistry = r - }) -} - -// DefaultCodecRegistry is a simple implementation of [CodecRegistry] that allows registering custom [Codec]s. -type DefaultCodecRegistry struct { - codecs map[string]Codec - - mu sync.RWMutex - once sync.Once -} - -// NewCodecRegistry returns a new [CodecRegistry], ready to accept custom [Codec]s. -func NewCodecRegistry() *DefaultCodecRegistry { - r := &DefaultCodecRegistry{} - - r.init() - - return r -} - -func (r *DefaultCodecRegistry) init() { - r.once.Do(func() { - r.codecs = map[string]Codec{} - }) -} - -// RegisterCodec registers a custom [Codec]. -// -// Format is case-insensitive. -func (r *DefaultCodecRegistry) RegisterCodec(format string, codec Codec) error { - r.init() - - r.mu.Lock() - defer r.mu.Unlock() - - r.codecs[strings.ToLower(format)] = codec - - return nil -} - -// Encoder implements the [EncoderRegistry] interface. -// -// Format is case-insensitive. -func (r *DefaultCodecRegistry) Encoder(format string) (Encoder, error) { - encoder, ok := r.codec(format) - if !ok { - return nil, errors.New("encoder not found for this format") - } - - return encoder, nil -} - -// Decoder implements the [DecoderRegistry] interface. -// -// Format is case-insensitive. -func (r *DefaultCodecRegistry) Decoder(format string) (Decoder, error) { - decoder, ok := r.codec(format) - if !ok { - return nil, errors.New("decoder not found for this format") - } - - return decoder, nil -} - -func (r *DefaultCodecRegistry) codec(format string) (Codec, bool) { - r.mu.Lock() - defer r.mu.Unlock() - - format = strings.ToLower(format) - - if r.codecs != nil { - codec, ok := r.codecs[format] - if ok { - return codec, true - } - } - - switch format { - case "yaml", "yml": - return yaml.Codec{}, true - - case "json": - return json.Codec{}, true - - case "toml": - return toml.Codec{}, true - - case "dotenv", "env": - return &dotenv.Codec{}, true - } - - return nil, false -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/experimental.go b/openshift/tools/vendor/github.com/spf13/viper/experimental.go deleted file mode 100644 index 6e19e8a100..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/experimental.go +++ /dev/null @@ -1,8 +0,0 @@ -package viper - -// ExperimentalBindStruct tells Viper to use the new bind struct feature. 
-func ExperimentalBindStruct() Option { - return optionFunc(func(v *Viper) { - v.experimentalBindStruct = true - }) -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/file.go b/openshift/tools/vendor/github.com/spf13/viper/file.go deleted file mode 100644 index 50a40581d0..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/file.go +++ /dev/null @@ -1,104 +0,0 @@ -package viper - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/sagikazarmark/locafero" - "github.com/spf13/afero" -) - -// ExperimentalFinder tells Viper to use the new Finder interface for finding configuration files. -func ExperimentalFinder() Option { - return optionFunc(func(v *Viper) { - v.experimentalFinder = true - }) -} - -// Search for a config file. -func (v *Viper) findConfigFile() (string, error) { - finder := v.finder - - if finder == nil && v.experimentalFinder { - var names []string - - if v.configType != "" { - names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...) - } else { - names = locafero.NameWithExtensions(v.configName, SupportedExts...) - } - - finder = locafero.Finder{ - Paths: v.configPaths, - Names: names, - Type: locafero.FileTypeFile, - } - } - - if finder != nil { - return v.findConfigFileWithFinder(finder) - } - - return v.findConfigFileOld() -} - -func (v *Viper) findConfigFileWithFinder(finder Finder) (string, error) { - results, err := finder.Find(v.fs) - if err != nil { - return "", err - } - - if len(results) == 0 { - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} - } - - // We call clean on the final result to ensure that the path is in its canonical form. - // This is mostly for consistent path handling and to make sure tests pass. - return results[0], nil -} - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFileOld() (string, error) { - v.logger.Info("searching for config in paths", "paths", v.configPaths) - - for _, cp := range v.configPaths { - file := v.searchInPath(cp) - if file != "" { - return file, nil - } - } - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} -} - -func (v *Viper) searchInPath(in string) (filename string) { - v.logger.Debug("searching for config in path", "path", in) - for _, ext := range SupportedExts { - v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { - v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext)) - return filepath.Join(in, v.configName+"."+ext) - } - } - - if v.configType != "" { - if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { - return filepath.Join(in, v.configName) - } - } - - return "" -} - -// exists checks if file exists. -func exists(fs afero.Fs, path string) (bool, error) { - stat, err := fs.Stat(path) - if err == nil { - return !stat.IsDir(), nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/finder.go b/openshift/tools/vendor/github.com/spf13/viper/finder.go deleted file mode 100644 index 9b203ea69a..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/finder.go +++ /dev/null @@ -1,55 +0,0 @@ -package viper - -import ( - "errors" - - "github.com/spf13/afero" -) - -// WithFinder sets a custom [Finder]. 
-func WithFinder(f Finder) Option { - return optionFunc(func(v *Viper) { - if f == nil { - return - } - - v.finder = f - }) -} - -// Finder looks for files and directories in an [afero.Fs] filesystem. -type Finder interface { - Find(fsys afero.Fs) ([]string, error) -} - -// Finders combines multiple finders into one. -func Finders(finders ...Finder) Finder { - return &combinedFinder{finders: finders} -} - -// combinedFinder is a Finder that combines multiple finders. -type combinedFinder struct { - finders []Finder -} - -// Find implements the [Finder] interface. -func (c *combinedFinder) Find(fsys afero.Fs) ([]string, error) { - var results []string - var errs []error - - for _, finder := range c.finders { - if finder == nil { - continue - } - - r, err := finder.Find(fsys) - if err != nil { - errs = append(errs, err) - continue - } - - results = append(results, r...) - } - - return results, errors.Join(errs...) -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/flags.go b/openshift/tools/vendor/github.com/spf13/viper/flags.go deleted file mode 100644 index de033ed58f..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/flags.go +++ /dev/null @@ -1,57 +0,0 @@ -package viper - -import "github.com/spf13/pflag" - -// FlagValueSet is an interface that users can implement -// to bind a set of flags to viper. -type FlagValueSet interface { - VisitAll(fn func(FlagValue)) -} - -// FlagValue is an interface that users can implement -// to bind different flags to viper. -type FlagValue interface { - HasChanged() bool - Name() string - ValueString() string - ValueType() string -} - -// pflagValueSet is a wrapper around *pflag.ValueSet -// that implements FlagValueSet. -type pflagValueSet struct { - flags *pflag.FlagSet -} - -// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet. -func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) { - p.flags.VisitAll(func(flag *pflag.Flag) { - fn(pflagValue{flag}) - }) -} - -// pflagValue is a wrapper around *pflag.flag -// that implements FlagValue. -type pflagValue struct { - flag *pflag.Flag -} - -// HasChanged returns whether the flag has changes or not. -func (p pflagValue) HasChanged() bool { - return p.flag.Changed -} - -// Name returns the name of the flag. -func (p pflagValue) Name() string { - return p.flag.Name -} - -// ValueString returns the value of the flag as a string. -func (p pflagValue) ValueString() string { - return p.flag.Value.String() -} - -// ValueType returns the type of the flag as a string. 
-func (p pflagValue) ValueType() string { - return p.flag.Value.Type() -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/flake.lock b/openshift/tools/vendor/github.com/spf13/viper/flake.lock deleted file mode 100644 index d76dfbddd1..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/flake.lock +++ /dev/null @@ -1,472 +0,0 @@ -{ - "nodes": { - "cachix": { - "inputs": { - "devenv": "devenv_2", - "flake-compat": [ - "devenv", - "flake-compat" - ], - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" - ] - }, - "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", - "owner": "cachix", - "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "cachix", - "type": "github" - } - }, - "devenv": { - "inputs": { - "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1724763216, - "narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=", - "owner": "cachix", - "repo": "devenv", - "rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - "flake-compat" - ], - "nix": "nix", - "nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - "cachix", - "pre-commit-hooks" - ] - }, - "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", - "owner": "cachix", - "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "python-rewrite", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1722555600, - "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "inputs": { - 
"systems": "systems_2" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1709087332, - "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "nix": { - "inputs": { - "flake-compat": "flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression_2" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1722555339, - "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": 
"sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", - "owner": "cachix", - "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "rolling", - "repo": "devenv-nixpkgs", - "type": "github" - } - }, - "nixpkgs_3": { - "locked": { - "lastModified": 1724748588, - "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", - "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "poetry2nix", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - "lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/flake.nix b/openshift/tools/vendor/github.com/spf13/viper/flake.nix deleted file mode 100644 index 52ad7d5814..0000000000 --- 
a/openshift/tools/vendor/github.com/spf13/viper/flake.nix +++ /dev/null @@ -1,57 +0,0 @@ -{ - description = "Viper"; - - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.go_1_23; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - yamllint.enable = true; - }; - - packages = with pkgs; [ - gnumake - - golangci-lint - yamllint - ]; - - scripts = { - versions.exec = '' - go version - golangci-lint version - ''; - }; - - enterShell = '' - versions - ''; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - }; - }; - }; -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go deleted file mode 100644 index 3ebc76f029..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go +++ /dev/null @@ -1,61 +0,0 @@ -package dotenv - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/subosito/gotenv" -) - -const keyDelimiter = "_" - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables -// (commonly called as dotenv format). -type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - flattened := map[string]any{} - - flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - var buf bytes.Buffer - - for _, key := range keys { - _, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key])) - if err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -func (Codec) Decode(b []byte, v map[string]any) error { - var buf bytes.Buffer - - _, err := buf.Write(b) - if err != nil { - return err - } - - env, err := gotenv.StrictParse(&buf) - if err != nil { - return err - } - - for key, value := range env { - v[key] = value - } - - return nil -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go deleted file mode 100644 index 8bfe0a9de2..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go +++ /dev/null @@ -1,41 +0,0 @@ -package dotenv - -import ( - "strings" - - "github.com/spf13/cast" -) - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. 
-func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/json/codec.go deleted file mode 100644 index da7546b5a1..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/json/codec.go +++ /dev/null @@ -1,17 +0,0 @@ -package json - -import ( - "encoding/json" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. -type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - // TODO: expose prefix and indent in the Codec as setting? - return json.MarshalIndent(v, "", " ") -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return json.Unmarshal(b, &v) -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go deleted file mode 100644 index c70aa8d280..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go +++ /dev/null @@ -1,16 +0,0 @@ -package toml - -import ( - "github.com/pelletier/go-toml/v2" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. -type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - return toml.Marshal(v) -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return toml.Unmarshal(b, &v) -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go deleted file mode 100644 index 0368792499..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package yaml - -import "gopkg.in/yaml.v3" - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. 
-type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - return yaml.Marshal(v) -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return yaml.Unmarshal(b, &v) -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/features/bind_struct.go b/openshift/tools/vendor/github.com/spf13/viper/internal/features/bind_struct.go deleted file mode 100644 index 89302c2164..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/features/bind_struct.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build viper_bind_struct - -package features - -const BindStruct = true diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go b/openshift/tools/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go deleted file mode 100644 index edfaf73b64..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !viper_bind_struct - -package features - -const BindStruct = false diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/features/finder.go b/openshift/tools/vendor/github.com/spf13/viper/internal/features/finder.go deleted file mode 100644 index 983ea3a9d7..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/features/finder.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build viper_finder - -package features - -const Finder = true diff --git a/openshift/tools/vendor/github.com/spf13/viper/internal/features/finder_default.go b/openshift/tools/vendor/github.com/spf13/viper/internal/features/finder_default.go deleted file mode 100644 index 89bcb06ee1..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/internal/features/finder_default.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !viper_finder - -package features - -const Finder = false diff --git a/openshift/tools/vendor/github.com/spf13/viper/logger.go b/openshift/tools/vendor/github.com/spf13/viper/logger.go deleted file mode 100644 index 828042f29a..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/logger.go +++ /dev/null @@ -1,31 +0,0 @@ -package viper - -import ( - "context" - "log/slog" -) - -// WithLogger sets a custom logger. -func WithLogger(l *slog.Logger) Option { - return optionFunc(func(v *Viper) { - v.logger = l - }) -} - -type discardHandler struct{} - -func (n *discardHandler) Enabled(_ context.Context, _ slog.Level) bool { - return false -} - -func (n *discardHandler) Handle(_ context.Context, _ slog.Record) error { - return nil -} - -func (n *discardHandler) WithAttrs(_ []slog.Attr) slog.Handler { - return n -} - -func (n *discardHandler) WithGroup(_ string) slog.Handler { - return n -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/remote.go b/openshift/tools/vendor/github.com/spf13/viper/remote.go deleted file mode 100644 index bdde7de267..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/remote.go +++ /dev/null @@ -1,256 +0,0 @@ -package viper - -import ( - "bytes" - "fmt" - "io" - "reflect" - "slices" -) - -// SupportedRemoteProviders are universally supported remote providers. 
-var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} - -func resetRemote() { - SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} -} - -type remoteConfigFactory interface { - Get(rp RemoteProvider) (io.Reader, error) - Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) -} - -type RemoteResponse struct { - Value []byte - Error error -} - -// RemoteConfig is optional, see the remote package. -var RemoteConfig remoteConfigFactory - -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. -type UnsupportedRemoteProviderError string - -// Error returns the formatted remote provider error. -func (str UnsupportedRemoteProviderError) Error() string { - return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) -} - -// RemoteConfigError denotes encountering an error while trying to -// pull the configuration from the remote provider. -type RemoteConfigError string - -// Error returns the formatted remote provider error. -func (rce RemoteConfigError) Error() string { - return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) -} - -type defaultRemoteProvider struct { - provider string - endpoint string - path string - secretKeyring string -} - -func (rp defaultRemoteProvider) Provider() string { - return rp.provider -} - -func (rp defaultRemoteProvider) Endpoint() string { - return rp.endpoint -} - -func (rp defaultRemoteProvider) Path() string { - return rp.path -} - -func (rp defaultRemoteProvider) SecretKeyring() string { - return rp.secretKeyring -} - -// RemoteProvider stores the configuration necessary -// to connect to a remote key/value store. -// Optional secretKeyring to unencrypt encrypted values -// can be provided. -type RemoteProvider interface { - Provider() string - Endpoint() string - Path() string - SecretKeyring() string -} - -// AddRemoteProvider adds a remote configuration source. -// Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -func AddRemoteProvider(provider, endpoint, path string) error { - return v.AddRemoteProvider(provider, endpoint, path) -} - -func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { - if !slices.Contains(SupportedRemoteProviders, provider) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -// AddSecureRemoteProvider adds a remote configuration source. -// Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port -// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. -func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) -} - -func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - if !slices.Contains(SupportedRemoteProviders, provider) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - secretKeyring: secretkeyring, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { - for _, y := range v.remoteProviders { - if reflect.DeepEqual(y, p) { - return true - } - } - return false -} - -// ReadRemoteConfig attempts to get configuration from a remote source -// and read it in the remote configuration registry. -func ReadRemoteConfig() error { return v.ReadRemoteConfig() } - -func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() -} - -func WatchRemoteConfig() error { return v.WatchRemoteConfig() } -func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() -} - -// Retrieve the first found remote configuration. -func (v *Viper) getKeyValueConfig() error { - if RemoteConfig == nil { - return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") - } - - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.getRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) - - continue - } - - v.kvstore = val - - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Get(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfigOnChannel() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - // Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. 
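// Aside — a sketch of the call sequence documented above; the endpoint and
// path are hypothetical. Remote support needs the blank import named in the
// RemoteConfigError message, and an explicit config type, since a remote
// payload carries no file extension (SetConfigType is part of viper's public
// API, defined outside this file):
//
//    package main
//
//    import (
//        "log"
//
//        "github.com/spf13/viper"
//        _ "github.com/spf13/viper/remote" // populates viper.RemoteConfig
//    )
//
//    func main() {
//        v := viper.New()
//        if err := v.AddRemoteProvider("etcd3", "http://127.0.0.1:2379", "/configs/myapp"); err != nil {
//            log.Fatal(err)
//        }
//        v.SetConfigType("json")
//        if err := v.ReadRemoteConfig(); err != nil {
//            log.Fatal(err)
//        }
//        log.Println(v.GetString("user"))
//    }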
-func (v *Viper) watchKeyValueConfig() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.watchRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) - - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Watch(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/util.go b/openshift/tools/vendor/github.com/spf13/viper/util.go deleted file mode 100644 index 2a08074bc7..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/util.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is a application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -package viper - -import ( - "fmt" - "log/slog" - "os" - "path/filepath" - "runtime" - "strings" - "unicode" - - "github.com/spf13/cast" -) - -// ConfigParseError denotes failing to parse configuration file. -type ConfigParseError struct { - err error -} - -// Error returns the formatted configuration error. -func (pe ConfigParseError) Error() string { - return fmt.Sprintf("While parsing config: %s", pe.err.Error()) -} - -// Unwrap returns the wrapped error. -func (pe ConfigParseError) Unwrap() error { - return pe.err -} - -// toCaseInsensitiveValue checks if the value is a map; -// if so, create a copy and lower-case the keys recursively. -func toCaseInsensitiveValue(value any) any { - switch v := value.(type) { - case map[any]any: - value = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]any: - value = copyAndInsensitiviseMap(v) - } - - return value -} - -// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of -// any map it makes case insensitive. 
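// Aside — illustrative (snippet; imports elided): the insensitivise helpers
// in this file are what make every public lookup case-insensitive:
//
//    v := viper.New()
//    v.Set("Database.HOST", "localhost")
//    fmt.Println(v.GetString("database.host")) // localhost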
-func copyAndInsensitiviseMap(m map[string]any) map[string]any { - nm := make(map[string]any) - - for key, val := range m { - lkey := strings.ToLower(key) - switch v := val.(type) { - case map[any]any: - nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]any: - nm[lkey] = copyAndInsensitiviseMap(v) - default: - nm[lkey] = v - } - } - - return nm -} - -func insensitiviseVal(val any) any { - switch v := val.(type) { - case map[any]any: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]any)) - case map[string]any: - // nested map: recursively insensitivise - insensitiviseMap(v) - case []any: - // nested array: recursively insensitivise - insensitiveArray(v) - } - return val -} - -func insensitiviseMap(m map[string]any) { - for key, val := range m { - val = insensitiviseVal(val) - lower := strings.ToLower(key) - if key != lower { - // remove old key (not lower-cased) - delete(m, key) - } - // update map - m[lower] = val - } -} - -func insensitiveArray(a []any) { - for i, val := range a { - a[i] = insensitiviseVal(val) - } -} - -func absPathify(logger *slog.Logger, inPath string) string { - logger.Info("trying to resolve absolute path", "path", inPath) - - if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { - inPath = userHomeDir() + inPath[5:] - } - - inPath = os.ExpandEnv(inPath) - - if filepath.IsAbs(inPath) { - return filepath.Clean(inPath) - } - - p, err := filepath.Abs(inPath) - if err == nil { - return filepath.Clean(p) - } - - logger.Error(fmt.Errorf("could not discover absolute path: %w", err).Error()) - - return "" -} - -func userHomeDir() string { - if runtime.GOOS == "windows" { - home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - if home == "" { - home = os.Getenv("USERPROFILE") - } - return home - } - return os.Getenv("HOME") -} - -func safeMul(a, b uint) uint { - c := a * b - if a > 1 && b > 1 && c/b != a { - return 0 - } - return c -} - -// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes. -func parseSizeInBytes(sizeStr string) uint { - sizeStr = strings.TrimSpace(sizeStr) - lastChar := len(sizeStr) - 1 - multiplier := uint(1) - - if lastChar > 0 { - if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' { - if lastChar > 1 { - switch unicode.ToLower(rune(sizeStr[lastChar-1])) { - case 'k': - multiplier = 1 << 10 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'm': - multiplier = 1 << 20 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'g': - multiplier = 1 << 30 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - default: - multiplier = 1 - sizeStr = strings.TrimSpace(sizeStr[:lastChar]) - } - } - } - } - - size := cast.ToInt(sizeStr) - if size < 0 { - size = 0 - } - - return safeMul(uint(size), multiplier) -} - -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! 
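// Aside — illustrative (snippet; imports elided): parseSizeInBytes above
// accepts kb/mb/gb suffixes (case-insensitive, optional space) with
// power-of-1024 multipliers, safeMul returns 0 on overflow, and the public
// surface is GetSizeInBytes:
//
//    v := viper.New()
//    v.Set("cache.max", "12 MB")
//    fmt.Println(v.GetSizeInBytes("cache.max")) // 12582912, i.e. 12 << 20
//
// Negative or unparsable sizes come back as 0.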
-func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} diff --git a/openshift/tools/vendor/github.com/spf13/viper/viper.go b/openshift/tools/vendor/github.com/spf13/viper/viper.go deleted file mode 100644 index f900e58b1a..0000000000 --- a/openshift/tools/vendor/github.com/spf13/viper/viper.go +++ /dev/null @@ -1,2031 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is an application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -// Each item takes precedence over the item below it: - -// overrides -// flag -// env -// config -// key/value store -// default - -package viper - -import ( - "bytes" - "encoding/csv" - "errors" - "fmt" - "io" - "log/slog" - "os" - "path/filepath" - "reflect" - "slices" - "strconv" - "strings" - "sync" - "time" - - "github.com/fsnotify/fsnotify" - "github.com/go-viper/mapstructure/v2" - "github.com/spf13/afero" - "github.com/spf13/cast" - "github.com/spf13/pflag" - - "github.com/spf13/viper/internal/features" -) - -// ConfigMarshalError happens when failing to marshal the configuration. -type ConfigMarshalError struct { - err error -} - -// Error returns the formatted configuration error. -func (e ConfigMarshalError) Error() string { - return fmt.Sprintf("While marshaling config: %s", e.err.Error()) -} - -var v *Viper - -func init() { - v = New() -} - -// UnsupportedConfigError denotes encountering an unsupported -// configuration filetype. -type UnsupportedConfigError string - -// Error returns the formatted configuration error. -func (str UnsupportedConfigError) Error() string { - return fmt.Sprintf("Unsupported Config Type %q", string(str)) -} - -// ConfigFileNotFoundError denotes failing to find configuration file. -type ConfigFileNotFoundError struct { - name, locations string -} - -// Error returns the formatted configuration error. -func (fnfe ConfigFileNotFoundError) Error() string { - return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) -} - -// ConfigFileAlreadyExistsError denotes failure to write new configuration file. -type ConfigFileAlreadyExistsError string - -// Error returns the formatted error when configuration already exists. -func (faee ConfigFileAlreadyExistsError) Error() string { - return fmt.Sprintf("Config File %q Already Exists", string(faee)) -} - -// A DecoderConfigOption can be passed to viper.Unmarshal to configure -// mapstructure.DecoderConfig options. 
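// Aside — illustrative of the precedence list in the file header above; each
// source wins over the ones below it (MYAPP_USER is a hypothetical variable;
// snippet, imports elided):
//
//    v := viper.New()
//    v.SetDefault("user", "default")  // lowest priority
//    os.Setenv("MYAPP_USER", "from-env")
//    v.SetEnvPrefix("myapp")
//    _ = v.BindEnv("user")            // env beats default
//    fmt.Println(v.GetString("user")) // from-env
//    v.Set("user", "override")        // Set() beats everything
//    fmt.Println(v.GetString("user")) // override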
-type DecoderConfigOption func(*mapstructure.DecoderConfig) - -// DecodeHook returns a DecoderConfigOption which overrides the default -// DecoderConfig.DecodeHook value, the default is: -// -// mapstructure.ComposeDecodeHookFunc( -// mapstructure.StringToTimeDurationHookFunc(), -// mapstructure.StringToSliceHookFunc(","), -// ) -func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { - return func(c *mapstructure.DecoderConfig) { - c.DecodeHook = hook - } -} - -// Viper is a prioritized configuration registry. It -// maintains a set of configuration sources, fetches -// values to populate those, and provides them according -// to the source's priority. -// The priority of the sources is the following: -// 1. overrides -// 2. flags -// 3. env. variables -// 4. config file -// 5. key/value store -// 6. defaults -// -// For example, if values from the following sources were loaded: -// -// Defaults : { -// "secret": "", -// "user": "default", -// "endpoint": "https://localhost" -// } -// Config : { -// "user": "root" -// "secret": "defaultsecret" -// } -// Env : { -// "secret": "somesecretkey" -// } -// -// The resulting config will have the following values: -// -// { -// "secret": "somesecretkey", -// "user": "root", -// "endpoint": "https://localhost" -// } -// -// Note: Vipers are not safe for concurrent Get() and Set() operations. -type Viper struct { - // Delimiter that separates a list of keys - // used to access a nested value in one go - keyDelim string - - // A set of paths to look for the config file in - configPaths []string - - // The filesystem to read config from. - fs afero.Fs - - finder Finder - - // A set of remote providers to search for the configuration - remoteProviders []*defaultRemoteProvider - - // Name of file to look for inside the path - configName string - configFile string - configType string - configPermissions os.FileMode - envPrefix string - - automaticEnvApplied bool - envKeyReplacer StringReplacer - allowEmptyEnv bool - - parents []string - config map[string]any - override map[string]any - defaults map[string]any - kvstore map[string]any - pflags map[string]FlagValue - env map[string][]string - aliases map[string]string - typeByDefValue bool - - onConfigChange func(fsnotify.Event) - - logger *slog.Logger - - encoderRegistry EncoderRegistry - decoderRegistry DecoderRegistry - - decodeHook mapstructure.DecodeHookFunc - - experimentalFinder bool - experimentalBindStruct bool -} - -// New returns an initialized Viper instance. -func New() *Viper { - v := new(Viper) - v.keyDelim = "." - v.configName = "config" - v.configPermissions = os.FileMode(0o644) - v.fs = afero.NewOsFs() - v.config = make(map[string]any) - v.parents = []string{} - v.override = make(map[string]any) - v.defaults = make(map[string]any) - v.kvstore = make(map[string]any) - v.pflags = make(map[string]FlagValue) - v.env = make(map[string][]string) - v.aliases = make(map[string]string) - v.typeByDefValue = false - v.logger = slog.New(&discardHandler{}) - - codecRegistry := NewCodecRegistry() - - v.encoderRegistry = codecRegistry - v.decoderRegistry = codecRegistry - - v.experimentalFinder = features.Finder - v.experimentalBindStruct = features.BindStruct - - return v -} - -// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney. 
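// Aside — illustrative (snippet; imports elided): the net effect of an
// Option is easiest to see with KeyDelimiter, useful when keys themselves
// contain dots:
//
//    v := viper.NewWithOptions(viper.KeyDelimiter("::"))
//    v.SetDefault("chart::values", map[string]any{"image.tag": "v1"})
//    fmt.Println(v.GetStringMap("chart::values")["image.tag"]) // v1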
-// If you're unfamiliar with this style, -// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and -// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis. -type Option interface { - apply(v *Viper) -} - -type optionFunc func(v *Viper) - -func (fn optionFunc) apply(v *Viper) { - fn(v) -} - -// KeyDelimiter sets the delimiter used for determining key parts. -// By default it's value is ".". -func KeyDelimiter(d string) Option { - return optionFunc(func(v *Viper) { - v.keyDelim = d - }) -} - -// StringReplacer applies a set of replacements to a string. -type StringReplacer interface { - // Replace returns a copy of s with all replacements performed. - Replace(s string) string -} - -// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys. -func EnvKeyReplacer(r StringReplacer) Option { - return optionFunc(func(v *Viper) { - if r == nil { - return - } - - v.envKeyReplacer = r - }) -} - -// WithDecodeHook sets a default decode hook for mapstructure. -func WithDecodeHook(h mapstructure.DecodeHookFunc) Option { - return optionFunc(func(v *Viper) { - if h == nil { - return - } - - v.decodeHook = h - }) -} - -// NewWithOptions creates a new Viper instance. -func NewWithOptions(opts ...Option) *Viper { - v := New() - - for _, opt := range opts { - opt.apply(v) - } - - return v -} - -// SetOptions sets the options on the global Viper instance. -// -// Be careful when using this function: subsequent calls may override options you set. -// It's always better to use a local Viper instance. -func SetOptions(opts ...Option) { - for _, opt := range opts { - opt.apply(v) - } -} - -// Reset is intended for testing, will reset all to default settings. -// In the public interface for the viper package so applications -// can use it in their testing as well. -func Reset() { - v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - - resetRemote() -} - -// SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - -// OnConfigChange sets the event handler that is called when a config file changes. -func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } - -// OnConfigChange sets the event handler that is called when a config file changes. -func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { - v.onConfigChange = run -} - -// WatchConfig starts watching a config file for changes. -func WatchConfig() { v.WatchConfig() } - -// WatchConfig starts watching a config file for changes. 
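// Aside — an illustrative sketch; the config path is hypothetical (snippet,
// imports elided). The directory-level watch implemented below pairs with
// OnConfigChange, and the handler should be registered before watching
// starts:
//
//    v := viper.New()
//    v.SetConfigFile("/etc/myapp/config.yaml")
//    _ = v.ReadInConfig()
//    v.OnConfigChange(func(e fsnotify.Event) {
//        log.Println("config reloaded:", e.Name)
//    })
//    v.WatchConfig() // watches the directory so renames/atomic saves are caught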
-func (v *Viper) WatchConfig() { - initWG := sync.WaitGroup{} - initWG.Add(1) - go func() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - v.logger.Error(fmt.Sprintf("failed to create watcher: %s", err)) - os.Exit(1) - } - defer watcher.Close() - // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way - filename, err := v.getConfigFile() - if err != nil { - v.logger.Error(fmt.Sprintf("get config file: %s", err)) - initWG.Done() - return - } - - configFile := filepath.Clean(filename) - configDir, _ := filepath.Split(configFile) - realConfigFile, _ := filepath.EvalSymlinks(filename) - - eventsWG := sync.WaitGroup{} - eventsWG.Add(1) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { // 'Events' channel is closed - eventsWG.Done() - return - } - currentConfigFile, _ := filepath.EvalSymlinks(filename) - // we only care about the config file with the following cases: - // 1 - if the config file was modified or created - // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement) - if (filepath.Clean(event.Name) == configFile && - (event.Has(fsnotify.Write) || event.Has(fsnotify.Create))) || - (currentConfigFile != "" && currentConfigFile != realConfigFile) { - realConfigFile = currentConfigFile - err := v.ReadInConfig() - if err != nil { - v.logger.Error(fmt.Sprintf("read config file: %s", err)) - } - if v.onConfigChange != nil { - v.onConfigChange(event) - } - } else if filepath.Clean(event.Name) == configFile && event.Has(fsnotify.Remove) { - eventsWG.Done() - return - } - - case err, ok := <-watcher.Errors: - if ok { // 'Errors' channel is not closed - v.logger.Error(fmt.Sprintf("watcher error: %s", err)) - } - eventsWG.Done() - return - } - } - }() - watcher.Add(configDir) - initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on... - eventsWG.Wait() // now, wait for event loop to end in this go-routine... - }() - initWG.Wait() // make sure that the go routine above fully ended before returning -} - -// SetConfigFile explicitly defines the path, name and extension of the config file. -// Viper will use this and not check any of the config paths. -func SetConfigFile(in string) { v.SetConfigFile(in) } - -func (v *Viper) SetConfigFile(in string) { - if in != "" { - v.configFile = in - } -} - -// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. -// E.g. if your prefix is "spf", the env registry will look for env -// variables that start with "SPF_". -func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } - -func (v *Viper) SetEnvPrefix(in string) { - if in != "" { - v.envPrefix = in - } -} - -func GetEnvPrefix() string { return v.GetEnvPrefix() } - -func (v *Viper) GetEnvPrefix() string { - return v.envPrefix -} - -func (v *Viper) mergeWithEnvPrefix(in string) string { - if v.envPrefix != "" { - return strings.ToUpper(v.envPrefix + "_" + in) - } - - return strings.ToUpper(in) -} - -// AllowEmptyEnv tells Viper to consider set, -// but empty environment variables as valid values instead of falling back. -// For backward compatibility reasons this is false by default. -func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } - -func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { - v.allowEmptyEnv = allowEmptyEnv -} - -// TODO: should getEnv logic be moved into find(). 
Can generalize the use of -// rewriting keys many things, Ex: Get('someKey') -> some_key -// (camel case to snake case for JSON keys perhaps) - -// getEnv is a wrapper around os.Getenv which replaces characters in the original -// key. This allows env vars which have different keys than the config object -// keys. -func (v *Viper) getEnv(key string) (string, bool) { - if v.envKeyReplacer != nil { - key = v.envKeyReplacer.Replace(key) - } - - val, ok := os.LookupEnv(key) - - return val, ok && (v.allowEmptyEnv || val != "") -} - -// ConfigFileUsed returns the file used to populate the config registry. -func ConfigFileUsed() string { return v.ConfigFileUsed() } -func (v *Viper) ConfigFileUsed() string { return v.configFile } - -// AddConfigPath adds a path for Viper to search for the config file in. -// Can be called multiple times to define multiple search paths. -func AddConfigPath(in string) { v.AddConfigPath(in) } - -func (v *Viper) AddConfigPath(in string) { - if v.finder != nil { - v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "AddConfigPath")) - } - - if in != "" { - absin := absPathify(v.logger, in) - - v.logger.Info("adding path to search paths", "path", absin) - if !slices.Contains(v.configPaths, absin) { - v.configPaths = append(v.configPaths, absin) - } - } -} - -// searchMap recursively searches for a value for path in source map. -// Returns nil if not found. -// Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchMap(source map[string]any, path []string) any { - if len(path) == 0 { - return source - } - - next, ok := source[path[0]] - if ok { - // Fast path - if len(path) == 1 { - return next - } - - // Nested case - switch next := next.(type) { - case map[any]any: - return v.searchMap(cast.ToStringMap(next), path[1:]) - case map[string]any: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - return v.searchMap(next, path[1:]) - default: - // got a value but nested key expected, return "nil" for not found - return nil - } - } - return nil -} - -// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice. -// -// While searchMap() considers each path element as a single map key or slice index, this -// function searches for, and prioritizes, merged path elements. -// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" -// is also defined, this latter value is returned for path ["foo", "bar"]. -// -// This should be useful only at config level (other maps may not contain dots -// in their keys). -// -// Note: This assumes that the path entries and map keys are lower cased. 
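// Aside — illustrative of how the env plumbing above composes (SPF_DB_HOST
// is a hypothetical variable; snippet, imports elided):
//
//    v := viper.New()
//    v.SetEnvPrefix("spf")  // keys resolve against SPF_*
//    v.AllowEmptyEnv(true)  // an exported-but-empty variable now counts as set
//    v.AutomaticEnv()       // every Get() consults the environment
//    v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
//    os.Setenv("SPF_DB_HOST", "localhost")
//    fmt.Println(v.GetString("db.host")) // localhost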
-func (v *Viper) searchIndexableWithPathPrefixes(source any, path []string) any { - if len(path) == 0 { - return source - } - - // search for path prefixes, starting from the longest one - for i := len(path); i > 0; i-- { - prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - - var val any - switch sourceIndexable := source.(type) { - case []any: - val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) - case map[string]any: - val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) - } - if val != nil { - return val - } - } - - // not found - return nil -} - -// searchSliceWithPathPrefixes searches for a value for path in sourceSlice -// -// This function is part of the searchIndexableWithPathPrefixes recurring search and -// should not be called directly from functions other than searchIndexableWithPathPrefixes. -func (v *Viper) searchSliceWithPathPrefixes( - sourceSlice []any, - prefixKey string, - pathIndex int, - path []string, -) any { - // if the prefixKey is not a number or it is out of bounds of the slice - index, err := strconv.Atoi(prefixKey) - if err != nil || len(sourceSlice) <= index { - return nil - } - - next := sourceSlice[index] - - // Fast path - if pathIndex == len(path) { - return next - } - - switch n := next.(type) { - case map[any]any: - return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]any, []any: - return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - - // not found - return nil -} - -// searchMapWithPathPrefixes searches for a value for path in sourceMap -// -// This function is part of the searchIndexableWithPathPrefixes recurring search and -// should not be called directly from functions other than searchIndexableWithPathPrefixes. -func (v *Viper) searchMapWithPathPrefixes( - sourceMap map[string]any, - prefixKey string, - pathIndex int, - path []string, -) any { - next, ok := sourceMap[prefixKey] - if !ok { - return nil - } - - // Fast path - if pathIndex == len(path) { - return next - } - - // Nested case - switch n := next.(type) { - case map[any]any: - return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]any, []any: - return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - - // not found - return nil -} - -// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere -// on its path in the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows” -// -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]any) string { - var parentVal any - for i := 1; i < len(path); i++ { - parentVal = v.searchMap(m, path[0:i]) - if parentVal == nil { - // not found, no need to add more path elements - return "" - } - switch parentVal.(type) { - case map[any]any: - continue - case map[string]any: - continue - default: - // parentVal is a regular value which shadows "path" - return strings.Join(path[0:i], v.keyDelim) - } - } - return "" -} - -// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere -// in a sub-path of the map. 
-// e.g., if "foo.bar" has a value in the given map, it “shadows” -// -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInFlatMap(path []string, mi any) string { - // unify input map - var m map[string]interface{} - switch miv := mi.(type) { - case map[string]string: - m = castMapStringToMapInterface(miv) - case map[string]FlagValue: - m = castMapFlagToMapInterface(miv) - default: - return "" - } - - // scan paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := m[parentKey]; ok { - return parentKey - } - } - return "" -} - -// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere -// in the environment, when automatic env is on. -// e.g., if "foo.bar" has a value in the environment, it “shadows” -// -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInAutoEnv(path []string) string { - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok { - return parentKey - } - } - return "" -} - -// SetTypeByDefaultValue enables or disables the inference of a key value's -// type when the Get function is used based upon a key's default value as -// opposed to the value returned based on the normal fetch logic. -// -// For example, if a key has a default value of []string{} and the same key -// is set via an environment variable to "a b c", a call to the Get function -// would return a string slice for the key if the key's type is inferred by -// the default value and the Get function would return: -// -// []string {"a", "b", "c"} -// -// Otherwise the Get function would return: -// -// "a b c" -func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } - -func (v *Viper) SetTypeByDefaultValue(enable bool) { - v.typeByDefValue = enable -} - -// GetViper gets the global Viper instance. -func GetViper() *Viper { - return v -} - -// Get can retrieve any value given the key to use. -// Get is case-insensitive for a key. -// Get has the behavior of returning the value associated with the first -// place from where it is set. Viper will check in the following order: -// override, flag, env, config file, key/value store, default -// -// Get returns an interface. For a specific value use one of the Get____ methods. -func Get(key string) any { return v.Get(key) } - -func (v *Viper) Get(key string) any { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey, true) - if val == nil { - return nil - } - - if v.typeByDefValue { - // TODO(bep) this branch isn't covered by a single test. 
- valType := val - path := strings.Split(lcaseKey, v.keyDelim) - defVal := v.searchMap(v.defaults, path) - if defVal != nil { - valType = defVal - } - - switch valType.(type) { - case bool: - return cast.ToBool(val) - case string: - return cast.ToString(val) - case int32, int16, int8, int: - return cast.ToInt(val) - case uint: - return cast.ToUint(val) - case uint32: - return cast.ToUint32(val) - case uint64: - return cast.ToUint64(val) - case int64: - return cast.ToInt64(val) - case float64, float32: - return cast.ToFloat64(val) - case time.Time: - return cast.ToTime(val) - case time.Duration: - return cast.ToDuration(val) - case []string: - return cast.ToStringSlice(val) - case []int: - return cast.ToIntSlice(val) - case []time.Duration: - return cast.ToDurationSlice(val) - } - } - - return val -} - -// Sub returns new Viper instance representing a sub tree of this instance. -// Sub is case-insensitive for a key. -func Sub(key string) *Viper { return v.Sub(key) } - -func (v *Viper) Sub(key string) *Viper { - subv := New() - data := v.Get(key) - if data == nil { - return nil - } - - if reflect.TypeOf(data).Kind() == reflect.Map { - subv.parents = append([]string(nil), v.parents...) - subv.parents = append(subv.parents, strings.ToLower(key)) - subv.automaticEnvApplied = v.automaticEnvApplied - subv.envPrefix = v.envPrefix - subv.envKeyReplacer = v.envKeyReplacer - subv.keyDelim = v.keyDelim - subv.config = cast.ToStringMap(data) - return subv - } - return nil -} - -// GetString returns the value associated with the key as a string. -func GetString(key string) string { return v.GetString(key) } - -func (v *Viper) GetString(key string) string { - return cast.ToString(v.Get(key)) -} - -// GetBool returns the value associated with the key as a boolean. -func GetBool(key string) bool { return v.GetBool(key) } - -func (v *Viper) GetBool(key string) bool { - return cast.ToBool(v.Get(key)) -} - -// GetInt returns the value associated with the key as an integer. -func GetInt(key string) int { return v.GetInt(key) } - -func (v *Viper) GetInt(key string) int { - return cast.ToInt(v.Get(key)) -} - -// GetInt32 returns the value associated with the key as an integer. -func GetInt32(key string) int32 { return v.GetInt32(key) } - -func (v *Viper) GetInt32(key string) int32 { - return cast.ToInt32(v.Get(key)) -} - -// GetInt64 returns the value associated with the key as an integer. -func GetInt64(key string) int64 { return v.GetInt64(key) } - -func (v *Viper) GetInt64(key string) int64 { - return cast.ToInt64(v.Get(key)) -} - -// GetUint8 returns the value associated with the key as an unsigned integer. -func GetUint8(key string) uint8 { return v.GetUint8(key) } - -func (v *Viper) GetUint8(key string) uint8 { - return cast.ToUint8(v.Get(key)) -} - -// GetUint returns the value associated with the key as an unsigned integer. -func GetUint(key string) uint { return v.GetUint(key) } - -func (v *Viper) GetUint(key string) uint { - return cast.ToUint(v.Get(key)) -} - -// GetUint16 returns the value associated with the key as an unsigned integer. -func GetUint16(key string) uint16 { return v.GetUint16(key) } - -func (v *Viper) GetUint16(key string) uint16 { - return cast.ToUint16(v.Get(key)) -} - -// GetUint32 returns the value associated with the key as an unsigned integer. -func GetUint32(key string) uint32 { return v.GetUint32(key) } - -func (v *Viper) GetUint32(key string) uint32 { - return cast.ToUint32(v.Get(key)) -} - -// GetUint64 returns the value associated with the key as an unsigned integer. 
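// Aside — illustrative (snippet; imports elided): Sub above snapshots a
// subtree into a fresh Viper that inherits the delimiter and env settings but
// not later changes to the parent:
//
//    v := viper.New()
//    v.Set("database", map[string]any{"host": "localhost", "port": 5432})
//    db := v.Sub("database")
//    fmt.Println(db.GetString("host")) // localhost
//    fmt.Println(db.GetInt("port"))    // 5432
//    fmt.Println(v.Sub("missing"))     // <nil> for absent or non-map keys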
-func GetUint64(key string) uint64 { return v.GetUint64(key) } - -func (v *Viper) GetUint64(key string) uint64 { - return cast.ToUint64(v.Get(key)) -} - -// GetFloat64 returns the value associated with the key as a float64. -func GetFloat64(key string) float64 { return v.GetFloat64(key) } - -func (v *Viper) GetFloat64(key string) float64 { - return cast.ToFloat64(v.Get(key)) -} - -// GetTime returns the value associated with the key as time. -func GetTime(key string) time.Time { return v.GetTime(key) } - -func (v *Viper) GetTime(key string) time.Time { - return cast.ToTime(v.Get(key)) -} - -// GetDuration returns the value associated with the key as a duration. -func GetDuration(key string) time.Duration { return v.GetDuration(key) } - -func (v *Viper) GetDuration(key string) time.Duration { - return cast.ToDuration(v.Get(key)) -} - -// GetIntSlice returns the value associated with the key as a slice of int values. -func GetIntSlice(key string) []int { return v.GetIntSlice(key) } - -func (v *Viper) GetIntSlice(key string) []int { - return cast.ToIntSlice(v.Get(key)) -} - -// GetStringSlice returns the value associated with the key as a slice of strings. -func GetStringSlice(key string) []string { return v.GetStringSlice(key) } - -func (v *Viper) GetStringSlice(key string) []string { - return cast.ToStringSlice(v.Get(key)) -} - -// GetStringMap returns the value associated with the key as a map of interfaces. -func GetStringMap(key string) map[string]any { return v.GetStringMap(key) } - -func (v *Viper) GetStringMap(key string) map[string]any { - return cast.ToStringMap(v.Get(key)) -} - -// GetStringMapString returns the value associated with the key as a map of strings. -func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } - -func (v *Viper) GetStringMapString(key string) map[string]string { - return cast.ToStringMapString(v.Get(key)) -} - -// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. -func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } - -func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { - return cast.ToStringMapStringSlice(v.Get(key)) -} - -// GetSizeInBytes returns the size of the value associated with the given key -// in bytes. -func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } - -func (v *Viper) GetSizeInBytes(key string) uint { - sizeStr := cast.ToString(v.Get(key)) - return parseSizeInBytes(sizeStr) -} - -// UnmarshalKey takes a single key and unmarshals it into a Struct. -func UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { - return v.UnmarshalKey(key, rawVal, opts...) -} - -func (v *Viper) UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { - return decode(v.Get(key), v.defaultDecoderConfig(rawVal, opts...)) -} - -// Unmarshal unmarshals the config into a Struct. Make sure that the tags -// on the fields of the structure are properly set. -func Unmarshal(rawVal any, opts ...DecoderConfigOption) error { - return v.Unmarshal(rawVal, opts...) -} - -func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { - keys := v.AllKeys() - - if v.experimentalBindStruct { - // TODO: make this optional? - structKeys, err := v.decodeStructKeys(rawVal, opts...) - if err != nil { - return err - } - - keys = append(keys, structKeys...) - } - - // TODO: struct keys should be enough? 
- return decode(v.getSettings(keys), v.defaultDecoderConfig(rawVal, opts...)) -} - -func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]string, error) { - var structKeyMap map[string]any - - err := decode(input, v.defaultDecoderConfig(&structKeyMap, opts...)) - if err != nil { - return nil, err - } - - flattenedStructKeyMap := v.flattenAndMergeMap(map[string]bool{}, structKeyMap, "") - - r := make([]string, 0, len(flattenedStructKeyMap)) - for v := range flattenedStructKeyMap { - r = append(r, v) - } - - return r, nil -} - -// defaultDecoderConfig returns default mapstructure.DecoderConfig with support -// of time.Duration values & string slices. -func (v *Viper) defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { - decodeHook := v.decodeHook - if decodeHook == nil { - decodeHook = mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - // mapstructure.StringToSliceHookFunc(","), - stringToWeakSliceHookFunc(","), - ) - } - - c := &mapstructure.DecoderConfig{ - Metadata: nil, - WeaklyTypedInput: true, - DecodeHook: decodeHook, - } - - for _, opt := range opts { - opt(c) - } - - // Do not allow overwriting the output - c.Result = output - - return c -} - -// As of mapstructure v2.0.0 StringToSliceHookFunc checks if the return type is a string slice. -// This function removes that check. -// TODO: implement a function that checks if the value can be converted to the return type and use it instead. -func stringToWeakSliceHookFunc(sep string) mapstructure.DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}, - ) (interface{}, error) { - if f.Kind() != reflect.String || t.Kind() != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality. -func decode(input any, config *mapstructure.DecoderConfig) error { - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - return decoder.Decode(input) -} - -// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent -// in the destination struct. -func UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { - return v.UnmarshalExact(rawVal, opts...) -} - -func (v *Viper) UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { - config := v.defaultDecoderConfig(rawVal, opts...) - config.ErrorUnused = true - - keys := v.AllKeys() - - if v.experimentalBindStruct { - // TODO: make this optional? - structKeys, err := v.decodeStructKeys(rawVal, opts...) - if err != nil { - return err - } - - keys = append(keys, structKeys...) - } - - // TODO: struct keys should be enough? - return decode(v.getSettings(keys), config) -} - -// BindPFlags binds a full flag set to the configuration, using each flag's long -// name as the config key. -func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } - -func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { - return v.BindFlagValues(pflagValueSet{flags}) -} - -// BindPFlag binds a specific key to a pflag (as used by cobra). 
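// Aside — illustrative of the default decode hook assembled above (snippet;
// imports elided): strings convert to time.Duration, and comma-separated
// strings weakly split into slices:
//
//    type Config struct {
//        Endpoint string
//        Timeout  time.Duration
//        Regions  []string
//    }
//
//    v := viper.New()
//    v.Set("endpoint", "https://localhost")
//    v.Set("timeout", "1500ms")
//    v.Set("regions", "us-south,eu-de")
//    var c Config
//    _ = v.Unmarshal(&c) // Timeout == 1500ms, Regions == [us-south eu-de]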
-// Example (where serverCmd is a Cobra instance): -// -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } - -func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { - if flag == nil { - return fmt.Errorf("flag for %q is nil", key) - } - return v.BindFlagValue(key, pflagValue{flag}) -} - -// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long -// name as the config key. -func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } - -func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { - flags.VisitAll(func(flag FlagValue) { - if err = v.BindFlagValue(flag.Name(), flag); err != nil { - return - } - }) - return nil -} - -// BindFlagValue binds a specific key to a FlagValue. -func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } - -func (v *Viper) BindFlagValue(key string, flag FlagValue) error { - if flag == nil { - return fmt.Errorf("flag for %q is nil", key) - } - v.pflags[strings.ToLower(key)] = flag - return nil -} - -// BindEnv binds a Viper key to a ENV variable. -// ENV variables are case sensitive. -// If only a key is provided, it will use the env key matching the key, uppercased. -// If more arguments are provided, they will represent the env variable names that -// should bind to this key and will be taken in the specified order. -// EnvPrefix will be used when set when env name is not provided. -func BindEnv(input ...string) error { return v.BindEnv(input...) } - -func (v *Viper) BindEnv(input ...string) error { - if len(input) == 0 { - return fmt.Errorf("missing key to bind to") - } - - key := strings.ToLower(input[0]) - - if len(input) == 1 { - v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key)) - } else { - v.env[key] = append(v.env[key], input[1:]...) - } - - return nil -} - -// MustBindEnv wraps BindEnv in a panic. -// If there is an error binding an environment variable, MustBindEnv will -// panic. -func MustBindEnv(input ...string) { v.MustBindEnv(input...) } - -func (v *Viper) MustBindEnv(input ...string) { - if err := v.BindEnv(input...); err != nil { - panic(fmt.Sprintf("error while binding environment variable: %v", err)) - } -} - -// Given a key, find the value. -// -// Viper will check to see if an alias exists first. -// Viper will then check in the following order: -// flag, env, config file, key/value store. -// Lastly, if no value was found and flagDefault is true, and if the key -// corresponds to a flag, the flag's default value is returned. -// -// Note: this assumes a lower-cased key given. 
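// Aside — illustrative (APP_PORT, API_TOKEN, and LEGACY_TOKEN are
// hypothetical names; snippet, imports elided): BindEnv above accepts
// explicit variable names that are consulted in order, which keeps legacy
// names working:
//
//    v := viper.New()
//    v.SetEnvPrefix("app")
//    _ = v.BindEnv("port")                               // looks up APP_PORT
//    _ = v.BindEnv("token", "API_TOKEN", "LEGACY_TOKEN") // first hit wins
//    os.Setenv("LEGACY_TOKEN", "s3cr3t")
//    fmt.Println(v.GetString("token")) // s3cr3t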
-func (v *Viper) find(lcaseKey string, flagDefault bool) any { - var ( - val any - exists bool - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - ) - - // compute the path through the nested maps to the nested value - if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { - return nil - } - - // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - - // Set() override first - val = v.searchMap(v.override, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { - return nil - } - - // PFlag override next - flag, exists := v.pflags[lcaseKey] - if exists && flag.HasChanged() { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice", "stringArray": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - case "intSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return cast.ToIntSlice(res) - case "durationSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - slice := strings.Split(s, ",") - return cast.ToDurationSlice(slice) - case "stringToString": - return stringToStringConv(flag.ValueString()) - case "stringToInt": - return stringToIntConv(flag.ValueString()) - default: - return flag.ValueString() - } - } - if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { - return nil - } - - // Env override next - if v.automaticEnvApplied { - envKey := strings.Join(append(v.parents, lcaseKey), ".") - // even if it hasn't been registered, if automaticEnv is used, - // check any Get request - if val, ok := v.getEnv(v.mergeWithEnvPrefix(envKey)); ok { - return val - } - if nested && v.isPathShadowedInAutoEnv(path) != "" { - return nil - } - } - envkeys, exists := v.env[lcaseKey] - if exists { - for _, envkey := range envkeys { - if val, ok := v.getEnv(envkey); ok { - return val - } - } - } - if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { - return nil - } - - // Config file next - val = v.searchIndexableWithPathPrefixes(v.config, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { - return nil - } - - // K/V store next - val = v.searchMap(v.kvstore, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { - return nil - } - - // Default next - val = v.searchMap(v.defaults, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { - return nil - } - - if flagDefault { - // last chance: if no value is found and a flag does exist for the key, - // get the flag's default value even if the flag's value has not been set. 
- if flag, exists := v.pflags[lcaseKey]; exists { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice", "stringArray": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - case "intSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return cast.ToIntSlice(res) - case "stringToString": - return stringToStringConv(flag.ValueString()) - case "stringToInt": - return stringToIntConv(flag.ValueString()) - case "durationSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - slice := strings.Split(s, ",") - return cast.ToDurationSlice(slice) - default: - return flag.ValueString() - } - } - // last item, no need to check shadowing - } - - return nil -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79 -// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. -func stringToStringConv(val string) any { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if val == "" { - return map[string]any{} - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil - } - out := make(map[string]any, len(ss)) - for _, pair := range ss { - k, vv, found := strings.Cut(pair, "=") - if !found { - return nil - } - out[k] = vv - } - return out -} - -// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/d5e0c0615acee7028e1e2740a11102313be88de1/string_to_int.go#L68 -// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. -func stringToIntConv(val string) any { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if val == "" { - return map[string]any{} - } - ss := strings.Split(val, ",") - out := make(map[string]any, len(ss)) - for _, pair := range ss { - k, vv, found := strings.Cut(pair, "=") - if !found { - return nil - } - var err error - out[k], err = strconv.Atoi(vv) - if err != nil { - return nil - } - } - return out -} - -// IsSet checks to see if the key has been set in any of the data locations. -// IsSet is case-insensitive for a key. -func IsSet(key string) bool { return v.IsSet(key) } - -func (v *Viper) IsSet(key string) bool { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey, false) - return val != nil -} - -// AutomaticEnv makes Viper check if environment variables match any of the existing keys -// (config, default or flags). If matching env vars are found, they are loaded into Viper. -func AutomaticEnv() { v.AutomaticEnv() } - -func (v *Viper) AutomaticEnv() { - v.automaticEnvApplied = true -} - -// SetEnvKeyReplacer sets the strings.Replacer on the viper object -// Useful for mapping an environmental variable to a key that does -// not match it. 
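// Aside — illustrative (snippet; imports elided): IsSet above calls find()
// with flagDefault=false, so a bound-but-unchanged flag yields its default
// through Get yet still reads as unset:
//
//    v := viper.New()
//    fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
//    fs.Int("port", 1138, "port to listen on")
//    _ = v.BindPFlag("port", fs.Lookup("port"))
//    fmt.Println(v.GetInt("port")) // 1138 — the flag default, via flagDefault=true
//    fmt.Println(v.IsSet("port"))  // false — a default alone doesn't mark it set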
-func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } - -func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { - v.envKeyReplacer = r -} - -// RegisterAlias creates an alias that provides another accessor for the same key. -// This enables one to change a name without breaking the application. -func RegisterAlias(alias, key string) { v.RegisterAlias(alias, key) } - -func (v *Viper) RegisterAlias(alias, key string) { - v.registerAlias(alias, strings.ToLower(key)) -} - -func (v *Viper) registerAlias(alias, key string) { - alias = strings.ToLower(alias) - if alias != key && alias != v.realKey(key) { - _, exists := v.aliases[alias] - - if !exists { - // if we alias something that exists in one of the maps to another - // name, we'll never be able to get that value using the original - // name, so move the config value to the new realkey. - if val, ok := v.config[alias]; ok { - delete(v.config, alias) - v.config[key] = val - } - if val, ok := v.kvstore[alias]; ok { - delete(v.kvstore, alias) - v.kvstore[key] = val - } - if val, ok := v.defaults[alias]; ok { - delete(v.defaults, alias) - v.defaults[key] = val - } - if val, ok := v.override[alias]; ok { - delete(v.override, alias) - v.override[key] = val - } - v.aliases[alias] = key - } - } else { - v.logger.Warn("creating circular reference alias", "alias", alias, "key", key, "real_key", v.realKey(key)) - } -} - -func (v *Viper) realKey(key string) string { - newkey, exists := v.aliases[key] - if exists { - v.logger.Debug("key is an alias", "alias", key, "to", newkey) - - return v.realKey(newkey) - } - return key -} - -// InConfig checks to see if the given key (or an alias) is in the config file. -func InConfig(key string) bool { return v.InConfig(key) } - -func (v *Viper) InConfig(key string) bool { - lcaseKey := strings.ToLower(key) - - // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path := strings.Split(lcaseKey, v.keyDelim) - - return v.searchIndexableWithPathPrefixes(v.config, path) != nil -} - -// SetDefault sets the default value for this key. -// SetDefault is case-insensitive for a key. -// Default only used when no value is provided by the user via flag, config or ENV. -func SetDefault(key string, value any) { v.SetDefault(key, value) } - -func (v *Viper) SetDefault(key string, value any) { - // If alias passed in, then set the proper default - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// Set sets the value for the key in the override register. -// Set is case-insensitive for a key. -// Will be used instead of values obtained via -// flags, config file, ENV, default, or key/value store. -func Set(key string, value any) { v.Set(key, value) } - -func (v *Viper) Set(key string, value any) { - // If alias passed in, then set the proper override - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.override, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// ReadInConfig will discover and load the configuration file from disk -// and key/value stores, searching in one of the defined paths. 
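// Aside — illustrative (snippet; imports elided): RegisterAlias above makes
// two names resolve to one real key, so reads and writes through either stay
// in sync:
//
//    v := viper.New()
//    v.RegisterAlias("loud", "verbose")
//    v.Set("verbose", true)
//    fmt.Println(v.GetBool("loud"))    // true
//    v.Set("loud", false)              // writes through the alias
//    fmt.Println(v.GetBool("verbose")) // false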
-func ReadInConfig() error { return v.ReadInConfig() } - -func (v *Viper) ReadInConfig() error { - v.logger.Info("attempting to read in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !slices.Contains(SupportedExts, v.getConfigType()) { - return UnsupportedConfigError(v.getConfigType()) - } - - v.logger.Debug("reading file", "file", filename) - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - config := make(map[string]any) - - err = v.unmarshalReader(bytes.NewReader(file), config) - if err != nil { - return err - } - - v.config = config - return nil -} - -// MergeInConfig merges a new configuration with an existing config. -func MergeInConfig() error { return v.MergeInConfig() } - -func (v *Viper) MergeInConfig() error { - v.logger.Info("attempting to merge in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !slices.Contains(SupportedExts, v.getConfigType()) { - return UnsupportedConfigError(v.getConfigType()) - } - - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - return v.MergeConfig(bytes.NewReader(file)) -} - -// ReadConfig will read a configuration file, setting existing keys to nil if the -// key does not exist in the file. -func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } - -func (v *Viper) ReadConfig(in io.Reader) error { - if v.configType == "" { - return errors.New("cannot decode configuration: config type is not set") - } - - v.config = make(map[string]any) - return v.unmarshalReader(in, v.config) -} - -// MergeConfig merges a new configuration with an existing config. -func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } - -func (v *Viper) MergeConfig(in io.Reader) error { - if v.configType == "" { - return errors.New("cannot decode configuration: config type is not set") - } - - cfg := make(map[string]any) - if err := v.unmarshalReader(in, cfg); err != nil { - return err - } - return v.MergeConfigMap(cfg) -} - -// MergeConfigMap merges the configuration from the map given with an existing config. -// Note that the map given may be modified. -func MergeConfigMap(cfg map[string]any) error { return v.MergeConfigMap(cfg) } - -func (v *Viper) MergeConfigMap(cfg map[string]any) error { - if v.config == nil { - v.config = make(map[string]any) - } - insensitiviseMap(cfg) - mergeMaps(cfg, v.config, nil) - return nil -} - -// WriteConfig writes the current configuration to a file. -func WriteConfig() error { return v.WriteConfig() } - -func (v *Viper) WriteConfig() error { - filename, err := v.getConfigFile() - if err != nil { - return err - } - return v.writeConfig(filename, true) -} - -// SafeWriteConfig writes current configuration to file only if the file does not exist. -func SafeWriteConfig() error { return v.SafeWriteConfig() } - -func (v *Viper) SafeWriteConfig() error { - if len(v.configPaths) < 1 { - return errors.New("missing configuration for 'configPath'") - } - return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType)) -} - -// WriteConfigAs writes current configuration to a given filename. -func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } - -func (v *Viper) WriteConfigAs(filename string) error { - return v.writeConfig(filename, true) -} - -// WriteConfigTo writes current configuration to an [io.Writer]. 
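// Aside — an illustrative load sequence (snippet; imports elided); the paths
// are hypothetical and SetConfigName is part of viper's public API defined
// outside the lines deleted here:
//
//    v := viper.New()
//    v.SetConfigName("config") // find config.<ext> in the search paths
//    v.SetConfigType("yaml")
//    v.AddConfigPath("/etc/myapp/")
//    v.AddConfigPath(".")
//    if err := v.ReadInConfig(); err != nil {
//        var notFound viper.ConfigFileNotFoundError
//        if !errors.As(err, &notFound) {
//            log.Fatal(err) // parse errors are fatal; a missing file may be tolerable
//        }
//    }
//
// MergeInConfig and MergeConfig layer a second source over v.config instead
// of replacing it, with mergeMaps (below) resolving key collisions.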
-func WriteConfigTo(w io.Writer) error { return v.WriteConfigTo(w) } - -func (v *Viper) WriteConfigTo(w io.Writer) error { - format := strings.ToLower(v.getConfigType()) - - if !slices.Contains(SupportedExts, format) { - return UnsupportedConfigError(format) - } - - return v.marshalWriter(w, format) -} - -// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. -func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } - -func (v *Viper) SafeWriteConfigAs(filename string) error { - alreadyExists, err := afero.Exists(v.fs, filename) - if alreadyExists && err == nil { - return ConfigFileAlreadyExistsError(filename) - } - return v.writeConfig(filename, false) -} - -func (v *Viper) writeConfig(filename string, force bool) error { - v.logger.Info("attempting to write configuration to file") - - var configType string - - ext := filepath.Ext(filename) - if ext != "" && ext != filepath.Base(filename) { - configType = ext[1:] - } else { - configType = v.configType - } - if configType == "" { - return fmt.Errorf("config type could not be determined for %s", filename) - } - - if !slices.Contains(SupportedExts, configType) { - return UnsupportedConfigError(configType) - } - if v.config == nil { - v.config = make(map[string]any) - } - flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY - if !force { - flags |= os.O_EXCL - } - f, err := v.fs.OpenFile(filename, flags, v.configPermissions) - if err != nil { - return err - } - defer f.Close() - - if err := v.marshalWriter(f, configType); err != nil { - return err - } - - return f.Sync() -} - -func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { - buf := new(bytes.Buffer) - buf.ReadFrom(in) - - format := strings.ToLower(v.getConfigType()) - - if !slices.Contains(SupportedExts, format) { - return UnsupportedConfigError(format) - } - - decoder, err := v.decoderRegistry.Decoder(format) - if err != nil { - return ConfigParseError{err} - } - - err = decoder.Decode(buf.Bytes(), c) - if err != nil { - return ConfigParseError{err} - } - - insensitiviseMap(c) - return nil -} - -// Marshal a map into Writer. -func (v *Viper) marshalWriter(w io.Writer, configType string) error { - c := v.AllSettings() - - encoder, err := v.encoderRegistry.Encoder(configType) - if err != nil { - return ConfigMarshalError{err} - } - - b, err := encoder.Encode(c) - if err != nil { - return ConfigMarshalError{err} - } - - _, err = w.Write(b) - if err != nil { - return ConfigMarshalError{err} - } - - return nil -} - -func keyExists(k string, m map[string]any) string { - lk := strings.ToLower(k) - for mk := range m { - lmk := strings.ToLower(mk) - if lmk == lk { - return mk - } - } - return "" -} - -func castToMapStringInterface( - src map[any]any, -) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[fmt.Sprintf("%v", k)] = v - } - return tgt -} - -func castMapStringSliceToMapInterface(src map[string][]string) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapStringToMapInterface(src map[string]string) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapFlagToMapInterface(src map[string]FlagValue) map[string]any { - tgt := map[string]any{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -// mergeMaps merges two maps. 
The `itgt` parameter is for handling go-yaml's
-// insistence on parsing nested structures as `map[any]any`
-// instead of using a `string` as the key for nested structures beyond one level
-// deep. Both map types are supported as there is a go-yaml fork that uses
-// `map[string]any` instead.
-func mergeMaps(src, tgt map[string]any, itgt map[any]any) {
-	for sk, sv := range src {
-		tk := keyExists(sk, tgt)
-		if tk == "" {
-			v.logger.Debug("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv)
-			tgt[sk] = sv
-			if itgt != nil {
-				itgt[sk] = sv
-			}
-			continue
-		}
-
-		tv, ok := tgt[tk]
-		if !ok {
-			v.logger.Debug("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv)
-			tgt[sk] = sv
-			if itgt != nil {
-				itgt[sk] = sv
-			}
-			continue
-		}
-
-		svType := reflect.TypeOf(sv)
-		tvType := reflect.TypeOf(tv)
-
-		v.logger.Debug(
-			"processing",
-			"key", sk,
-			"st", svType,
-			"tt", tvType,
-			"sv", sv,
-			"tv", tv,
-		)
-
-		switch ttv := tv.(type) {
-		case map[any]any:
-			v.logger.Debug("merging maps (must convert)")
-			tsv, ok := sv.(map[any]any)
-			if !ok {
-				v.logger.Error(
-					"Could not cast sv to map[any]any",
-					"key", sk,
-					"st", svType,
-					"tt", tvType,
-					"sv", sv,
-					"tv", tv,
-				)
-				continue
-			}
-
-			ssv := castToMapStringInterface(tsv)
-			stv := castToMapStringInterface(ttv)
-			mergeMaps(ssv, stv, ttv)
-		case map[string]any:
-			v.logger.Debug("merging maps")
-			tsv, ok := sv.(map[string]any)
-			if !ok {
-				v.logger.Error(
-					"Could not cast sv to map[string]any",
-					"key", sk,
-					"st", svType,
-					"tt", tvType,
-					"sv", sv,
-					"tv", tv,
-				)
-				continue
-			}
-			mergeMaps(tsv, ttv, nil)
-		default:
-			v.logger.Debug("setting value")
-			tgt[tk] = sv
-			if itgt != nil {
-				itgt[tk] = sv
-			}
-		}
-	}
-}
-
-// AllKeys returns all keys holding a value, regardless of where they are set.
-// Nested keys are returned with a v.keyDelim separator.
-func AllKeys() []string { return v.AllKeys() }
-
-func (v *Viper) AllKeys() []string {
-	m := map[string]bool{}
-	// add all paths, by order of descending priority to ensure correct shadowing
-	m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
-	m = v.flattenAndMergeMap(m, v.override, "")
-	m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
-	m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env))
-	m = v.flattenAndMergeMap(m, v.config, "")
-	m = v.flattenAndMergeMap(m, v.kvstore, "")
-	m = v.flattenAndMergeMap(m, v.defaults, "")
-
-	// convert set of paths to list
-	a := make([]string, 0, len(m))
-	for x := range m {
-		a = append(a, x)
-	}
-	return a
-}
-
-// flattenAndMergeMap recursively flattens the given map into a map[string]bool
-// of key paths (used as a set, easier to manipulate than a []string):
-// - each path is merged into a single key string, delimited with v.keyDelim
-// - if a path is shadowed by an earlier value in the initial shadow map,
-//   it is skipped.
-//
-// The resulting set of paths is merged into the given shadow set at the same time.
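A short sketch of the key flattening that AllKeys (above) and flattenAndMergeMap (below) perform: nested maps surface as keyDelim-joined paths. Config content here is illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("json")
	_ = v.ReadConfig(strings.NewReader(`{"db": {"host": "localhost", "port": 5432}}`))

	// Nested maps are flattened into delimited key paths;
	// ordering of the returned slice is not guaranteed.
	fmt.Println(v.AllKeys()) // e.g. [db.host db.port]
}
```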
-func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]any, prefix string) map[string]bool { - if shadow != nil && prefix != "" && shadow[prefix] { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]bool) - } - - var m2 map[string]any - if prefix != "" { - prefix += v.keyDelim - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = true - continue - } - // recursively merge to shadow map - shadow = v.flattenAndMergeMap(shadow, m2, fullKey) - } - return shadow -} - -// mergeFlatMap merges the given maps, excluding values of the second map -// shadowed by values from the first map. -func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]any) map[string]bool { - // scan keys -outer: - for k := range m { - path := strings.Split(k, v.keyDelim) - // scan intermediate paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if shadow[parentKey] { - // path is shadowed, continue - continue outer - } - } - // add key - shadow[strings.ToLower(k)] = true - } - return shadow -} - -// AllSettings merges all settings and returns them as a map[string]any. -func AllSettings() map[string]any { return v.AllSettings() } - -func (v *Viper) AllSettings() map[string]any { - return v.getSettings(v.AllKeys()) -} - -func (v *Viper) getSettings(keys []string) map[string]any { - m := map[string]any{} - // start from the list of keys, and construct the map one value at a time - for _, k := range keys { - value := v.Get(k) - if value == nil { - // should not happen, since AllKeys() returns only keys holding a value, - // check just in case anything changes - continue - } - path := strings.Split(k, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(m, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - return m -} - -// SetFs sets the filesystem to use to read configuration. -func SetFs(fs afero.Fs) { v.SetFs(fs) } - -func (v *Viper) SetFs(fs afero.Fs) { - v.fs = fs -} - -// SetConfigName sets name for the config file. -// Does not include extension. -func SetConfigName(in string) { v.SetConfigName(in) } - -func (v *Viper) SetConfigName(in string) { - if v.finder != nil { - v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "SetConfigName")) - } - - if in != "" { - v.configName = in - v.configFile = "" - } -} - -// SetConfigType sets the type of the configuration returned by the -// remote source, e.g. "json". -func SetConfigType(in string) { v.SetConfigType(in) } - -func (v *Viper) SetConfigType(in string) { - if in != "" { - v.configType = in - } -} - -// SetConfigPermissions sets the permissions for the config file. 
-func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) }
-
-func (v *Viper) SetConfigPermissions(perm os.FileMode) {
-	v.configPermissions = perm.Perm()
-}
-
-func (v *Viper) getConfigType() string {
-	if v.configType != "" {
-		return v.configType
-	}
-
-	cf, err := v.getConfigFile()
-	if err != nil {
-		return ""
-	}
-
-	ext := filepath.Ext(cf)
-
-	if len(ext) > 1 {
-		return ext[1:]
-	}
-
-	return ""
-}
-
-func (v *Viper) getConfigFile() (string, error) {
-	if v.configFile == "" {
-		cf, err := v.findConfigFile()
-		if err != nil {
-			return "", err
-		}
-		v.configFile = cf
-	}
-	return v.configFile, nil
-}
-
-// Debug prints all configuration registries for debugging
-// purposes.
-func Debug() { v.Debug() }
-func DebugTo(w io.Writer) { v.DebugTo(w) }
-
-func (v *Viper) Debug() { v.DebugTo(os.Stdout) }
-
-func (v *Viper) DebugTo(w io.Writer) {
-	fmt.Fprintf(w, "Aliases:\n%#v\n", v.aliases)
-	fmt.Fprintf(w, "Override:\n%#v\n", v.override)
-	fmt.Fprintf(w, "PFlags:\n%#v\n", v.pflags)
-	fmt.Fprintf(w, "Env:\n%#v\n", v.env)
-	fmt.Fprintf(w, "Key/Value Store:\n%#v\n", v.kvstore)
-	fmt.Fprintf(w, "Config:\n%#v\n", v.config)
-	fmt.Fprintf(w, "Defaults:\n%#v\n", v.defaults)
-}
diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/.env b/openshift/tools/vendor/github.com/subosito/gotenv/.env
deleted file mode 100644
index 6405eca71f..0000000000
--- a/openshift/tools/vendor/github.com/subosito/gotenv/.env
+++ /dev/null
@@ -1 +0,0 @@
-HELLO=world
diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/.env.invalid b/openshift/tools/vendor/github.com/subosito/gotenv/.env.invalid
deleted file mode 100644
index 016d5e0cea..0000000000
--- a/openshift/tools/vendor/github.com/subosito/gotenv/.env.invalid
+++ /dev/null
@@ -1 +0,0 @@
-lol$wut
diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/.gitignore b/openshift/tools/vendor/github.com/subosito/gotenv/.gitignore
deleted file mode 100644
index 7db37c1db4..0000000000
--- a/openshift/tools/vendor/github.com/subosito/gotenv/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.test
-*.out
-annotate.json
-profile.cov
diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/.golangci.yaml b/openshift/tools/vendor/github.com/subosito/gotenv/.golangci.yaml
deleted file mode 100644
index 8c82a762e2..0000000000
--- a/openshift/tools/vendor/github.com/subosito/gotenv/.golangci.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-# Options for analysis running.
-run:
-  timeout: 1m
-
-linters-settings:
-  gofmt:
-    simplify: true
diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/CHANGELOG.md b/openshift/tools/vendor/github.com/subosito/gotenv/CHANGELOG.md
deleted file mode 100644
index c4fe7d3268..0000000000
--- a/openshift/tools/vendor/github.com/subosito/gotenv/CHANGELOG.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# Changelog
-
-## [1.5.0] - 2023-08-15
-
-### Fixed
-
-- Use io.Reader instead of custom Reader
-
-## [1.5.0] - 2023-08-15
-
-### Added
-
-- Support for reading UTF16 files
-
-### Fixed
-
-- Scanner error handling
-- Reader error handling
-
-## [1.4.2] - 2023-01-11
-
-### Fixed
-
-- Env var initialization
-
-### Changed
-
-- More consistent line splitting
-
-## [1.4.1] - 2022-08-23
-
-### Fixed
-
-- Missing file close
-
-### Changed
-
-- Updated dependencies
-
-## [1.4.0] - 2022-06-02
-
-### Added
-
-- Add `Marshal` and `Unmarshal` helpers
-
-### Changed
-
-- The CI will now run a linter and the tests on PRs.
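For reference, the Marshal and Unmarshal helpers mentioned in the 1.4.0 entry above round-trip an Env map. A minimal sketch (values are illustrative); note that Marshal quotes non-numeric values and sorts lines by name:

```go
package main

import (
	"fmt"

	"github.com/subosito/gotenv"
)

func main() {
	env, err := gotenv.Unmarshal("APP_ID=1234567\nAPP_NAME=gotenv")
	if err != nil {
		panic(err)
	}

	out, err := gotenv.Marshal(env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
	// APP_ID=1234567
	// APP_NAME="gotenv"
}
```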
- -## [1.3.0] - 2022-05-23 - -### Added - -- Support = within double-quoted strings -- Add support for multiline values - -### Changed - -- `OverLoad` prefer environment variables over local variables - -## [1.2.0] - 2019-08-03 - -### Added - -- Add `Must` helper to raise an error as panic. It can be used with `Load` and `OverLoad`. -- Add more tests to be 100% coverage. -- Add CHANGELOG -- Add more OS for the test: OSX and Windows - -### Changed - -- Reduce complexity and improve source code for having `A+` score in [goreportcard](https://goreportcard.com/report/github.com/subosito/gotenv). -- Updated README with mentions to all available functions - -### Removed - -- Remove `ErrFormat` -- Remove `MustLoad` and `MustOverload`, replaced with `Must` helper. - -## [1.1.1] - 2018-06-05 - -### Changed - -- Replace `os.Getenv` with `os.LookupEnv` to ensure that the environment variable is not set, by [radding](https://github.com/radding) - -## [1.1.0] - 2017-03-20 - -### Added - -- Supports carriage return in env -- Handle files with UTF-8 BOM - -### Changed - -- Whitespace handling - -### Fixed - -- Incorrect variable expansion -- Handling escaped '$' characters - -## [1.0.0] - 2014-10-05 - -First stable release. - diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/LICENSE b/openshift/tools/vendor/github.com/subosito/gotenv/LICENSE deleted file mode 100644 index f64ccaedc3..0000000000 --- a/openshift/tools/vendor/github.com/subosito/gotenv/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Alif Rachmawadi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/README.md b/openshift/tools/vendor/github.com/subosito/gotenv/README.md deleted file mode 100644 index fc9616e3b0..0000000000 --- a/openshift/tools/vendor/github.com/subosito/gotenv/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# gotenv - -[![Build Status](https://github.com/subosito/gotenv/workflows/Go%20workflow/badge.svg)](https://github.com/subosito/gotenv/actions) -[![Coverage Status](https://badgen.net/codecov/c/github/subosito/gotenv)](https://codecov.io/gh/subosito/gotenv) -[![Go Report Card](https://goreportcard.com/badge/github.com/subosito/gotenv)](https://goreportcard.com/report/github.com/subosito/gotenv) -[![GoDoc](https://godoc.org/github.com/subosito/gotenv?status.svg)](https://godoc.org/github.com/subosito/gotenv) - -Load environment variables from `.env` or `io.Reader` in Go. 
-
-## Usage
-
-Add the gotenv package to your `import` statement:
-
-```go
-import "github.com/subosito/gotenv"
-```
-
-To modify your app environment variables, `gotenv` exposes two main functions:
-
-- `gotenv.Load`
-- `gotenv.Apply`
-
-By default, `gotenv.Load` will look for a file called `.env` in the current working directory.
-
-Behind the scenes, it will load the `.env` file and export the valid variables as environment variables. Make sure you call it as early as possible, e.g. in an `init()` function, so that all variables are loaded.
-
-Once loaded you can use `os.Getenv()` to get the value of a variable.
-
-Let's say you have a `.env` file:
-
-```sh
-APP_ID=1234567
-APP_SECRET=abcdef
-```
-
-Here's an example app:
-
-```go
-package main
-
-import (
-	"github.com/subosito/gotenv"
-	"log"
-	"os"
-)
-
-func init() {
-	gotenv.Load()
-}
-
-func main() {
-	log.Println(os.Getenv("APP_ID"))     // "1234567"
-	log.Println(os.Getenv("APP_SECRET")) // "abcdef"
-}
-```
-
-You can also load files other than `.env` if you wish. Just supply the filenames when calling `Load()`. They are loaded in order, and the first value set for a variable wins:
-
-```go
-gotenv.Load(".env.production", "credentials")
-```
-
-While `gotenv.Load` loads entries from a `.env` file, `gotenv.Apply` allows you to use any `io.Reader`:
-
-```go
-gotenv.Apply(strings.NewReader("APP_ID=1234567"))
-
-log.Println(os.Getenv("APP_ID"))
-// Output: "1234567"
-```
-
-Both `gotenv.Load` and `gotenv.Apply` **DO NOT** override existing environment variables. If you want to override existing ones, see the section below.
-
-### Environment Overrides
-
-Besides the functions above, `gotenv` also provides counterparts that override existing variables:
-
-- `gotenv.OverLoad`
-- `gotenv.OverApply`
-
-Here's an example of this override behavior:
-
-```go
-os.Setenv("HELLO", "world")
-
-// NOTE: when using Apply, the existing value is preserved
-gotenv.Apply(strings.NewReader("HELLO=universe"))
-fmt.Println(os.Getenv("HELLO"))
-// Output: "world"
-
-// NOTE: when using OverApply, the existing value is overridden
-gotenv.OverApply(strings.NewReader("HELLO=universe"))
-fmt.Println(os.Getenv("HELLO"))
-// Output: "universe"
-```
-
-### Throw a Panic
-
-Both `gotenv.Load` and `gotenv.OverLoad` return an error when something goes wrong, e.g. when the env file does not exist. To make them easier to use, `gotenv` also provides the `gotenv.Must` helper, which panics when an error is returned.
-
-```go
-err := gotenv.Load(".env-is-not-exist")
-fmt.Println("error", err)
-// error: open .env-is-not-exist: no such file or directory
-
-gotenv.Must(gotenv.Load, ".env-is-not-exist")
-// it will throw a panic
-// panic: open .env-is-not-exist: no such file or directory
-```
-
-### Another Scenario
-
-Just in case you want to parse environment variables from any `io.Reader`, gotenv keeps its `Parse` and `StrictParse` functions as public API so you can use them.
-
-```go
-// import "strings"
-
-pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
-// gotenv.Env{"FOO": "test", "BAR": "test"}
-
-pairs, err := gotenv.StrictParse(strings.NewReader(`FOO="bar"`))
-// gotenv.Env{"FOO": "bar"}
-```
-
-`Parse` ignores invalid lines and returns an `Env` of valid environment variables, while `StrictParse` returns an error for invalid lines.
-
-## Notes
-
-The gotenv package is a Go port of the [`dotenv`](https://github.com/bkeepers/dotenv) project with some additions made for Go. For general features, it aims to be as compatible as possible.
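Before the parser implementation that follows, a small sketch of the line formats its linePattern accepts — export prefixes, colon separators, quoting, and trailing comments (keys and values here are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/subosito/gotenv"
)

func main() {
	src := strings.NewReader(`
# comments and blank lines are skipped
export TOKEN=abc123
HOST: localhost
GREETING="hello world" # inline comment after a quoted value
`)
	pairs := gotenv.Parse(src)
	fmt.Println(pairs["TOKEN"], pairs["HOST"], pairs["GREETING"])
	// abc123 localhost hello world
}
```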
diff --git a/openshift/tools/vendor/github.com/subosito/gotenv/gotenv.go b/openshift/tools/vendor/github.com/subosito/gotenv/gotenv.go
deleted file mode 100644
index 1191d35874..0000000000
--- a/openshift/tools/vendor/github.com/subosito/gotenv/gotenv.go
+++ /dev/null
@@ -1,409 +0,0 @@
-// Package gotenv provides functionality to dynamically load the environment variables
-package gotenv
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-
-	"golang.org/x/text/encoding/unicode"
-	"golang.org/x/text/transform"
-)
-
-const (
-	// Pattern for detecting valid line format
-	linePattern = `\A\s*(?:export\s+)?([\w\.]+)(?:\s*=\s*|:\s+?)('(?:\'|[^'])*'|"(?:\"|[^"])*"|[^#\n]+)?\s*(?:\s*\#.*)?\z`
-
-	// Pattern for detecting valid variable within a value
-	variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)`
-)
-
-// Byte order mark character
-var (
-	bomUTF8    = []byte("\xEF\xBB\xBF")
-	bomUTF16LE = []byte("\xFF\xFE")
-	bomUTF16BE = []byte("\xFE\xFF")
-)
-
-// Env holds key/value pair of valid environment variable
-type Env map[string]string
-
-// Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not exist.
-// When it's called with no argument, it will load `.env` file on the current path and set the environment variables.
-// Otherwise, it will loop over the filenames parameter and set the proper environment variables.
-func Load(filenames ...string) error {
-	return loadenv(false, filenames...)
-}
-
-// OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables.
-func OverLoad(filenames ...string) error {
-	return loadenv(true, filenames...)
-}
-
-// Must is a wrapper function that panics when the supplied function returns an error.
-func Must(fn func(filenames ...string) error, filenames ...string) {
-	if err := fn(filenames...); err != nil {
-		panic(err.Error())
-	}
-}
-
-// Apply is a function to load an io.Reader then export the valid variables into environment variables if they do not exist.
-func Apply(r io.Reader) error {
-	return parset(r, false)
-}
-
-// OverApply is a function to load an io.Reader then export and override the valid variables into environment variables.
-func OverApply(r io.Reader) error {
-	return parset(r, true)
-}
-
-func loadenv(override bool, filenames ...string) error {
-	if len(filenames) == 0 {
-		filenames = []string{".env"}
-	}
-
-	for _, filename := range filenames {
-		f, err := os.Open(filename)
-		if err != nil {
-			return err
-		}
-
-		err = parset(f, override)
-		f.Close()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// parse and set :)
-func parset(r io.Reader, override bool) error {
-	env, err := strictParse(r, override)
-	if err != nil {
-		return err
-	}
-
-	for key, val := range env {
-		setenv(key, val, override)
-	}
-
-	return nil
-}
-
-func setenv(key, val string, override bool) {
-	if override {
-		os.Setenv(key, val)
-	} else {
-		if _, present := os.LookupEnv(key); !present {
-			os.Setenv(key, val)
-		}
-	}
-}
-
-// Parse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables.
-// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
-// This function skips any invalid lines and processes only the valid ones.
-func Parse(r io.Reader) Env {
-	env, _ := strictParse(r, false)
-	return env
-}
-
-// StrictParse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables.
-// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
-// This function returns an error if there are any invalid lines.
-func StrictParse(r io.Reader) (Env, error) {
-	return strictParse(r, false)
-}
-
-// Read is a function to parse a file line by line and returns the valid Env key/value pair of valid variables.
-// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
-// This function skips any invalid lines and processes only the valid ones.
-func Read(filename string) (Env, error) {
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	return strictParse(f, false)
-}
-
-// Unmarshal reads a string line by line and returns the valid Env key/value pair of valid variables.
-// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
-// This function returns an error if there are any invalid lines.
-func Unmarshal(str string) (Env, error) {
-	return strictParse(strings.NewReader(str), false)
-}
-
-// Marshal outputs the given environment as an env file.
-// Variables will be sorted by name.
-func Marshal(env Env) (string, error) {
-	lines := make([]string, 0, len(env))
-	for k, v := range env {
-		if d, err := strconv.Atoi(v); err == nil {
-			lines = append(lines, fmt.Sprintf(`%s=%d`, k, d))
-		} else {
-			lines = append(lines, fmt.Sprintf(`%s=%q`, k, v))
-		}
-	}
-	sort.Strings(lines)
-	return strings.Join(lines, "\n"), nil
-}
-
-// Write serializes the given environment and writes it to a file
-func Write(env Env, filename string) error {
-	content, err := Marshal(env)
-	if err != nil {
-		return err
-	}
-	// ensure the path exists
-	if err := os.MkdirAll(filepath.Dir(filename), 0o775); err != nil {
-		return err
-	}
-	// create or truncate the file
-	file, err := os.Create(filename)
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-	_, err = file.WriteString(content + "\n")
-	if err != nil {
-		return err
-	}
-
-	return file.Sync()
-}
-
-// splitLines is a valid SplitFunc for a bufio.Scanner. It will split lines on CR ('\r'), LF ('\n') or CRLF (any of the three sequences).
-// If a CR is immediately followed by a LF, it is treated as a CRLF (one single line break).
-func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
-	if atEOF && len(data) == 0 {
-		return 0, nil, bufio.ErrFinalToken
-	}
-
-	idx := bytes.IndexAny(data, "\r\n")
-	switch {
-	case atEOF && idx < 0:
-		return len(data), data, bufio.ErrFinalToken
-
-	case idx < 0:
-		return 0, nil, nil
-	}
-
-	// consume CR or LF
-	eol := idx + 1
-	// detect CRLF
-	if len(data) > eol && data[eol-1] == '\r' && data[eol] == '\n' {
-		eol++
-	}
-
-	return eol, data[:idx], nil
-}
-
-func strictParse(r io.Reader, override bool) (Env, error) {
-	env := make(Env)
-
-	buf := new(bytes.Buffer)
-	tee := io.TeeReader(r, buf)
-
-	// There can be a maximum of 3 BOM bytes.
-	bomByteBuffer := make([]byte, 3)
-	_, err := tee.Read(bomByteBuffer)
-	if err != nil && err != io.EOF {
-		return env, err
-	}
-
-	z := io.MultiReader(buf, r)
-
-	// We choose a different scanner depending on the file encoding.
-	var scanner *bufio.Scanner
-
-	if bytes.HasPrefix(bomByteBuffer, bomUTF8) {
-		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF8BOM.NewDecoder()))
-	} else if bytes.HasPrefix(bomByteBuffer, bomUTF16LE) {
-		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.LittleEndian, unicode.ExpectBOM).NewDecoder()))
-	} else if bytes.HasPrefix(bomByteBuffer, bomUTF16BE) {
-		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.BigEndian, unicode.ExpectBOM).NewDecoder()))
-	} else {
-		scanner = bufio.NewScanner(z)
-	}
-
-	scanner.Split(splitLines)
-
-	for scanner.Scan() {
-		if err := scanner.Err(); err != nil {
-			return env, err
-		}
-
-		line := strings.TrimSpace(scanner.Text())
-		if line == "" || line[0] == '#' {
-			continue
-		}
-
-		quote := ""
-		// look for the delimiter character
-		idx := strings.Index(line, "=")
-		if idx == -1 {
-			idx = strings.Index(line, ":")
-		}
-		// look for a quote character
-		if idx > 0 && idx < len(line)-1 {
-			val := strings.TrimSpace(line[idx+1:])
-			if val[0] == '"' || val[0] == '\'' {
-				quote = val[:1]
-				// look for the closing quote character within the same line
-				idx = strings.LastIndex(strings.TrimSpace(val[1:]), quote)
-				if idx >= 0 && val[idx] != '\\' {
-					quote = ""
-				}
-			}
-		}
-		// look for the closing quote character
-		for quote != "" && scanner.Scan() {
-			l := scanner.Text()
-			line += "\n" + l
-			idx := strings.LastIndex(l, quote)
-			if idx > 0 && l[idx-1] == '\\' {
-				// found a matching quote character but it's escaped
-				continue
-			}
-			if idx >= 0 {
-				// found a matching quote
-				quote = ""
-			}
-		}
-
-		if quote != "" {
-			return env, fmt.Errorf("missing quotes")
-		}
-
-		err := parseLine(line, env, override)
-		if err != nil {
-			return env, err
-		}
-	}
-
-	return env, scanner.Err()
-}
-
-var (
-	lineRgx     = regexp.MustCompile(linePattern)
-	unescapeRgx = regexp.MustCompile(`\\([^$])`)
-	varRgx      = regexp.MustCompile(variablePattern)
-)
-
-func parseLine(s string, env Env, override bool) error {
-	rm := lineRgx.FindStringSubmatch(s)
-
-	if len(rm) == 0 {
-		return checkFormat(s, env)
-	}
-
-	key := strings.TrimSpace(rm[1])
-	val := strings.TrimSpace(rm[2])
-
-	var hsq, hdq bool
-
-	// check if the value is quoted
-	if l := len(val); l >= 2 {
-		l -= 1
-		// has double quotes
-		hdq = val[0] == '"' && val[l] == '"'
-		// has single quotes
-		hsq = val[0] == '\'' && val[l] == '\''
-
-		// remove quotes '' or ""
-		if hsq || hdq {
-			val = val[1:l]
-		}
-	}
-
-	if hdq {
-		val = strings.ReplaceAll(val, `\n`, "\n")
-		val = strings.ReplaceAll(val, `\r`, "\r")
-
-		// Unescape all characters except $ so variables can be escaped properly
-		val = unescapeRgx.ReplaceAllString(val, "$1")
-	}
-
-	if !hsq {
-		fv := func(s string) string {
-			return varReplacement(s, hsq, env, override)
-		}
-		val = varRgx.ReplaceAllStringFunc(val, fv)
-	}
-
-	env[key] = val
-	return nil
-}
-
-func parseExport(st string, env Env) error {
-	if strings.HasPrefix(st, "export") {
-		vs := strings.SplitN(st, " ", 2)
-
-		if len(vs) > 1 {
-			if _, ok := env[vs[1]]; !ok {
-				return fmt.Errorf("line `%s` has an unset variable", st)
-			}
-		}
-	}
-
-	return nil
-}
-
-var varNameRgx = regexp.MustCompile(`(\$)(\{?([A-Z0-9_]+)\}?)`)
-
-func varReplacement(s string, hsq bool, env Env, override bool) string {
-	if s == "" {
-		return s
-	}
-
-	if s[0] == '\\' {
-		// the dollar sign is escaped
-		return s[1:]
-	}
-
-	if hsq {
-		return s
-	}
-
-	mn := varNameRgx.FindStringSubmatch(s)
-
-	if len(mn) == 0 {
-		return s
-	}
-
-	v := mn[3]
-
-	if replace, ok := os.LookupEnv(v); ok && !override {
-
return replace - } - - if replace, ok := env[v]; ok { - return replace - } - - return os.Getenv(v) -} - -func checkFormat(s string, env Env) error { - st := strings.TrimSpace(s) - - if st == "" || st[0] == '#' { - return nil - } - - if err := parseExport(st, env); err != nil { - return err - } - - return fmt.Errorf("line `%s` doesn't match format", s) -} diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/LICENSE b/openshift/tools/vendor/github.com/vbatts/tar-split/LICENSE new file mode 100644 index 0000000000..ca03685b15 --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/common.go new file mode 100644 index 0000000000..e687a08c96 --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/common.go @@ -0,0 +1,724 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// +// Tape archives (tar) are a file format for storing a sequence of files that +// can be read and written in a streaming manner. +// This package aims to cover most variations of the format, +// including those produced by GNU and BSD tar tools. +package tar + +import ( + "errors" + "fmt" + "math" + "os" + "path" + "reflect" + "strconv" + "strings" + "time" +) + +// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit +// architectures. If a large value is encountered when decoding, the result +// stored in Header will be the truncated version. 
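Although this vendored copy is consumed by tar-split internally, it keeps the standard library's archive/tar surface. A minimal write/read round-trip sketch (file name and contents are illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vbatts/tar-split/archive/tar"
)

func main() {
	var buf bytes.Buffer

	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{Name: "hello.txt", Mode: 0o644, Size: 5}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	tr := tar.NewReader(&buf)
	h, err := tr.Next()
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Name, h.Size) // hello.txt 5
}
```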
+ +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errMissData = errors.New("archive/tar: sparse file references non-existent data") + errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") + errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") +) + +type headerError []string + +func (he headerError) Error() string { + const prefix = "archive/tar: cannot encode header" + var ss []string + for _, s := range he { + if s != "" { + ss = append(ss, s) + } + } + if len(ss) == 0 { + return prefix + } + return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and ")) +} + +// Type flags for Header.Typeflag. +const ( + // Type '0' indicates a regular file. + TypeReg = '0' + TypeRegA = '\x00' // Deprecated: Use TypeReg instead. + + // Type '1' to '6' are header-only flags and may not have a data body. + TypeLink = '1' // Hard link + TypeSymlink = '2' // Symbolic link + TypeChar = '3' // Character device node + TypeBlock = '4' // Block device node + TypeDir = '5' // Directory + TypeFifo = '6' // FIFO node + + // Type '7' is reserved. + TypeCont = '7' + + // Type 'x' is used by the PAX format to store key-value records that + // are only relevant to the next file. + // This package transparently handles these types. + TypeXHeader = 'x' + + // Type 'g' is used by the PAX format to store key-value records that + // are relevant to all subsequent files. + // This package only supports parsing and composing such headers, + // but does not currently support persisting the global state across files. + TypeXGlobalHeader = 'g' + + // Type 'S' indicates a sparse file in the GNU format. + TypeGNUSparse = 'S' + + // Types 'L' and 'K' are used by the GNU format for a meta file + // used to store the path or link name for the next file. + // This package transparently handles these types. + TypeGNULongName = 'L' + TypeGNULongLink = 'K' +) + +// Keywords for PAX extended header records. +const ( + paxNone = "" // Indicates that no PAX key is suitable + paxPath = "path" + paxLinkpath = "linkpath" + paxSize = "size" + paxUid = "uid" + paxGid = "gid" + paxUname = "uname" + paxGname = "gname" + paxMtime = "mtime" + paxAtime = "atime" + paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid + paxCharset = "charset" // Currently unused + paxComment = "comment" // Currently unused + + paxSchilyXattr = "SCHILY.xattr." + + // Keywords for GNU sparse files in a PAX extended header. + paxGNUSparse = "GNU.sparse." + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// basicKeys is a set of the PAX keys for which we have built-in support. +// This does not contain "charset" or "comment", which are both PAX-specific, +// so adding them as first-class features of Header is unlikely. +// Users can use the PAXRecords field to set it themselves. 
+var basicKeys = map[string]bool{ + paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, + paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, +} + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +// +// For forward compatibility, users that retrieve a Header from Reader.Next, +// mutate it in some ways, and then pass it back to Writer.WriteHeader +// should do so by creating a new Header and copying the fields +// that they are interested in preserving. +type Header struct { + // Typeflag is the type of header entry. + // The zero value is automatically promoted to either TypeReg or TypeDir + // depending on the presence of a trailing slash in Name. + Typeflag byte + + Name string // Name of file entry + Linkname string // Target name of link (valid for TypeLink or TypeSymlink) + + Size int64 // Logical file size in bytes + Mode int64 // Permission and mode bits + Uid int // User ID of owner + Gid int // Group ID of owner + Uname string // User name of owner + Gname string // Group name of owner + + // If the Format is unspecified, then Writer.WriteHeader rounds ModTime + // to the nearest second and ignores the AccessTime and ChangeTime fields. + // + // To use AccessTime or ChangeTime, specify the Format as PAX or GNU. + // To use sub-second resolution, specify the Format as PAX. + ModTime time.Time // Modification time + AccessTime time.Time // Access time (requires either PAX or GNU support) + ChangeTime time.Time // Change time (requires either PAX or GNU support) + + Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) + Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) + + // Xattrs stores extended attributes as PAX records under the + // "SCHILY.xattr." namespace. + // + // The following are semantically equivalent: + // h.Xattrs[key] = value + // h.PAXRecords["SCHILY.xattr."+key] = value + // + // When Writer.WriteHeader is called, the contents of Xattrs will take + // precedence over those in PAXRecords. + // + // Deprecated: Use PAXRecords instead. + Xattrs map[string]string + + // PAXRecords is a map of PAX extended header records. + // + // User-defined records should have keys of the following form: + // VENDOR.keyword + // Where VENDOR is some namespace in all uppercase, and keyword may + // not contain the '=' character (e.g., "GOLANG.pkg.version"). + // The key and value should be non-empty UTF-8 strings. + // + // When Writer.WriteHeader is called, PAX records derived from the + // other fields in Header take precedence over PAXRecords. + PAXRecords map[string]string + + // Format specifies the format of the tar header. + // + // This is set by Reader.Next as a best-effort guess at the format. + // Since the Reader liberally reads some non-compliant files, + // it is possible for this to be FormatUnknown. + // + // If the format is unspecified when Writer.WriteHeader is called, + // then it uses the first format (in the order of USTAR, PAX, GNU) + // capable of encoding this Header (see Format). + Format Format +} + +// sparseEntry represents a Length-sized fragment at Offset in the file. +type sparseEntry struct{ Offset, Length int64 } + +func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length } + +// A sparse file can be represented as either a sparseDatas or a sparseHoles. +// As long as the total size is known, they are equivalent and one can be +// converted to the other form and back. 
The various tar formats with sparse
+// file support represent sparse files in the sparseDatas form. That is, they
+// specify the fragments in the file that have data, and treat everything else as
+// having zero bytes. As such, the encoding and decoding logic in this package
+// deals with sparseDatas.
+//
+// However, the external API uses sparseHoles instead of sparseDatas because the
+// zero value of sparseHoles logically represents a normal file (i.e., there are
+// no holes in it). On the other hand, the zero value of sparseDatas implies
+// that the file has no data in it, which is rather odd.
+//
+// As an example, if the underlying raw file contains the 8-byte data:
+//	var compactFile = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var spd sparseDatas = []sparseEntry{
+//		{Offset: 2,  Length: 5},  // Data fragment for 2..6
+//		{Offset: 18, Length: 3},  // Data fragment for 18..20
+//	}
+//	var sph sparseHoles = []sparseEntry{
+//		{Offset: 0,  Length: 2},  // Hole fragment for 0..1
+//		{Offset: 7,  Length: 11}, // Hole fragment for 7..17
+//		{Offset: 21, Length: 4},  // Hole fragment for 21..24
+//	}
+//
+// Then the content of the resulting sparse file with a Header.Size of 25 is:
+//	var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type (
+	sparseDatas []sparseEntry
+	sparseHoles []sparseEntry
+)
+
+// validateSparseEntries reports whether sp is a valid sparse map.
+// It does not matter whether sp represents data fragments or hole fragments.
+func validateSparseEntries(sp []sparseEntry, size int64) bool {
+	// Validate all sparse entries. These are the same checks as performed by
+	// the BSD tar utility.
+	if size < 0 {
+		return false
+	}
+	var pre sparseEntry
+	for _, cur := range sp {
+		switch {
+		case cur.Offset < 0 || cur.Length < 0:
+			return false // Negative values are never okay
+		case cur.Offset > math.MaxInt64-cur.Length:
+			return false // Integer overflow with large length
+		case cur.endOffset() > size:
+			return false // Region extends beyond the actual size
+		case pre.endOffset() > cur.Offset:
+			return false // Regions cannot overlap and must be in order
+		}
+		pre = cur
+	}
+	return true
+}
+
+// alignSparseEntries mutates src and returns dst where each fragment's
+// starting offset is aligned up to the nearest block edge, and each
+// ending offset is aligned down to the nearest block edge.
+//
+// Even though the Go tar Reader and the BSD tar utility can handle entries
+// with arbitrary offsets and lengths, the GNU tar utility can only handle
+// offsets and lengths that are multiples of blockSize.
+func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
+	dst := src[:0]
+	for _, s := range src {
+		pos, end := s.Offset, s.endOffset()
+		pos += blockPadding(+pos) // Round-up to nearest blockSize
+		if end != size {
+			end -= blockPadding(-end) // Round-down to nearest blockSize
+		}
+		if pos < end {
+			dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
+		}
+	}
+	return dst
+}
+
+// invertSparseEntries converts a sparse map from one form to the other.
+// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
+// The input must have been already validated.
+// +// This function mutates src and returns a normalized map where: +// * adjacent fragments are coalesced together +// * only the last fragment may be empty +// * the endOffset of the last fragment is the total size +func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry { + dst := src[:0] + var pre sparseEntry + for _, cur := range src { + if cur.Length == 0 { + continue // Skip empty fragments + } + pre.Length = cur.Offset - pre.Offset + if pre.Length > 0 { + dst = append(dst, pre) // Only add non-empty fragments + } + pre.Offset = cur.endOffset() + } + pre.Length = size - pre.Offset // Possibly the only empty fragment + return append(dst, pre) +} + +// fileState tracks the number of logical (includes sparse holes) and physical +// (actual in tar archive) bytes remaining for the current file. +// +// Invariant: LogicalRemaining >= PhysicalRemaining +type fileState interface { + LogicalRemaining() int64 + PhysicalRemaining() int64 +} + +// allowedFormats determines which formats can be used. +// The value returned is the logical OR of multiple possible formats. +// If the value is FormatUnknown, then the input Header cannot be encoded +// and an error is returned explaining why. +// +// As a by-product of checking the fields, this function returns paxHdrs, which +// contain all fields that could not be directly encoded. +// A value receiver ensures that this method does not mutate the source Header. +func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { + format = FormatUSTAR | FormatPAX | FormatGNU + paxHdrs = make(map[string]string) + + var whyNoUSTAR, whyNoPAX, whyNoGNU string + var preferPAX bool // Prefer PAX over USTAR + verifyString := func(s string, size int, name, paxKey string) { + // NUL-terminator is optional for path and linkpath. + // Technically, it is required for uname and gname, + // but neither GNU nor BSD tar checks for it. 
+ tooLong := len(s) > size + allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath + if hasNUL(s) || (tooLong && !allowLongGNU) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) + format.mustNotBe(FormatGNU) + } + if !isASCII(s) || tooLong { + canSplitUSTAR := paxKey == paxPath + if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) + format.mustNotBe(FormatUSTAR) + } + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = s + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == s { + paxHdrs[paxKey] = v + } + } + verifyNumeric := func(n int64, size int, name, paxKey string) { + if !fitsInBase256(size, n) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) + format.mustNotBe(FormatGNU) + } + if !fitsInOctal(size, n) { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) + format.mustNotBe(FormatUSTAR) + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = strconv.FormatInt(n, 10) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { + paxHdrs[paxKey] = v + } + } + verifyTime := func(ts time.Time, size int, name, paxKey string) { + if ts.IsZero() { + return // Always okay + } + if !fitsInBase256(size, ts.Unix()) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) + format.mustNotBe(FormatGNU) + } + isMtime := paxKey == paxMtime + fitsOctal := fitsInOctal(size, ts.Unix()) + if (isMtime && !fitsOctal) || !isMtime { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) + format.mustNotBe(FormatUSTAR) + } + needsNano := ts.Nanosecond() != 0 + if !isMtime || !fitsOctal || needsNano { + preferPAX = true // USTAR may truncate sub-second measurements + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = formatPAXTime(ts) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { + paxHdrs[paxKey] = v + } + } + + // Check basic fields. + var blk block + v7 := blk.V7() + ustar := blk.USTAR() + gnu := blk.GNU() + verifyString(h.Name, len(v7.Name()), "Name", paxPath) + verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) + verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) + verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) + verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) + verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) + verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) + verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) + verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) + verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) + verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) + verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) + verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) + + // Check for header-only types. + var whyOnlyPAX, whyOnlyGNU string + switch h.Typeflag { + case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: + // Exclude TypeLink and TypeSymlink, since they may reference directories. 
+ if strings.HasSuffix(h.Name, "/") { + return FormatUnknown, nil, headerError{"filename may not have trailing slash"} + } + case TypeXHeader, TypeGNULongName, TypeGNULongLink: + return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} + case TypeXGlobalHeader: + h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format} + if !reflect.DeepEqual(h, h2) { + return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"} + } + whyOnlyPAX = "only PAX supports TypeXGlobalHeader" + format.mayOnlyBe(FormatPAX) + } + if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { + return FormatUnknown, nil, headerError{"negative size on header-only type"} + } + + // Check PAX records. + if len(h.Xattrs) > 0 { + for k, v := range h.Xattrs { + paxHdrs[paxSchilyXattr+k] = v + } + whyOnlyPAX = "only PAX supports Xattrs" + format.mayOnlyBe(FormatPAX) + } + if len(h.PAXRecords) > 0 { + for k, v := range h.PAXRecords { + switch _, exists := paxHdrs[k]; { + case exists: + continue // Do not overwrite existing records + case h.Typeflag == TypeXGlobalHeader: + paxHdrs[k] = v // Copy all records + case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): + paxHdrs[k] = v // Ignore local records that may conflict + } + } + whyOnlyPAX = "only PAX supports PAXRecords" + format.mayOnlyBe(FormatPAX) + } + for k, v := range paxHdrs { + if !validPAXRecord(k, v) { + return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} + } + } + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Check sparse files. + if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { + if isHeaderOnlyType(h.Typeflag) { + return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} + } + if !validateSparseEntries(h.SparseHoles, h.Size) { + return FormatUnknown, nil, headerError{"invalid sparse holes"} + } + if h.Typeflag == TypeGNUSparse { + whyOnlyGNU = "only GNU supports TypeGNUSparse" + format.mayOnlyBe(FormatGNU) + } else { + whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" + format.mustNotBe(FormatGNU) + } + whyNoUSTAR = "USTAR does not support sparse files" + format.mustNotBe(FormatUSTAR) + } + */ + + // Check desired format. + if wantFormat := h.Format; wantFormat != FormatUnknown { + if wantFormat.has(FormatPAX) && !preferPAX { + wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too + } + format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted + } + if format == FormatUnknown { + switch h.Format { + case FormatUSTAR: + err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} + case FormatPAX: + err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} + case FormatGNU: + err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} + default: + err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} + } + } + return format, paxHdrs, err +} + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. 
+type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + mode |= os.ModeSticky + } + + // Set file mode bits; clear perm, setuid, setgid, and sticky bits. + switch m := os.FileMode(fi.h.Mode) &^ 07777; m { + case c_ISDIR: + mode |= os.ModeDir + case c_ISFIFO: + mode |= os.ModeNamedPipe + case c_ISLNK: + mode |= os.ModeSymlink + case c_ISBLK: + mode |= os.ModeDevice + case c_ISCHR: + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case c_ISSOCK: + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + mode |= os.ModeSymlink + case TypeChar: + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + mode |= os.ModeDevice + case TypeDir: + mode |= os.ModeDir + case TypeFifo: + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +const ( + // Mode constants from the USTAR spec: + // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + + // Common Unix mode constants; these are not defined in any common tar standard. + // Header.FileInfo understands these, but FileInfoHeader will never produce these. + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// +// Since os.FileInfo's Name method only returns the base name of +// the file it describes, it may be necessary to modify Header.Name +// to provide the full path name of the file. 
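A short sketch of the caveat in the comment above: FileInfo carries only the base name, so callers archiving a tree typically rewrite Header.Name themselves (the file and paths here are illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/vbatts/tar-split/archive/tar"
)

func main() {
	fi, err := os.Lstat("go.mod") // any local file
	if err != nil {
		panic(err)
	}

	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		panic(err)
	}

	// FileInfoHeader only records the base name; overwrite Name
	// with the slash-separated path you want inside the archive.
	hdr.Name = "project/go.mod"
	fmt.Println(hdr.Name, hdr.Size, hdr.FileInfo().Mode())
}
```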
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("archive/tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Typeflag = TypeChar + } else { + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + case fm&os.ModeSocket != 0: + return nil, fmt.Errorf("archive/tar: sockets not supported") + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + if sys.PAXRecords != nil { + h.PAXRecords = make(map[string]string) + for k, v := range sys.PAXRecords { + h.PAXRecords[k] = v + } + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/format.go new file mode 100644 index 0000000000..60977980c5 --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/format.go @@ -0,0 +1,307 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import "strings" + +// Format represents the tar archive format. +// +// The original tar format was introduced in Unix V7. +// Since then, there have been multiple competing formats attempting to +// standardize or extend the V7 format to overcome its limitations. +// The most common formats are the USTAR, PAX, and GNU formats, +// each with their own advantages and limitations. 
+// +// The following table captures the capabilities of each format: +// +// | USTAR | PAX | GNU +// ------------------+--------+-----------+---------- +// Name | 256B | unlimited | unlimited +// Linkname | 100B | unlimited | unlimited +// Size | uint33 | unlimited | uint89 +// Mode | uint21 | uint21 | uint57 +// Uid/Gid | uint21 | unlimited | uint57 +// Uname/Gname | 32B | unlimited | 32B +// ModTime | uint33 | unlimited | int89 +// AccessTime | n/a | unlimited | int89 +// ChangeTime | n/a | unlimited | int89 +// Devmajor/Devminor | uint21 | uint21 | uint57 +// ------------------+--------+-----------+---------- +// string encoding | ASCII | UTF-8 | binary +// sub-second times | no | yes | no +// sparse files | no | yes | yes +// +// The table's upper portion shows the Header fields, where each format reports +// the maximum number of bytes allowed for each string field and +// the integer type used to store each numeric field +// (where timestamps are stored as the number of seconds since the Unix epoch). +// +// The table's lower portion shows specialized features of each format, +// such as supported string encodings, support for sub-second timestamps, +// or support for sparse files. +// +// The Writer currently provides no support for sparse files. +type Format int + +// Constants to identify various tar formats. +const ( + // Deliberately hide the meaning of constants from public API. + _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc... + + // FormatUnknown indicates that the format is unknown. + FormatUnknown + + // The format of the original Unix V7 tar tool prior to standardization. + formatV7 + + // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988. + // + // While this format is compatible with most tar readers, + // the format has several limitations making it unsuitable for some usages. + // Most notably, it cannot support sparse files, files larger than 8GiB, + // filenames larger than 256 characters, and non-ASCII filenames. + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + FormatUSTAR + + // FormatPAX represents the PAX header format defined in POSIX.1-2001. + // + // PAX extends USTAR by writing a special file with Typeflag TypeXHeader + // preceding the original header. This file contains a set of key-value + // records, which are used to overcome USTAR's shortcomings, in addition to + // providing the ability to have sub-second resolution for timestamps. + // + // Some newer formats add their own extensions to PAX by defining their + // own keys and assigning certain semantic meaning to the associated values. + // For example, sparse file support in PAX is implemented using keys + // defined by the GNU manual (e.g., "GNU.sparse.map"). + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html + FormatPAX + + // FormatGNU represents the GNU header format. + // + // The GNU header format is older than the USTAR and PAX standards and + // is not compatible with them. The GNU format supports + // arbitrary file sizes, filenames of arbitrary encoding and length, + // sparse files, and other features. + // + // It is recommended that PAX be chosen over GNU unless the target + // application can only parse GNU formatted archives. + // + // Reference: + // https://www.gnu.org/software/tar/manual/html_node/Standard.html + FormatGNU + + // Schily's tar format, which is incompatible with USTAR. 
+ // This does not cover STAR extensions to the PAX format; these fall under + // the PAX format. + formatSTAR + + formatMax +) + +func (f Format) has(f2 Format) bool { return f&f2 != 0 } +func (f *Format) mayBe(f2 Format) { *f |= f2 } +func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 } +func (f *Format) mustNotBe(f2 Format) { *f &^= f2 } + +var formatNames = map[Format]string{ + formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR", +} + +func (f Format) String() string { + var ss []string + for f2 := Format(1); f2 < formatMax; f2 <<= 1 { + if f.has(f2) { + ss = append(ss, formatNames[f2]) + } + } + switch len(ss) { + case 0: + return "" + case 1: + return ss[0] + default: + return "(" + strings.Join(ss, " | ") + ")" + } +} + +// Magics used to identify various formats. +const ( + magicGNU, versionGNU = "ustar ", " \x00" + magicUSTAR, versionUSTAR = "ustar\x00", "00" + trailerSTAR = "tar\x00" +) + +// Size constants from various tar specifications. +const ( + blockSize = 512 // Size of each block in a tar stream + nameSize = 100 // Max length of the name field in USTAR format + prefixSize = 155 // Max length of the prefix field in USTAR format + + // Max length of a special file (PAX header, GNU long name or link). + // This matches the limit used by libarchive. + maxSpecialFileSize = 1 << 20 +) + +// blockPadding computes the number of bytes needed to pad offset up to the +// nearest block edge where 0 <= n < blockSize. +func blockPadding(offset int64) (n int64) { + return -offset & (blockSize - 1) +} + +var zeroBlock block + +type block [blockSize]byte + +// Convert block to any number of formats. +func (b *block) V7() *headerV7 { return (*headerV7)(b) } +func (b *block) GNU() *headerGNU { return (*headerGNU)(b) } +func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) } +func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) } +func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } + +// GetFormat checks that the block is a valid tar header based on the checksum. +// It then attempts to guess the specific format based on magic values. +// If the checksum fails, then FormatUnknown is returned. +func (b *block) GetFormat() Format { + // Verify checksum. + var p parser + value := p.parseOctal(b.V7().Chksum()) + chksum1, chksum2 := b.ComputeChecksum() + if p.err != nil || (value != chksum1 && value != chksum2) { + return FormatUnknown + } + + // Guess the magic values. + magic := string(b.USTAR().Magic()) + version := string(b.USTAR().Version()) + trailer := string(b.STAR().Trailer()) + switch { + case magic == magicUSTAR && trailer == trailerSTAR: + return formatSTAR + case magic == magicUSTAR: + return FormatUSTAR | FormatPAX + case magic == magicGNU && version == versionGNU: + return FormatGNU + default: + return formatV7 + } +} + +// SetFormat writes the magic values necessary for specified format +// and then updates the checksum accordingly. +func (b *block) SetFormat(format Format) { + // Set the magic values. + switch { + case format.has(formatV7): + // Do nothing. + case format.has(FormatGNU): + copy(b.GNU().Magic(), magicGNU) + copy(b.GNU().Version(), versionGNU) + case format.has(formatSTAR): + copy(b.STAR().Magic(), magicUSTAR) + copy(b.STAR().Version(), versionUSTAR) + copy(b.STAR().Trailer(), trailerSTAR) + case format.has(FormatUSTAR | FormatPAX): + copy(b.USTAR().Magic(), magicUSTAR) + copy(b.USTAR().Version(), versionUSTAR) + default: + panic("invalid format") + } + + // Update checksum. 
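+	// For illustration: an all-zero block checksums to 8*0x20 = 256 (the
+	// checksum field itself is summed as spaces), and a block of all 0xFF
+	// bytes yields 504*0xFF + 8*0x20 = 128776; hence the 256..128776 range
+	// noted below.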
+ // This field is special in that it is terminated by a NULL then space. + var f formatter + field := b.V7().Chksum() + chksum, _ := b.ComputeChecksum() // Possible values are 256..128776 + f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143 + field[7] = ' ' +} + +// ComputeChecksum computes the checksum for the header block. +// POSIX specifies a sum of the unsigned byte values, but the Sun tar used +// signed byte values. +// We compute and return both. +func (b *block) ComputeChecksum() (unsigned, signed int64) { + for i, c := range b { + if 148 <= i && i < 156 { + c = ' ' // Treat the checksum field itself as all spaces. + } + unsigned += int64(c) + signed += int64(int8(c)) + } + return unsigned, signed +} + +// Reset clears the block with all zeros. +func (b *block) Reset() { + *b = block{} +} + +type headerV7 [blockSize]byte + +func (h *headerV7) Name() []byte { return h[000:][:100] } +func (h *headerV7) Mode() []byte { return h[100:][:8] } +func (h *headerV7) UID() []byte { return h[108:][:8] } +func (h *headerV7) GID() []byte { return h[116:][:8] } +func (h *headerV7) Size() []byte { return h[124:][:12] } +func (h *headerV7) ModTime() []byte { return h[136:][:12] } +func (h *headerV7) Chksum() []byte { return h[148:][:8] } +func (h *headerV7) TypeFlag() []byte { return h[156:][:1] } +func (h *headerV7) LinkName() []byte { return h[157:][:100] } + +type headerGNU [blockSize]byte + +func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerGNU) Magic() []byte { return h[257:][:6] } +func (h *headerGNU) Version() []byte { return h[263:][:2] } +func (h *headerGNU) UserName() []byte { return h[265:][:32] } +func (h *headerGNU) GroupName() []byte { return h[297:][:32] } +func (h *headerGNU) DevMajor() []byte { return h[329:][:8] } +func (h *headerGNU) DevMinor() []byte { return h[337:][:8] } +func (h *headerGNU) AccessTime() []byte { return h[345:][:12] } +func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] } +func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) } +func (h *headerGNU) RealSize() []byte { return h[483:][:12] } + +type headerSTAR [blockSize]byte + +func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerSTAR) Version() []byte { return h[263:][:2] } +func (h *headerSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerSTAR) Prefix() []byte { return h[345:][:131] } +func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] } +func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] } +func (h *headerSTAR) Trailer() []byte { return h[508:][:4] } + +type headerUSTAR [blockSize]byte + +func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerUSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerUSTAR) Version() []byte { return h[263:][:2] } +func (h *headerUSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } + +type sparseArray []byte + +func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) } +func (s sparseArray) 
IsExtended() []byte { return s[24*s.MaxEntries():][:1] }
+func (s sparseArray) MaxEntries() int   { return len(s) / 24 }
+
+type sparseElem []byte
+
+func (s sparseElem) Offset() []byte { return s[00:][:12] }
+func (s sparseElem) Length() []byte { return s[12:][:12] }
diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
new file mode 100644
index 0000000000..a645c41605
--- /dev/null
+++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -0,0 +1,944 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+	"bytes"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Reader provides sequential access to the contents of a tar archive.
+// Reader.Next advances to the next file in the archive (including the first),
+// and then Reader can be treated as an io.Reader to access the file's data.
+type Reader struct {
+	r    io.Reader
+	pad  int64      // Amount of padding (ignored) after current file entry
+	curr fileReader // Reader for current file entry
+	blk  block      // Buffer to use as temporary local storage
+
+	// err is a persistent error.
+	// It is only the responsibility of every exported method of Reader to
+	// ensure that this error is sticky.
+	err error
+
+	RawAccounting bool          // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this.
+	rawBytes      *bytes.Buffer // last raw bits
+}
+
+type fileReader interface {
+	io.Reader
+	fileState
+
+	WriteTo(io.Writer) (int64, error)
+}
+
+// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
+// This includes the header and padding.
+//
+// # This call resets the current rawbytes buffer
+//
+// Only when RawAccounting is enabled, otherwise this returns nil
+func (tr *Reader) RawBytes() []byte {
+	if !tr.RawAccounting {
+		return nil
+	}
+	if tr.rawBytes == nil {
+		tr.rawBytes = bytes.NewBuffer(nil)
+	}
+	defer tr.rawBytes.Reset() // if we've read them, then flush them.
+
+	return tr.rawBytes.Bytes()
+
+}
+
+// ExpectedPadding returns the number of bytes of padding expected after the last header returned by Next()
+func (tr *Reader) ExpectedPadding() int64 {
+	return tr.pad
+}
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader {
+	return &Reader{r: r, curr: &regFileReader{r, 0}}
+}
+
+// Next advances to the next entry in the tar archive.
+// The Header.Size determines how many bytes can be read for the next file.
+// Any remaining data in the current file is automatically discarded.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+	if tr.err != nil {
+		return nil, tr.err
+	}
+	hdr, err := tr.next()
+	tr.err = err
+	return hdr, err
+}
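+
+// A typical read loop over an archive (an editorial sketch, not part of the
+// vendored source; r is any io.Reader positioned at the start of a tar
+// stream):
+//
+//	tr := NewReader(r)
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break // End of archive
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(hdr.Name)
+//		if _, err := io.Copy(io.Discard, tr); err != nil {
+//			return err
+//		}
+//	}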
+
+func (tr *Reader) next() (*Header, error) {
+	var paxHdrs map[string]string
+	var gnuLongName, gnuLongLink string
+
+	if tr.RawAccounting {
+		if tr.rawBytes == nil {
+			tr.rawBytes = bytes.NewBuffer(nil)
+		} else {
+			tr.rawBytes.Reset()
+		}
+	}
+
+	// Externally, Next iterates through the tar archive as if it is a series of
+	// files. Internally, the tar format often uses fake "files" to add meta
+	// data that describes the next file. These meta data "files" should not
+	// normally be visible to the outside. As such, this loop iterates through
+	// one or more "header files" until it finds a "normal file".
+	format := FormatUSTAR | FormatPAX | FormatGNU
+	for {
+		// Discard the remainder of the file and any padding.
+		if err := discard(tr, tr.curr.PhysicalRemaining()); err != nil {
+			return nil, err
+		}
+		n, err := tryReadFull(tr.r, tr.blk[:tr.pad])
+		if err != nil {
+			return nil, err
+		}
+		if tr.RawAccounting {
+			tr.rawBytes.Write(tr.blk[:n])
+		}
+		tr.pad = 0
+
+		hdr, rawHdr, err := tr.readHeader()
+		if err != nil {
+			return nil, err
+		}
+		if err := tr.handleRegularFile(hdr); err != nil {
+			return nil, err
+		}
+		format.mayOnlyBe(hdr.Format)
+
+		// Check for PAX/GNU special headers and files.
+		switch hdr.Typeflag {
+		case TypeXHeader, TypeXGlobalHeader:
+			format.mayOnlyBe(FormatPAX)
+			paxHdrs, err = parsePAX(tr)
+			if err != nil {
+				return nil, err
+			}
+			if hdr.Typeflag == TypeXGlobalHeader {
+				if err = mergePAX(hdr, paxHdrs); err != nil {
+					return nil, err
+				}
+				return &Header{
+					Name:       hdr.Name,
+					Typeflag:   hdr.Typeflag,
+					Xattrs:     hdr.Xattrs,
+					PAXRecords: hdr.PAXRecords,
+					Format:     format,
+				}, nil
+			}
+			continue // This is a meta header affecting the next header
+		case TypeGNULongName, TypeGNULongLink:
+			format.mayOnlyBe(FormatGNU)
+			realname, err := readSpecialFile(tr)
+			if err != nil {
+				return nil, err
+			}
+
+			if tr.RawAccounting {
+				tr.rawBytes.Write(realname)
+			}
+
+			var p parser
+			switch hdr.Typeflag {
+			case TypeGNULongName:
+				gnuLongName = p.parseString(realname)
+			case TypeGNULongLink:
+				gnuLongLink = p.parseString(realname)
+			}
+			continue // This is a meta header affecting the next header
+		default:
+			// The old GNU sparse format is handled here since it is technically
+			// just a regular file with additional attributes.
+
+			if err := mergePAX(hdr, paxHdrs); err != nil {
+				return nil, err
+			}
+			if gnuLongName != "" {
+				hdr.Name = gnuLongName
+			}
+			if gnuLongLink != "" {
+				hdr.Linkname = gnuLongLink
+			}
+			if hdr.Typeflag == TypeRegA {
+				if strings.HasSuffix(hdr.Name, "/") {
+					hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
+				} else {
+					hdr.Typeflag = TypeReg
+				}
+			}
+
+			// The extended headers may have updated the size.
+			// Thus, setup the regFileReader again after merging PAX headers.
+			if err := tr.handleRegularFile(hdr); err != nil {
+				return nil, err
+			}
+
+			// Sparse formats rely on being able to read from the logical data
+			// section; there must be a preceding call to handleRegularFile.
+			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
+				return nil, err
+			}
+
+			// Set the final guess at the format.
+			if format.has(FormatUSTAR) && format.has(FormatPAX) {
+				format.mayOnlyBe(FormatUSTAR)
+			}
+			hdr.Format = format
+			return hdr, nil // This is a file, so stop
+		}
+	}
+}
+
+// handleRegularFile sets up the current file reader and padding such that it
+// can only read the following logical data section. It will properly handle
+// special headers that contain no data section.
+func (tr *Reader) handleRegularFile(hdr *Header) error {
+	nb := hdr.Size
+	if isHeaderOnlyType(hdr.Typeflag) {
+		nb = 0
+	}
+	if nb < 0 {
+		return ErrHeader
+	}
+
+	tr.pad = blockPadding(nb)
+	tr.curr = &regFileReader{r: tr.r, nb: nb}
+	return nil
+}
+
+// handleSparseFile checks if the current file is a sparse format of any type
+// and sets the curr reader appropriately.
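+// Holes in a sparse file are later read back as runs of NUL bytes (see Read).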
+func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error { + var spd sparseDatas + var err error + if hdr.Typeflag == TypeGNUSparse { + spd, err = tr.readOldGNUSparseMap(hdr, rawHdr) + } else { + spd, err = tr.readGNUSparsePAXHeaders(hdr) + } + + // If sp is non-nil, then this is a sparse file. + // Note that it is possible for len(sp) == 0. + if err == nil && spd != nil { + if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) { + return ErrHeader + } + sph := invertSparseEntries(spd, hdr.Size) + tr.curr = &sparseFileReader{tr.curr, sph, 0} + } + return err +} + +// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. +// If they are found, then this function reads the sparse map and returns it. +// This assumes that 0.0 headers have already been converted to 0.1 headers +// by the PAX header parsing logic. +func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) { + // Identify the version of GNU headers. + var is1x0 bool + major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor] + switch { + case major == "0" && (minor == "0" || minor == "1"): + is1x0 = false + case major == "1" && minor == "0": + is1x0 = true + case major != "" || minor != "": + return nil, nil // Unknown GNU sparse PAX version + case hdr.PAXRecords[paxGNUSparseMap] != "": + is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess + default: + return nil, nil // Not a PAX format GNU sparse file. + } + hdr.Format.mayOnlyBe(FormatPAX) + + // Update hdr from GNU sparse PAX headers. + if name := hdr.PAXRecords[paxGNUSparseName]; name != "" { + hdr.Name = name + } + size := hdr.PAXRecords[paxGNUSparseSize] + if size == "" { + size = hdr.PAXRecords[paxGNUSparseRealSize] + } + if size != "" { + n, err := strconv.ParseInt(size, 10, 64) + if err != nil { + return nil, ErrHeader + } + hdr.Size = n + } + + // Read the sparse map according to the appropriate format. + if is1x0 { + return readGNUSparseMap1x0(tr.curr) + } + return readGNUSparseMap0x1(hdr.PAXRecords) +} + +// mergePAX merges paxHdrs into hdr for all relevant fields of Header. +func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { + for k, v := range paxHdrs { + if v == "" { + continue // Keep the original USTAR value + } + var id64 int64 + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxUname: + hdr.Uname = v + case paxGname: + hdr.Gname = v + case paxUid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Uid = int(id64) // Integer overflow possible + case paxGid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Gid = int(id64) // Integer overflow possible + case paxAtime: + hdr.AccessTime, err = parsePAXTime(v) + case paxMtime: + hdr.ModTime, err = parsePAXTime(v) + case paxCtime: + hdr.ChangeTime, err = parsePAXTime(v) + case paxSize: + hdr.Size, err = strconv.ParseInt(v, 10, 64) + default: + if strings.HasPrefix(k, paxSchilyXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxSchilyXattr):]] = v + } + } + if err != nil { + return ErrHeader + } + } + hdr.PAXRecords = paxHdrs + return nil +} + +// parsePAX parses PAX headers. 
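+// Each PAX record is a line of the form "%d %s=%s\n" (size, key, value); for
+// example, "30 mtime=1350244992.023960108\n" has a total length of 30 bytes.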
+// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := readSpecialFile(r) + if err != nil { + return nil, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into format 0.1 + // headers since 0.0 headers were not PAX compliant. + var sparseMap []string + + paxHdrs := make(map[string]string) + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + switch key { + case paxGNUSparseOffset, paxGNUSparseNumBytes: + // Validate sparse header order and value. + if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) || + (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) || + strings.Contains(value, ",") { + return nil, ErrHeader + } + sparseMap = append(sparseMap, value) + default: + paxHdrs[key] = value + } + } + if len(sparseMap) > 0 { + paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") + } + return paxHdrs, nil +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. It returns the raw block of the +// header in case further processing is required. +// +// The err will be set to io.EOF only when one of the following occurs: +// - Exactly 0 bytes are read and EOF is hit. +// - Exactly 1 block of zeros is read and EOF is hit. +// - At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() (*Header, *block, error) { + // Two blocks of zero bytes marks the end of the archive. + n, err := io.ReadFull(tr.r, tr.blk[:]) + if tr.RawAccounting && (err == nil || err == io.EOF) { + tr.rawBytes.Write(tr.blk[:n]) + } + if err != nil { + return nil, nil, err // EOF is okay here; exactly 0 bytes read + } + + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + n, err = io.ReadFull(tr.r, tr.blk[:]) + if tr.RawAccounting && (err == nil || err == io.EOF) { + tr.rawBytes.Write(tr.blk[:n]) + } + if err != nil { + return nil, nil, err // EOF is okay here; exactly 1 block of zeros read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read + } + return nil, nil, ErrHeader // Zero block and then non-zero block + } + + // Verify the header matches a known format. + format := tr.blk.GetFormat() + if format == FormatUnknown { + return nil, nil, ErrHeader + } + + var p parser + hdr := new(Header) + + // Unpack the V7 header. + v7 := tr.blk.V7() + hdr.Typeflag = v7.TypeFlag()[0] + hdr.Name = p.parseString(v7.Name()) + hdr.Linkname = p.parseString(v7.LinkName()) + hdr.Size = p.parseNumeric(v7.Size()) + hdr.Mode = p.parseNumeric(v7.Mode()) + hdr.Uid = int(p.parseNumeric(v7.UID())) + hdr.Gid = int(p.parseNumeric(v7.GID())) + hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) + + // Unpack format specific fields. 
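+	// Only post-V7 formats carry the magic, user/group names, device numbers,
+	// and (for USTAR/STAR) the prefix field; a V7 header has none of these.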
+ if format > formatV7 { + ustar := tr.blk.USTAR() + hdr.Uname = p.parseString(ustar.UserName()) + hdr.Gname = p.parseString(ustar.GroupName()) + hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) + hdr.Devminor = p.parseNumeric(ustar.DevMinor()) + + var prefix string + switch { + case format.has(FormatUSTAR | FormatPAX): + hdr.Format = format + ustar := tr.blk.USTAR() + prefix = p.parseString(ustar.Prefix()) + + // For Format detection, check if block is properly formatted since + // the parser is more liberal than what USTAR actually permits. + notASCII := func(r rune) bool { return r >= 0x80 } + if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 { + hdr.Format = FormatUnknown // Non-ASCII characters in block. + } + nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 } + if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) && + nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) { + hdr.Format = FormatUnknown // Numeric fields must end in NUL + } + case format.has(formatSTAR): + star := tr.blk.STAR() + prefix = p.parseString(star.Prefix()) + hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) + case format.has(FormatGNU): + hdr.Format = format + var p2 parser + gnu := tr.blk.GNU() + if b := gnu.AccessTime(); b[0] != 0 { + hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0) + } + if b := gnu.ChangeTime(); b[0] != 0 { + hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0) + } + + // Prior to Go1.8, the Writer had a bug where it would output + // an invalid tar file in certain rare situations because the logic + // incorrectly believed that the old GNU format had a prefix field. + // This is wrong and leads to an output file that mangles the + // atime and ctime fields, which are often left unused. + // + // In order to continue reading tar files created by former, buggy + // versions of Go, we skeptically parse the atime and ctime fields. + // If we are unable to parse them and the prefix field looks like + // an ASCII string, then we fallback on the pre-Go1.8 behavior + // of treating these fields as the USTAR prefix field. + // + // Note that this will not use the fallback logic for all possible + // files generated by a pre-Go1.8 toolchain. If the generated file + // happened to have a prefix field that parses as valid + // atime and ctime fields (e.g., when they are valid octal strings), + // then it is impossible to distinguish between an valid GNU file + // and an invalid pre-Go1.8 file. + // + // See https://golang.org/issues/12594 + // See https://golang.org/issues/21005 + if p2.err != nil { + hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{} + ustar := tr.blk.USTAR() + if s := p.parseString(ustar.Prefix()); isASCII(s) { + prefix = s + } + hdr.Format = FormatUnknown // Buggy file is not GNU + } + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + return hdr, &tr.blk, p.err +} + +// readOldGNUSparseMap reads the sparse map from the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. +// If it's larger than four entries, then one or more extension headers are used +// to store the rest of the sparse map. +// +// The Header.Size does not reflect the size of any extended headers used. +// Thus, this function will read from the raw io.Reader to fetch extra headers. +// This method mutates blk in the process. 
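+// The header block itself holds at most 4 sparse entries; each extension
+// block that follows holds up to 21 more (512/24, see sparseArray.MaxEntries).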
+func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) { + // Make sure that the input format is GNU. + // Unfortunately, the STAR format also has a sparse header format that uses + // the same type flag but has a completely different layout. + if blk.GetFormat() != FormatGNU { + return nil, ErrHeader + } + hdr.Format.mayOnlyBe(FormatGNU) + + var p parser + hdr.Size = p.parseNumeric(blk.GNU().RealSize()) + if p.err != nil { + return nil, p.err + } + s := blk.GNU().Sparse() + spd := make(sparseDatas, 0, s.MaxEntries()) + for { + for i := 0; i < s.MaxEntries(); i++ { + // This termination condition is identical to GNU and BSD tar. + if s.Entry(i).Offset()[0] == 0x00 { + break // Don't return, need to process extended headers (even if empty) + } + offset := p.parseNumeric(s.Entry(i).Offset()) + length := p.parseNumeric(s.Entry(i).Length()) + if p.err != nil { + return nil, p.err + } + spd = append(spd, sparseEntry{Offset: offset, Length: length}) + } + + if s.IsExtended()[0] > 0 { + // There are more entries. Read an extension header and parse its entries. + if _, err := mustReadFull(tr.r, blk[:]); err != nil { + return nil, err + } + if tr.RawAccounting { + tr.rawBytes.Write(blk[:]) + } + s = blk.Sparse() + continue + } + return spd, nil // Done + } +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, length). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { + var ( + cntNewline int64 + buf bytes.Buffer + blk block + totalSize int + ) + + // feedTokens copies data in blocks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + feedTokens := func(n int64) error { + for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } + if _, err := mustReadFull(r, blk[:]); err != nil { + return err + } + buf.Write(blk[:]) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + nextToken := func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return strings.TrimRight(tok, "\n") + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. 
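+	// For example, a map with two entries covering (0, 512) and (4096, 512)
+	// is encoded as the decimal tokens "2\n0\n512\n4096\n512\n".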
+ if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + spd := make(sparseDatas, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err1 := strconv.ParseInt(nextToken(), 10, 64) + length, err2 := strconv.ParseInt(nextToken(), 10, 64) + if err1 != nil || err2 != nil { + return nil, ErrHeader + } + spd = append(spd, sparseEntry{Offset: offset, Length: length}) + } + return spd, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. + numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") + if len(sparseMap) == 1 && sparseMap[0] == "" { + sparseMap = sparseMap[:0] + } + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. + spd := make(sparseDatas, 0, numEntries) + for len(sparseMap) >= 2 { + offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) + length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) + if err1 != nil || err2 != nil { + return nil, ErrHeader + } + spd = append(spd, sparseEntry{Offset: offset, Length: length}) + sparseMap = sparseMap[2:] + } + return spd, nil +} + +// Read reads from the current file in the tar archive. +// It returns (0, io.EOF) when it reaches the end of that file, +// until Next is called to advance to the next file. +// +// If the current file is sparse, then the regions marked as a hole +// are read back as NUL-bytes. +// +// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (int, error) { + if tr.err != nil { + return 0, tr.err + } + n, err := tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return n, err +} + +// writeTo writes the content of the current file to w. +// The bytes written matches the number of remaining bytes in the current file. +// +// If the current file is sparse and w is an io.WriteSeeker, +// then writeTo uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are filled with NULs. +// This always writes the last byte to ensure w is the right size. +// +// TODO(dsnet): Re-export this when adding sparse file support. +// See https://golang.org/issue/22735 +func (tr *Reader) writeTo(w io.Writer) (int64, error) { + if tr.err != nil { + return 0, tr.err + } + n, err := tr.curr.WriteTo(w) + if err != nil { + tr.err = err + } + return n, err +} + +// regFileReader is a fileReader for reading data from a regular file entry. 
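+// It caps reads at the number of remaining bytes and converts a premature
+// io.EOF from the underlying reader into io.ErrUnexpectedEOF.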
+type regFileReader struct { + r io.Reader // Underlying Reader + nb int64 // Number of remaining bytes to read +} + +func (fr *regFileReader) Read(b []byte) (n int, err error) { + if int64(len(b)) > fr.nb { + b = b[:fr.nb] + } + if len(b) > 0 { + n, err = fr.r.Read(b) + fr.nb -= int64(n) + } + switch { + case err == io.EOF && fr.nb > 0: + return n, io.ErrUnexpectedEOF + case err == nil && fr.nb == 0: + return n, io.EOF + default: + return n, err + } +} + +func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { + return io.Copy(w, struct{ io.Reader }{fr}) +} + +func (fr regFileReader) LogicalRemaining() int64 { + return fr.nb +} + +func (fr regFileReader) PhysicalRemaining() int64 { + return fr.nb +} + +// sparseFileReader is a fileReader for reading data from a sparse file entry. +type sparseFileReader struct { + fr fileReader // Underlying fileReader + sp sparseHoles // Normalized list of sparse holes + pos int64 // Current position in sparse file +} + +func (sr *sparseFileReader) Read(b []byte) (n int, err error) { + finished := int64(len(b)) >= sr.LogicalRemaining() + if finished { + b = b[:sr.LogicalRemaining()] + } + + b0 := b + endPos := sr.pos + int64(len(b)) + for endPos > sr.pos && err == nil { + var nf int // Bytes read in fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + bf := b[:min(int64(len(b)), holeStart-sr.pos)] + nf, err = tryReadFull(sr.fr, bf) + } else { // In a hole fragment + bf := b[:min(int64(len(b)), holeEnd-sr.pos)] + nf, err = tryReadFull(zeroReader{}, bf) + } + b = b[nf:] + sr.pos += int64(nf) + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + case finished: + return n, io.EOF + default: + return n, nil + } +} + +func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) { + ws, ok := w.(io.WriteSeeker) + if ok { + if _, err := ws.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(w, struct{ io.Reader }{sr}) + } + + var writeLastByte bool + pos0 := sr.pos + for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil { + var nf int64 // Size of fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + nf = holeStart - sr.pos + nf, err = io.CopyN(ws, sr.fr, nf) + } else { // In a hole fragment + nf = holeEnd - sr.pos + if sr.PhysicalRemaining() == 0 { + writeLastByte = true + nf-- + } + _, err = ws.Seek(nf, io.SeekCurrent) + } + sr.pos += nf + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // write a single byte to ensure the file is the right size. 
+ if writeLastByte && err == nil { + _, err = ws.Write([]byte{0}) + sr.pos++ + } + + n = sr.pos - pos0 + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + default: + return n, nil + } +} + +func (sr sparseFileReader) LogicalRemaining() int64 { + return sr.sp[len(sr.sp)-1].endOffset() - sr.pos +} +func (sr sparseFileReader) PhysicalRemaining() int64 { + return sr.fr.PhysicalRemaining() +} + +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +// mustReadFull is like io.ReadFull except it returns +// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read. +func mustReadFull(r io.Reader, b []byte) (int, error) { + n, err := tryReadFull(r, b) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err +} + +// tryReadFull is like io.ReadFull except it returns +// io.EOF when it is hit before len(b) bytes are read. +func tryReadFull(r io.Reader, b []byte) (n int, err error) { + for len(b) > n && err == nil { + var nn int + nn, err = r.Read(b[n:]) + n += nn + } + if len(b) == n && err == io.EOF { + err = nil + } + return n, err +} + +// readSpecialFile is like io.ReadAll except it returns +// ErrFieldTooLong if more than maxSpecialFileSize is read. +func readSpecialFile(r io.Reader) ([]byte, error) { + buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1)) + if len(buf) > maxSpecialFileSize { + return nil, ErrFieldTooLong + } + return buf, err +} + +// discard skips n bytes in r, reporting an error if unable to do so. +func discard(tr *Reader, n int64) error { + var seekSkipped, copySkipped int64 + var err error + r := tr.r + if tr.RawAccounting { + + copySkipped, err = io.CopyN(tr.rawBytes, tr.r, n) + goto out + } + + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + if sr, ok := r.(io.Seeker); ok && n > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, io.SeekCurrent) + if pos1 >= 0 && err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(n-1, io.SeekCurrent) + if pos2 < 0 || err != nil { + return err + } + seekSkipped = pos2 - pos1 + } + } + + copySkipped, err = io.CopyN(io.Discard, r, n-seekSkipped) +out: + if err == io.EOF && seekSkipped+copySkipped < n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go new file mode 100644 index 0000000000..cf9cc79c59 --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go new file mode 100644 index 0000000000..6f17dbe307 --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go new file mode 100644 index 0000000000..868105f338 --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go @@ -0,0 +1,96 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "os/user" + "runtime" + "strconv" + "sync" + "syscall" +) + +func init() { + sysStat = statUnix +} + +// userMap and groupMap caches UID and GID lookups for performance reasons. +// The downside is that renaming uname or gname by the OS never takes effect. +var userMap, groupMap sync.Map // map[int]string + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + + // Best effort at populating Uname and Gname. + // The os/user functions may fail for any number of reasons + // (not implemented on that platform, cgo not enabled, etc). + if u, ok := userMap.Load(h.Uid); ok { + h.Uname = u.(string) + } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil { + h.Uname = u.Username + userMap.Store(h.Uid, h.Uname) + } + if g, ok := groupMap.Load(h.Gid); ok { + h.Gname = g.(string) + } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil { + h.Gname = g.Name + groupMap.Store(h.Gid, h.Gname) + } + + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + + // Best effort at populating Devmajor and Devminor. + if h.Typeflag == TypeChar || h.Typeflag == TypeBlock { + dev := uint64(sys.Rdev) // May be int32 or uint32 + switch runtime.GOOS { + case "linux": + // Copied from golang.org/x/sys/unix/dev_linux.go. + major := uint32((dev & 0x00000000000fff00) >> 8) + major |= uint32((dev & 0xfffff00000000000) >> 32) + minor := uint32((dev & 0x00000000000000ff) >> 0) + minor |= uint32((dev & 0x00000ffffff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "darwin": + // Copied from golang.org/x/sys/unix/dev_darwin.go. + major := uint32((dev >> 24) & 0xff) + minor := uint32(dev & 0xffffff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "dragonfly": + // Copied from golang.org/x/sys/unix/dev_dragonfly.go. 
+ major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "freebsd": + // Copied from golang.org/x/sys/unix/dev_freebsd.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "netbsd": + // Copied from golang.org/x/sys/unix/dev_netbsd.go. + major := uint32((dev & 0x000fff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xfff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "openbsd": + // Copied from golang.org/x/sys/unix/dev_openbsd.go. + major := uint32((dev & 0x0000ff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xffff0000) >> 8) + h.Devmajor, h.Devminor = int64(major), int64(minor) + default: + // TODO: Implement solaris (see https://golang.org/issue/8106) + } + } + return nil +} diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go new file mode 100644 index 0000000000..d144485a49 --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go @@ -0,0 +1,326 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" +) + +// hasNUL reports whether the NUL character exists within s. +func hasNUL(s string) bool { + return strings.IndexByte(s, 0) >= 0 +} + +// isASCII reports whether the input is an ASCII C-style string. +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 || c == 0x00 { + return false + } + } + return true +} + +// toASCII converts the input to an ASCII C-style string. +// This a best effort conversion, so invalid characters are dropped. +func toASCII(s string) string { + if isASCII(s) { + return s + } + b := make([]byte, 0, len(s)) + for _, c := range s { + if c < 0x80 && c != 0x00 { + b = append(b, byte(c)) + } + } + return string(b) +} + +type parser struct { + err error // Last error seen +} + +type formatter struct { + err error // Last error seen +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + if i := bytes.IndexByte(b, 0); i >= 0 { + return string(b[:i]) + } + return string(b) +} + +// formatString copies s into b, NUL-terminating if possible. +func (f *formatter) formatString(b []byte, s string) { + if len(s) > len(b) { + f.err = ErrFieldTooLong + } + copy(b, s) + if len(s) < len(b) { + b[len(s)] = 0 + } + + // Some buggy readers treat regular files with a trailing slash + // in the V7 path field as a directory even though the full path + // recorded elsewhere (e.g., via PAX record) contains no trailing slash. + if len(s) > len(b) && b[len(b)-1] == '/' { + n := len(strings.TrimRight(s[:len(b)], "/")) + b[n] = 0 // Replace trailing slash with NUL terminator + } +} + +// fitsInBase256 reports whether x can be encoded into n bytes using base-256 +// encoding. Unlike octal encoding, base-256 encoding does not require that the +// string ends with a NUL character. Thus, all n bytes are available for output. +// +// If operating in binary mode, this assumes strict GNU binary mode; which means +// that the first byte can only be either 0x80 or 0xff. 
Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+	binBits := uint(n-1) * 8
+	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+	// Check for base-256 (binary) format first.
+	// If the first bit is set, then all following bits constitute a two's
+	// complement encoded number in big-endian byte order.
+	if len(b) > 0 && b[0]&0x80 != 0 {
+		// Handling negative numbers relies on the following identity:
+		//	-a-1 == ^a
+		//
+		// If the number is negative, we use an inversion mask to invert the
+		// data bytes and treat the value as an unsigned number.
+		var inv byte // 0x00 if positive or zero, 0xff if negative
+		if b[0]&0x40 != 0 {
+			inv = 0xff
+		}
+
+		var x uint64
+		for i, c := range b {
+			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+			if i == 0 {
+				c &= 0x7f // Ignore signal bit in first byte
+			}
+			if (x >> 56) > 0 {
+				p.err = ErrHeader // Integer overflow
+				return 0
+			}
+			x = x<<8 | uint64(c)
+		}
+		if (x >> 63) > 0 {
+			p.err = ErrHeader // Integer overflow
+			return 0
+		}
+		if inv == 0xff {
+			return ^int64(x)
+		}
+		return int64(x)
+	}
+
+	// Normal case is base-8 (octal) format.
+	return p.parseOctal(b)
+}
+
+// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
+// Otherwise it will attempt to use base-256 (binary) encoding.
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInOctal(len(b), x) {
+		f.formatOctal(b, x)
+		return
+	}
+
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
+		return
+	}
+
+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
+}
+
+func (p *parser) parseOctal(b []byte) int64 {
+	// Because unused fields are filled with NULs, we need
+	// to skip leading NULs. Fields may also be padded with
+	// spaces or NULs.
+	// So we remove leading and trailing NULs and spaces to
+	// be sure.
+	b = bytes.Trim(b, " \x00")
+
+	if len(b) == 0 {
+		return 0
+	}
+	x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+	if perr != nil {
+		p.err = ErrHeader
+	}
+	return int64(x)
+}
+
+func (f *formatter) formatOctal(b []byte, x int64) {
+	if !fitsInOctal(len(b), x) {
+		x = 0 // Last resort, just write zero
+		f.err = ErrFieldTooLong
+	}
+
+	s := strconv.FormatInt(x, 8)
+	// Add leading zeros, but leave room for a NUL.
+	if n := len(b) - len(s) - 1; n > 0 {
+		s = strings.Repeat("0", n) + s
+	}
+	f.formatString(b, s)
+}
+
+// fitsInOctal reports whether the integer x fits in a field n-bytes long
+// using octal encoding with the appropriate NUL terminator.
+func fitsInOctal(n int, x int64) bool {
+	octBits := uint(n-1) * 3
+	return x >= 0 && (n >= 22 || x < 1<<octBits)
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in the PAX
+// specification. Note that this implementation allows for negative timestamps,
+// which is allowed for by the PAX specification, but not always portable.
+func parsePAXTime(s string) (time.Time, error) {
+	const maxNanoSecondDigits = 9
+
+	// Split string into seconds and sub-seconds parts.
+	ss, sn := s, ""
+	if pos := strings.IndexByte(s, '.'); pos >= 0 {
+		ss, sn = s[:pos], s[pos+1:]
+	}
+
+	// Parse the seconds.
+	secs, err := strconv.ParseInt(ss, 10, 64)
+	if err != nil {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) == 0 {
+		return time.Unix(secs, 0), nil // No sub-second values
+	}
+
+	// Parse the nanoseconds.
+	if strings.Trim(sn, "0123456789") != "" {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) < maxNanoSecondDigits {
+		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
+	} else {
+		sn = sn[:maxNanoSecondDigits] // Right truncate
+	}
+	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+	if len(ss) > 0 && ss[0] == '-' {
+		return time.Unix(secs, -1*nsecs), nil // Negative correction
+	}
+	return time.Unix(secs, nsecs), nil
+}
+
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function is capable of negative timestamps.
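+// For example, time.Unix(1350244992, 23960108) is rendered as
+// "1350244992.023960108"; trailing zeros in the fraction are trimmed.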
+func formatPAXTime(ts time.Time) (s string) { + secs, nsecs := ts.Unix(), ts.Nanosecond() + if nsecs == 0 { + return strconv.FormatInt(secs, 10) + } + + // If seconds is negative, then perform correction. + sign := "" + if secs < 0 { + sign = "-" // Remember sign + secs = -(secs + 1) // Add a second to secs + nsecs = -(nsecs - 1E9) // Take that second away from nsecs + } + return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") +} + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + k, v = rec[:eq], rec[eq+1:] + + if !validPAXRecord(k, v) { + return "", "", s, ErrHeader + } + return k, v, rem, nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) (string, error) { + if !validPAXRecord(k, v) { + return "", ErrHeader + } + + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := strconv.Itoa(size) + " " + k + "=" + v + "\n" + + // Final adjustment if adding size field increased the record size. + if len(record) != size { + size = len(record) + record = strconv.Itoa(size) + " " + k + "=" + v + "\n" + } + return record, nil +} + +// validPAXRecord reports whether the key-value pair is valid where each +// record is formatted as: +// "%d %s=%s\n" % (size, key, value) +// +// Keys and values should be UTF-8, but the number of bad writers out there +// forces us to be a more liberal. +// Thus, we only reject all keys with NUL, and only reject NULs in values +// for the PAX version of the USTAR string fields. +// The key must not contain an '=' character. +func validPAXRecord(k, v string) bool { + if k == "" || strings.IndexByte(k, '=') >= 0 { + return false + } + switch k { + case paxPath, paxLinkpath, paxUname, paxGname: + return !hasNUL(v) + default: + return !hasNUL(k) + } +} diff --git a/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/writer.go new file mode 100644 index 0000000000..893eac00ae --- /dev/null +++ b/openshift/tools/vendor/github.com/vbatts/tar-split/archive/tar/writer.go @@ -0,0 +1,656 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "time" +) + +// Writer provides sequential writing of a tar archive. +// Write.WriteHeader begins a new file with the provided Header, +// and then Writer can be treated as an io.Writer to supply that file's data. 
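+//
+// A minimal write sequence (an editorial sketch, not part of the vendored
+// source; w is any io.Writer and data is a []byte):
+//
+//	tw := NewWriter(w)
+//	hdr := &Header{Name: "a.txt", Mode: 0600, Size: int64(len(data))}
+//	if err := tw.WriteHeader(hdr); err != nil {
+//		return err
+//	}
+//	if _, err := tw.Write(data); err != nil {
+//		return err
+//	}
+//	return tw.Close()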
+type Writer struct {
+	w    io.Writer
+	pad  int64      // Amount of padding to write after current file entry
+	curr fileWriter // Writer for current file entry
+	hdr  Header     // Shallow copy of Header that is safe for mutations
+	blk  block      // Buffer to use as temporary local storage
+
+	// err is a persistent error.
+	// It is only the responsibility of every exported method of Writer to
+	// ensure that this error is sticky.
+	err error
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{w: w, curr: &regFileWriter{w, 0}}
+}
+
+type fileWriter interface {
+	io.Writer
+	fileState
+
+	ReadFrom(io.Reader) (int64, error)
+}
+
+// Flush finishes writing the current file's block padding.
+// The current file must be fully written before Flush can be called.
+//
+// This is unnecessary as the next call to WriteHeader or Close
+// will implicitly flush out the file's padding.
+func (tw *Writer) Flush() error {
+	if tw.err != nil {
+		return tw.err
+	}
+	if nb := tw.curr.LogicalRemaining(); nb > 0 {
+		return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
+	}
+	if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
+		return tw.err
+	}
+	tw.pad = 0
+	return nil
+}
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// The Header.Size determines how many bytes can be written for the next file.
+// If the current file is not fully written, then this returns an error.
+// This implicitly flushes any padding necessary before writing the header.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	tw.hdr = *hdr // Shallow copy of Header
+
+	// Avoid usage of the legacy TypeRegA flag, and automatically promote
+	// it to use TypeReg or TypeDir.
+	if tw.hdr.Typeflag == TypeRegA {
+		if strings.HasSuffix(tw.hdr.Name, "/") {
+			tw.hdr.Typeflag = TypeDir
+		} else {
+			tw.hdr.Typeflag = TypeReg
+		}
+	}
+
+	// Round ModTime and ignore AccessTime and ChangeTime unless
+	// the format is explicitly chosen.
+	// This ensures nominal usage of WriteHeader (without specifying the format)
+	// does not always result in the PAX format being chosen, which
+	// causes a 1KiB increase to every header.
+	if tw.hdr.Format == FormatUnknown {
+		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
+		tw.hdr.AccessTime = time.Time{}
+		tw.hdr.ChangeTime = time.Time{}
+	}
+
+	allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
+	switch {
+	case allowedFormats.has(FormatUSTAR):
+		tw.err = tw.writeUSTARHeader(&tw.hdr)
+		return tw.err
+	case allowedFormats.has(FormatPAX):
+		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
+		return tw.err
+	case allowedFormats.has(FormatGNU):
+		tw.err = tw.writeGNUHeader(&tw.hdr)
+		return tw.err
+	default:
+		return err // Non-fatal error
+	}
+}
+
+func (tw *Writer) writeUSTARHeader(hdr *Header) error {
+	// Check if we can use USTAR prefix/suffix splitting.
+	var namePrefix string
+	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
+		namePrefix, hdr.Name = prefix, suffix
+	}
+
+	// Pack the main header.
+ var f formatter + blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal) + f.formatString(blk.USTAR().Prefix(), namePrefix) + blk.SetFormat(FormatUSTAR) + if f.err != nil { + return f.err // Should never happen since header is validated + } + return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag) +} + +func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { + realName, realSize := hdr.Name, hdr.Size + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Handle sparse files. + var spd sparseDatas + var spb []byte + if len(hdr.SparseHoles) > 0 { + sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. + hdr.Size = 0 // Replace with encoded size + spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') + for _, s := range spd { + hdr.Size += s.Length + spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') + spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') + } + pad := blockPadding(int64(len(spb))) + spb = append(spb, zeroBlock[:pad]...) + hdr.Size += int64(len(spb)) // Accounts for encoded sparse map + + // Add and modify appropriate PAX records. + dir, file := path.Split(realName) + hdr.Name = path.Join(dir, "GNUSparseFile.0", file) + paxHdrs[paxGNUSparseMajor] = "1" + paxHdrs[paxGNUSparseMinor] = "0" + paxHdrs[paxGNUSparseName] = realName + paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) + paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) + delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName + } + */ + _ = realSize + + // Write PAX records to the output. + isGlobal := hdr.Typeflag == TypeXGlobalHeader + if len(paxHdrs) > 0 || isGlobal { + // Sort keys for deterministic ordering. + var keys []string + for k := range paxHdrs { + keys = append(keys, k) + } + sort.Strings(keys) + + // Write each record to a buffer. + var buf strings.Builder + for _, k := range keys { + rec, err := formatPAXRecord(k, paxHdrs[k]) + if err != nil { + return err + } + buf.WriteString(rec) + } + + // Write the extended header file. + var name string + var flag byte + if isGlobal { + name = realName + if name == "" { + name = "GlobalHead.0.0" + } + flag = TypeXGlobalHeader + } else { + dir, file := path.Split(realName) + name = path.Join(dir, "PaxHeaders.0", file) + flag = TypeXHeader + } + data := buf.String() + if len(data) > maxSpecialFileSize { + return ErrFieldTooLong + } + if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { + return err // Global headers return here + } + } + + // Pack the main header. + var f formatter // Ignore errors since they are expected + fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } + blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) + blk.SetFormat(FormatPAX) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Write the sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.curr since the sparse map is accounted for in hdr.Size. + if _, err := tw.curr.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + */ + return nil +} + +func (tw *Writer) writeGNUHeader(hdr *Header) error { + // Use long-link files if Name or Linkname exceeds the field size. 
+ const longName = "././@LongLink" + if len(hdr.Name) > nameSize { + data := hdr.Name + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { + return err + } + } + if len(hdr.Linkname) > nameSize { + data := hdr.Linkname + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { + return err + } + } + + // Pack the main header. + var f formatter // Ignore errors since they are expected + var spd sparseDatas + var spb []byte + blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) + if !hdr.AccessTime.IsZero() { + f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) + } + if !hdr.ChangeTime.IsZero() { + f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) + } + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + if hdr.Typeflag == TypeGNUSparse { + sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. + formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { + for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { + f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) + f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) + sp = sp[1:] + } + if len(sp) > 0 { + sa.IsExtended()[0] = 1 + } + return sp + } + sp2 := formatSPD(spd, blk.GNU().Sparse()) + for len(sp2) > 0 { + var spHdr block + sp2 = formatSPD(sp2, spHdr.Sparse()) + spb = append(spb, spHdr[:]...) + } + + // Update size fields in the header block. + realSize := hdr.Size + hdr.Size = 0 // Encoded size; does not account for encoded sparse map + for _, s := range spd { + hdr.Size += s.Length + } + copy(blk.V7().Size(), zeroBlock[:]) // Reset field + f.formatNumeric(blk.V7().Size(), hdr.Size) + f.formatNumeric(blk.GNU().RealSize(), realSize) + } + */ + blk.SetFormat(FormatGNU) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // Write the extended sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.w since the sparse map is not accounted for in hdr.Size. + if _, err := tw.w.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + return nil +} + +type ( + stringFormatter func([]byte, string) + numberFormatter func([]byte, int64) +) + +// templateV7Plus fills out the V7 fields of a block using values from hdr. +// It also fills out fields (uname, gname, devmajor, devminor) that are +// shared in the USTAR, PAX, and GNU formats using the provided formatters. +// +// The block returned is only valid until the next call to +// templateV7Plus or writeRawFile. +func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { + tw.blk.Reset() + + modTime := hdr.ModTime + if modTime.IsZero() { + modTime = time.Unix(0, 0) + } + + v7 := tw.blk.V7() + v7.TypeFlag()[0] = hdr.Typeflag + fmtStr(v7.Name(), hdr.Name) + fmtStr(v7.LinkName(), hdr.Linkname) + fmtNum(v7.Mode(), hdr.Mode) + fmtNum(v7.UID(), int64(hdr.Uid)) + fmtNum(v7.GID(), int64(hdr.Gid)) + fmtNum(v7.Size(), hdr.Size) + fmtNum(v7.ModTime(), modTime.Unix()) + + ustar := tw.blk.USTAR() + fmtStr(ustar.UserName(), hdr.Uname) + fmtStr(ustar.GroupName(), hdr.Gname) + fmtNum(ustar.DevMajor(), hdr.Devmajor) + fmtNum(ustar.DevMinor(), hdr.Devminor) + + return &tw.blk +} + +// writeRawFile writes a minimal file with the given name and flag type. 
+// It uses format to encode the header format and will write data as the body.
+// It uses default values for all of the other fields (as BSD and GNU tar does).
+func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
+	tw.blk.Reset()
+
+	// Best effort for the filename.
+	name = toASCII(name)
+	if len(name) > nameSize {
+		name = name[:nameSize]
+	}
+	name = strings.TrimRight(name, "/")
+
+	var f formatter
+	v7 := tw.blk.V7()
+	v7.TypeFlag()[0] = flag
+	f.formatString(v7.Name(), name)
+	f.formatOctal(v7.Mode(), 0)
+	f.formatOctal(v7.UID(), 0)
+	f.formatOctal(v7.GID(), 0)
+	f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
+	f.formatOctal(v7.ModTime(), 0)
+	tw.blk.SetFormat(format)
+	if f.err != nil {
+		return f.err // Only occurs if size condition is violated
+	}
+
+	// Write the header and data.
+	if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
+		return err
+	}
+	_, err := io.WriteString(tw, data)
+	return err
+}
+
+// writeRawHeader writes the value of blk, regardless of its value.
+// It sets up the Writer such that it can accept a file of the given size.
+// If the flag is a special header-only flag, then the size is treated as zero.
+func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	if _, err := tw.w.Write(blk[:]); err != nil {
+		return err
+	}
+	if isHeaderOnlyType(flag) {
+		size = 0
+	}
+	tw.curr = &regFileWriter{tw.w, size}
+	tw.pad = blockPadding(size)
+	return nil
+}
+
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
+	length := len(name)
+	if length <= nameSize || !isASCII(name) {
+		return "", "", false
+	} else if length > prefixSize+1 {
+		length = prefixSize + 1
+	} else if name[length-1] == '/' {
+		length--
+	}
+
+	i := strings.LastIndex(name[:length], "/")
+	nlen := len(name) - i - 1 // nlen is length of suffix
+	plen := i                 // plen is length of prefix
+	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
+		return "", "", false
+	}
+	return name[:i], name[i+1:], true
+}
+
+// Write writes to the current file in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// Header.Size bytes are written after WriteHeader.
+//
+// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
+// of what the Header.Size claims.
+func (tw *Writer) Write(b []byte) (int, error) {
+	if tw.err != nil {
+		return 0, tw.err
+	}
+	n, err := tw.curr.Write(b)
+	if err != nil && err != ErrWriteTooLong {
+		tw.err = err
+	}
+	return n, err
+}
+
+// readFrom populates the content of the current file by reading from r.
+// The bytes read must match the number of remaining bytes in the current file.
+//
+// If the current file is sparse and r is an io.ReadSeeker,
+// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are all NULs.
+// This always reads the last byte to ensure r is the right size.
+//
+// TODO(dsnet): Re-export this when adding sparse file support.
+// See https://golang.org/issue/22735 +func (tw *Writer) readFrom(r io.Reader) (int64, error) { + if tw.err != nil { + return 0, tw.err + } + n, err := tw.curr.ReadFrom(r) + if err != nil && err != ErrWriteTooLong { + tw.err = err + } + return n, err +} + +// Close closes the tar archive by flushing the padding, and writing the footer. +// If the current file (from a prior call to WriteHeader) is not fully written, +// then this returns an error. +func (tw *Writer) Close() error { + if tw.err == ErrWriteAfterClose { + return nil + } + if tw.err != nil { + return tw.err + } + + // Trailer: two zero blocks. + err := tw.Flush() + for i := 0; i < 2 && err == nil; i++ { + _, err = tw.w.Write(zeroBlock[:]) + } + + // Ensure all future actions are invalid. + tw.err = ErrWriteAfterClose + return err // Report IO errors +} + +// regFileWriter is a fileWriter for writing data to a regular file entry. +type regFileWriter struct { + w io.Writer // Underlying Writer + nb int64 // Number of remaining bytes to write +} + +func (fw *regFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > fw.nb + if overwrite { + b = b[:fw.nb] + } + if len(b) > 0 { + n, err = fw.w.Write(b) + fw.nb -= int64(n) + } + switch { + case err != nil: + return n, err + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(struct{ io.Writer }{fw}, r) +} + +func (fw regFileWriter) LogicalRemaining() int64 { + return fw.nb +} +func (fw regFileWriter) PhysicalRemaining() int64 { + return fw.nb +} + +// sparseFileWriter is a fileWriter for writing data to a sparse file entry. +type sparseFileWriter struct { + fw fileWriter // Underlying fileWriter + sp sparseDatas // Normalized list of data fragments + pos int64 // Current position in sparse file +} + +func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > sw.LogicalRemaining() + if overwrite { + b = b[:sw.LogicalRemaining()] + } + + b0 := b + endPos := sw.pos + int64(len(b)) + for endPos > sw.pos && err == nil { + var nf int // Bytes written in fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + bf := b[:min(int64(len(b)), dataStart-sw.pos)] + nf, err = zeroWriter{}.Write(bf) + } else { // In a data fragment + bf := b[:min(int64(len(b)), dataEnd-sw.pos)] + nf, err = sw.fw.Write(bf) + } + b = b[nf:] + sw.pos += int64(nf) + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { + rs, ok := r.(io.ReadSeeker) + if ok { + if _, err := rs.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(struct{ io.Writer }{sw}, r) + } + + var readLastByte bool + pos0 := sw.pos + for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { + var nf int64 // Size of fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole 
fragment + nf = dataStart - sw.pos + if sw.PhysicalRemaining() == 0 { + readLastByte = true + nf-- + } + _, err = rs.Seek(nf, io.SeekCurrent) + } else { // In a data fragment + nf = dataEnd - sw.pos + nf, err = io.CopyN(sw.fw, rs, nf) + } + sw.pos += nf + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // read a single byte to ensure the file is the right size. + if readLastByte && err == nil { + _, err = mustReadFull(rs, []byte{0}) + sw.pos++ + } + + n = sw.pos - pos0 + switch { + case err == io.EOF: + return n, io.ErrUnexpectedEOF + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + default: + return n, ensureEOF(rs) + } +} + +func (sw sparseFileWriter) LogicalRemaining() int64 { + return sw.sp[len(sw.sp)-1].endOffset() - sw.pos +} +func (sw sparseFileWriter) PhysicalRemaining() int64 { + return sw.fw.PhysicalRemaining() +} + +// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. +type zeroWriter struct{} + +func (zeroWriter) Write(b []byte) (int, error) { + for i, c := range b { + if c != 0 { + return i, errWriteHole + } + } + return len(b), nil +} + +// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. +func ensureEOF(r io.Reader) error { + n, err := tryReadFull(r, []byte{0}) + switch { + case n > 0: + return ErrWriteTooLong + case err == io.EOF: + return nil + default: + return err + } +} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/LICENSE b/openshift/tools/vendor/go.opentelemetry.io/otel/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/README.md b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/README.md deleted file mode 100644 index 5b3da8f14c..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Attribute - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/doc.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/doc.go deleted file mode 100644 index eef51ebc2a..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package attribute provides key and value attributes. 
-package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/encoder.go deleted file mode 100644 index 318e42fcab..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "bytes" - "sync" - "sync/atomic" -) - -type ( - // Encoder is a mechanism for serializing an attribute set into a specific - // string representation that supports caching, to avoid repeated - // serialization. An example could be an exporter encoding the attribute - // set into a wire representation. - Encoder interface { - // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. - Encode(iterator Iterator) string - - // ID returns a value that is unique for each class of attribute - // encoder. Attribute encoders allocate these using `NewEncoderID`. - ID() EncoderID - } - - // EncoderID is used to identify distinct Encoder - // implementations, for caching encoded results. - EncoderID struct { - value uint64 - } - - // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of - // allocations used in encoding attributes. This implementation encodes a - // comma-separated list of key=value, with '/'-escaping of '=', ',', and - // '\'. - defaultAttrEncoder struct { - // pool is a pool of attribute set builders. The buffers in this pool - // grow to a size that most attribute encodings will not allocate new - // memory. - pool sync.Pool // *bytes.Buffer - } -) - -// escapeChar is used to ensure uniqueness of the attribute encoding where -// keys or values contain either '=' or ','. Since there is no parser needed -// for this encoding and its only requirement is to be unique, this choice is -// arbitrary. Users will see these in some exporters (e.g., stdout), so the -// backslash ('\') is used as a conventional choice. -const escapeChar = '\\' - -var ( - _ Encoder = &defaultAttrEncoder{} - - // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter uint64 - - defaultEncoderOnce sync.Once - defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultAttrEncoder -) - -// NewEncoderID returns a unique attribute encoder ID. It should be called -// once per each type of attribute encoder. Preferably in init() or in var -// definition. -func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} -} - -// DefaultEncoder returns an attribute encoder that encodes attributes in such -// a way that each escaped attribute's key is followed by an equal sign and -// then by an escaped attribute's value. All key-value pairs are separated by -// a comma. -// -// Escaping is done by prepending a backslash before either a backslash, equal -// sign or a comma. -func DefaultEncoder() Encoder { - defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultAttrEncoder{ - pool: sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, - }, - } - }) - return defaultEncoderInstance -} - -// Encode is a part of an implementation of the AttributeEncoder interface. 
-func (d *defaultAttrEncoder) Encode(iter Iterator) string { - buf := d.pool.Get().(*bytes.Buffer) - defer d.pool.Put(buf) - buf.Reset() - - for iter.Next() { - i, keyValue := iter.IndexedAttribute() - if i > 0 { - _, _ = buf.WriteRune(',') - } - copyAndEscape(buf, string(keyValue.Key)) - - _, _ = buf.WriteRune('=') - - if keyValue.Value.Type() == STRING { - copyAndEscape(buf, keyValue.Value.AsString()) - } else { - _, _ = buf.WriteString(keyValue.Value.Emit()) - } - } - return buf.String() -} - -// ID is a part of an implementation of the AttributeEncoder interface. -func (*defaultAttrEncoder) ID() EncoderID { - return defaultEncoderID -} - -// copyAndEscape escapes `=`, `,` and its own escape character (`\`), -// making the default encoding unique. -func copyAndEscape(buf *bytes.Buffer, val string) { - for _, ch := range val { - switch ch { - case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) - } - _, _ = buf.WriteRune(ch) - } -} - -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. -func (id EncoderID) Valid() bool { - return id.value != 0 -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go deleted file mode 100644 index 3eeaa5d442..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Filter supports removing certain attributes from attribute sets. When -// the filter returns true, the attribute will be kept in the filtered -// attribute set. When the filter returns false, the attribute is excluded -// from the filtered attribute set, and the attribute instead appears in -// the removed list of excluded attributes. -type Filter func(KeyValue) bool - -// NewAllowKeysFilter returns a Filter that only allows attributes with one of -// the provided keys. -// -// If keys is empty a deny-all filter is returned. -func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return false } - } - - allowed := make(map[Key]struct{}, len(keys)) - for _, k := range keys { - allowed[k] = struct{}{} - } - return func(kv KeyValue) bool { - _, ok := allowed[kv.Key] - return ok - } -} - -// NewDenyKeysFilter returns a Filter that only allows attributes -// that do not have one of the provided keys. -// -// If keys is empty an allow-all filter is returned. -func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return true } - } - - forbid := make(map[Key]struct{}, len(keys)) - for _, k := range keys { - forbid[k] = struct{}{} - } - return func(kv KeyValue) bool { - _, ok := forbid[kv.Key] - return !ok - } -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go deleted file mode 100644 index b76d2bbfdb..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package attribute provide several helper functions for some commonly used -logic of processing attributes. 
-*/ -package attribute // import "go.opentelemetry.io/otel/attribute/internal" - -import ( - "reflect" -) - -// BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { - var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) interface{} { - var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { - var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { - var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]bool, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} - -// AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]int64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} - -// AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v interface{}) []float64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]float64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} - -// AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v interface{}) []string { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]string, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/iterator.go deleted file mode 100644 index f2ba89ce4b..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Iterator allows iterating over the set of attributes in order, sorted by -// key. -type Iterator struct { - storage *Set - idx int -} - -// MergeIterator supports iterating over two sets of attributes while -// eliminating duplicate values from the combined set. The first iterator -// value takes precedence. 
-type MergeIterator struct { - one oneIterator - two oneIterator - current KeyValue -} - -type oneIterator struct { - iter Iterator - done bool - attr KeyValue -} - -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. -func (i *Iterator) Next() bool { - i.idx++ - return i.idx < i.Len() -} - -// Label returns current KeyValue. Must be called only after Next returns -// true. -// -// Deprecated: Use Attribute instead. -func (i *Iterator) Label() KeyValue { - return i.Attribute() -} - -// Attribute returns the current KeyValue of the Iterator. It must be called -// only after Next returns true. -func (i *Iterator) Attribute() KeyValue { - kv, _ := i.storage.Get(i.idx) - return kv -} - -// IndexedLabel returns current index and attribute. Must be called only -// after Next returns true. -// -// Deprecated: Use IndexedAttribute instead. -func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// IndexedAttribute returns current index and attribute. Must be called only -// after Next returns true. -func (i *Iterator) IndexedAttribute() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// Len returns a number of attributes in the iterated set. -func (i *Iterator) Len() int { - return i.storage.Len() -} - -// ToSlice is a convenience function that creates a slice of attributes from -// the passed iterator. The iterator is set up to start from the beginning -// before creating the slice. -func (i *Iterator) ToSlice() []KeyValue { - l := i.Len() - if l == 0 { - return nil - } - i.idx = -1 - slice := make([]KeyValue, 0, l) - for i.Next() { - slice = append(slice, i.Attribute()) - } - return slice -} - -// NewMergeIterator returns a MergeIterator for merging two attribute sets. -// Duplicates are resolved by taking the value from the first set. -func NewMergeIterator(s1, s2 *Set) MergeIterator { - mi := MergeIterator{ - one: makeOne(s1.Iter()), - two: makeOne(s2.Iter()), - } - return mi -} - -func makeOne(iter Iterator) oneIterator { - oi := oneIterator{ - iter: iter, - } - oi.advance() - return oi -} - -func (oi *oneIterator) advance() { - if oi.done = !oi.iter.Next(); !oi.done { - oi.attr = oi.iter.Attribute() - } -} - -// Next returns true if there is another attribute available. -func (m *MergeIterator) Next() bool { - if m.one.done && m.two.done { - return false - } - if m.one.done { - m.current = m.two.attr - m.two.advance() - return true - } - if m.two.done { - m.current = m.one.attr - m.one.advance() - return true - } - if m.one.attr.Key == m.two.attr.Key { - m.current = m.one.attr // first iterator attribute value wins - m.one.advance() - m.two.advance() - return true - } - if m.one.attr.Key < m.two.attr.Key { - m.current = m.one.attr - m.one.advance() - return true - } - m.current = m.two.attr - m.two.advance() - return true -} - -// Label returns the current value after Next() returns true. -// -// Deprecated: Use Attribute instead. -func (m *MergeIterator) Label() KeyValue { - return m.current -} - -// Attribute returns the current value after Next() returns true. 
-func (m *MergeIterator) Attribute() KeyValue { - return m.current -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/key.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/key.go deleted file mode 100644 index d9a22c6502..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/key.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Key represents the key part in key-value pairs. It's a string. The -// allowed character set in the key depends on the use of the key. -type Key string - -// Bool creates a KeyValue instance with a BOOL Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Bool(name, value). -func (k Key) Bool(v bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolValue(v), - } -} - -// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- BoolSlice(name, value). -func (k Key) BoolSlice(v []bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolSliceValue(v), - } -} - -// Int creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int(name, value). -func (k Key) Int(v int) KeyValue { - return KeyValue{ - Key: k, - Value: IntValue(v), - } -} - -// IntSlice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- IntSlice(name, value). -func (k Key) IntSlice(v []int) KeyValue { - return KeyValue{ - Key: k, - Value: IntSliceValue(v), - } -} - -// Int64 creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64(name, value). -func (k Key) Int64(v int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64Value(v), - } -} - -// Int64Slice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64Slice(name, value). -func (k Key) Int64Slice(v []int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64SliceValue(v), - } -} - -// Float64 creates a KeyValue instance with a FLOAT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64(v float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64Value(v), - } -} - -// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64Slice(v []float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64SliceValue(v), - } -} - -// String creates a KeyValue instance with a STRING Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- String(name, value). -func (k Key) String(v string) KeyValue { - return KeyValue{ - Key: k, - Value: StringValue(v), - } -} - -// StringSlice creates a KeyValue instance with a STRINGSLICE Value. 
-// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- StringSlice(name, value). -func (k Key) StringSlice(v []string) KeyValue { - return KeyValue{ - Key: k, - Value: StringSliceValue(v), - } -} - -// Defined returns true for non-empty keys. -func (k Key) Defined() bool { - return len(k) != 0 -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/kv.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/kv.go deleted file mode 100644 index 3028f9a40f..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "fmt" -) - -// KeyValue holds a key and value pair. -type KeyValue struct { - Key Key - Value Value -} - -// Valid returns if kv is a valid OpenTelemetry attribute. -func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID -} - -// Bool creates a KeyValue with a BOOL Value type. -func Bool(k string, v bool) KeyValue { - return Key(k).Bool(v) -} - -// BoolSlice creates a KeyValue with a BOOLSLICE Value type. -func BoolSlice(k string, v []bool) KeyValue { - return Key(k).BoolSlice(v) -} - -// Int creates a KeyValue with an INT64 Value type. -func Int(k string, v int) KeyValue { - return Key(k).Int(v) -} - -// IntSlice creates a KeyValue with an INT64SLICE Value type. -func IntSlice(k string, v []int) KeyValue { - return Key(k).IntSlice(v) -} - -// Int64 creates a KeyValue with an INT64 Value type. -func Int64(k string, v int64) KeyValue { - return Key(k).Int64(v) -} - -// Int64Slice creates a KeyValue with an INT64SLICE Value type. -func Int64Slice(k string, v []int64) KeyValue { - return Key(k).Int64Slice(v) -} - -// Float64 creates a KeyValue with a FLOAT64 Value type. -func Float64(k string, v float64) KeyValue { - return Key(k).Float64(v) -} - -// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. -func Float64Slice(k string, v []float64) KeyValue { - return Key(k).Float64Slice(v) -} - -// String creates a KeyValue with a STRING Value type. -func String(k, v string) KeyValue { - return Key(k).String(v) -} - -// StringSlice creates a KeyValue with a STRINGSLICE Value type. -func StringSlice(k string, v []string) KeyValue { - return Key(k).StringSlice(v) -} - -// Stringer creates a new key-value pair with a passed name and a string -// value generated by the passed Stringer interface. -func Stringer(k string, v fmt.Stringer) KeyValue { - return Key(k).String(v.String()) -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go deleted file mode 100644 index 5791c6e7aa..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "math" -) - -func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func rawToBool(r uint64) bool { - return r != 0 -} - -func int64ToRaw(i int64) uint64 { - // Assumes original was a valid int64 (overflow not checked). 
- return uint64(i) // nolint: gosec -} - -func rawToInt64(r uint64) int64 { - // Assumes original was a valid int64 (overflow not checked). - return int64(r) // nolint: gosec -} - -func float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func rawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/set.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/set.go deleted file mode 100644 index 6cbefceadf..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/set.go +++ /dev/null @@ -1,411 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "cmp" - "encoding/json" - "reflect" - "slices" - "sort" -) - -type ( - // Set is the representation for a distinct attribute set. It manages an - // immutable set of attributes, with an internal cache for storing - // attribute encodings. - // - // This type will remain comparable for backwards compatibility. The - // equivalence of Sets across versions is not guaranteed to be stable. - // Prior versions may find two Sets to be equal or not when compared - // directly (i.e. ==), but subsequent versions may not. Users should use - // the Equals method to ensure stable equivalence checking. - // - // Users should also use the Distinct returned from Equivalent as a map key - // instead of a Set directly. In addition to that type providing guarantees - // on stable equivalence, it may also provide performance improvements. - Set struct { - equivalent Distinct - } - - // Distinct is a unique identifier of a Set. - // - // Distinct is designed to be ensures equivalence stability: comparisons - // will return the save value across versions. For this reason, Distinct - // should always be used as a map key instead of a Set. - Distinct struct { - iface interface{} - } - - // Sortable implements sort.Interface, used for sorting KeyValue. - // - // Deprecated: This type is no longer used. It was added as a performance - // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no - // longer supported by the module). - Sortable []KeyValue -) - -var ( - // keyValueType is used in computeDistinctReflect. - keyValueType = reflect.TypeOf(KeyValue{}) - - // emptySet is returned for empty attribute sets. - emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, - } -) - -// EmptySet returns a reference to a Set with no elements. -// -// This is a convenience provided for optimized calling utility. -func EmptySet() *Set { - return emptySet -} - -// reflectValue abbreviates reflect.ValueOf(d). -func (d Distinct) reflectValue() reflect.Value { - return reflect.ValueOf(d.iface) -} - -// Valid returns true if this value refers to a valid Set. -func (d Distinct) Valid() bool { - return d.iface != nil -} - -// Len returns the number of attributes in this set. -func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { - return 0 - } - return l.equivalent.reflectValue().Len() -} - -// Get returns the KeyValue at ordered position idx in this set. 
-func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || !l.equivalent.Valid() { - return KeyValue{}, false - } - value := l.equivalent.reflectValue() - - if idx >= 0 && idx < value.Len() { - // Note: The Go compiler successfully avoids an allocation for - // the interface{} conversion here: - return value.Index(idx).Interface().(KeyValue), true - } - - return KeyValue{}, false -} - -// Value returns the value of a specified key in this set. -func (l *Set) Value(k Key) (Value, bool) { - if l == nil || !l.equivalent.Valid() { - return Value{}, false - } - rValue := l.equivalent.reflectValue() - vlen := rValue.Len() - - idx := sort.Search(vlen, func(idx int) bool { - return rValue.Index(idx).Interface().(KeyValue).Key >= k - }) - if idx >= vlen { - return Value{}, false - } - keyValue := rValue.Index(idx).Interface().(KeyValue) - if k == keyValue.Key { - return keyValue.Value, true - } - return Value{}, false -} - -// HasValue tests whether a key is defined in this set. -func (l *Set) HasValue(k Key) bool { - if l == nil { - return false - } - _, ok := l.Value(k) - return ok -} - -// Iter returns an iterator for visiting the attributes in this set. -func (l *Set) Iter() Iterator { - return Iterator{ - storage: l, - idx: -1, - } -} - -// ToSlice returns the set of attributes belonging to this set, sorted, where -// keys appear no more than once. -func (l *Set) ToSlice() []KeyValue { - iter := l.Iter() - return iter.ToSlice() -} - -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any -// attribute set with the same elements as this, where sets are made unique by -// choosing the last value in the input for any given key. -func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent - } - return l.equivalent -} - -// Equals returns true if the argument set is equivalent to this set. -func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() -} - -// Encoded returns the encoded form of this set, according to encoder. -func (l *Set) Encoded(encoder Encoder) string { - if l == nil || encoder == nil { - return "" - } - - return encoder.Encode(l.Iter()) -} - -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - -// NewSet returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// Except for empty sets, this method adds an additional allocation compared -// with calls that include a Sortable. -func NewSet(kvs ...KeyValue) Set { - s, _ := NewSetWithFiltered(kvs, nil) - return s -} - -// NewSetWithSortable returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Sortable option as a memory optimization. -// -// Deprecated: Use [NewSet] instead. -func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { - s, _ := NewSetWithFiltered(kvs, nil) - return s -} - -// NewSetWithFiltered returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Filter to include/exclude attribute keys from the -// return value. Excluded keys are returned as a slice of attribute values. -func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - // Stable sort so the following de-duplication can implement - // last-value-wins semantics. 
- slices.SortStableFunc(kvs, func(a, b KeyValue) int { - return cmp.Compare(a.Key, b.Key) - }) - - position := len(kvs) - 1 - offset := position - 1 - - // The requirements stated above require that the stable - // result be placed in the end of the input slice, while - // overwritten values are swapped to the beginning. - // - // De-duplicate with last-value-wins semantics. Preserve - // duplicate values at the beginning of the input slice. - for ; offset >= 0; offset-- { - if kvs[offset].Key == kvs[position].Key { - continue - } - position-- - kvs[offset], kvs[position] = kvs[position], kvs[offset] - } - kvs = kvs[position:] - - if filter != nil { - if div := filteredToFront(kvs, filter); div != 0 { - return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] - } - } - return Set{equivalent: computeDistinct(kvs)}, nil -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -// -// Deprecated: Use [NewSetWithFiltered] instead. -func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { - return NewSetWithFiltered(kvs, filter) -} - -// filteredToFront filters slice in-place using keep function. All KeyValues that need to -// be removed are moved to the front. All KeyValues that need to be kept are -// moved (in-order) to the back. The index for the first KeyValue to be kept is -// returned. -func filteredToFront(slice []KeyValue, keep Filter) int { - n := len(slice) - j := n - for i := n - 1; i >= 0; i-- { - if keep(slice[i]) { - j-- - slice[i], slice[j] = slice[j], slice[i] - } - } - return j -} - -// Filter returns a filtered copy of this Set. See the documentation for -// NewSetWithSortableFiltered for more details. -func (l *Set) Filter(re Filter) (Set, []KeyValue) { - if re == nil { - return *l, nil - } - - // Iterate in reverse to the first attribute that will be filtered out. - n := l.Len() - first := n - 1 - for ; first >= 0; first-- { - kv, _ := l.Get(first) - if !re(kv) { - break - } - } - - // No attributes will be dropped, return the immutable Set l and nil. - if first < 0 { - return *l, nil - } - - // Copy now that we know we need to return a modified set. - // - // Do not do this in-place on the underlying storage of *Set l. Sets are - // immutable and filtering should not change this. - slice := l.ToSlice() - - // Don't re-iterate the slice if only slice[0] is filtered. - if first == 0 { - // It is safe to assume len(slice) >= 1 given we found at least one - // attribute above that needs to be filtered out. - return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] - } - - // Move the filtered slice[first] to the front (preserving order). 
- kv := slice[first] - copy(slice[1:first+1], slice[:first]) - slice[0] = kv - - // Do not re-evaluate re(slice[first+1:]). - div := filteredToFront(slice[1:first+1], re) + 1 - return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] -} - -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) - } - return Distinct{ - iface: iface, - } -} - -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { - switch len(kvs) { - case 1: - return [1]KeyValue(kvs) - case 2: - return [2]KeyValue(kvs) - case 3: - return [3]KeyValue(kvs) - case 4: - return [4]KeyValue(kvs) - case 5: - return [5]KeyValue(kvs) - case 6: - return [6]KeyValue(kvs) - case 7: - return [7]KeyValue(kvs) - case 8: - return [8]KeyValue(kvs) - case 9: - return [9]KeyValue(kvs) - case 10: - return [10]KeyValue(kvs) - default: - return nil - } -} - -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { - at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() - for i, keyValue := range kvs { - *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue - } - return at.Interface() -} - -// MarshalJSON returns the JSON encoding of the Set. -func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) -} - -// MarshalLog is the marshaling function used by the logging system to represent this Set. -func (l Set) MarshalLog() interface{} { - kvs := make(map[string]string) - for _, kv := range l.ToSlice() { - kvs[string(kv.Key)] = kv.Value.Emit() - } - return kvs -} - -// Len implements sort.Interface. -func (l *Sortable) Len() int { - return len(*l) -} - -// Swap implements sort.Interface. -func (l *Sortable) Swap(i, j int) { - (*l)[i], (*l)[j] = (*l)[j], (*l)[i] -} - -// Less implements sort.Interface. -func (l *Sortable) Less(i, j int) bool { - return (*l)[i].Key < (*l)[j].Key -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/type_string.go deleted file mode 100644 index e584b24776..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT. - -package attribute - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
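The Set removed above is the building block the rest of this vendored API leans on: construction is last-value-wins and Filter never mutates the receiver. A minimal sketch of those semantics, assuming only the public go.opentelemetry.io/otel/attribute package (the keys and values here are illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Duplicate keys collapse with last-value-wins semantics.
	set := attribute.NewSet(
		attribute.String("env", "dev"),
		attribute.Int("retries", 2),
		attribute.String("env", "prod"), // overrides "dev"
	)

	if v, ok := set.Value("env"); ok {
		fmt.Println("env =", v.AsString()) // env = prod
	}

	// Filter returns a new Set plus the excluded attributes;
	// the receiver is immutable and left untouched.
	kept, dropped := set.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key != "retries"
	})
	fmt.Println(kept.Len(), len(dropped)) // 1 1
}
```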
- var x [1]struct{} - _ = x[INVALID-0] - _ = x[BOOL-1] - _ = x[INT64-2] - _ = x[FLOAT64-3] - _ = x[STRING-4] - _ = x[BOOLSLICE-5] - _ = x[INT64SLICE-6] - _ = x[FLOAT64SLICE-7] - _ = x[STRINGSLICE-8] -} - -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" - -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/value.go b/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/value.go deleted file mode 100644 index 817eecacf1..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/attribute/value.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - attribute "go.opentelemetry.io/otel/attribute/internal" -) - -//go:generate stringer -type=Type - -// Type describes the type of the data Value holds. -type Type int // nolint: revive // redefines builtin Type. - -// Value represents the value part in key-value pairs. -type Value struct { - vtype Type - numeric uint64 - stringly string - slice interface{} -} - -const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota - // BOOL is a boolean Type Value. - BOOL - // INT64 is a 64-bit signed integral Type Value. - INT64 - // FLOAT64 is a 64-bit floating point Type Value. - FLOAT64 - // STRING is a string Type Value. - STRING - // BOOLSLICE is a slice of booleans Type Value. - BOOLSLICE - // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. - INT64SLICE - // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. - FLOAT64SLICE - // STRINGSLICE is a slice of strings Type Value. - STRINGSLICE -) - -// BoolValue creates a BOOL Value. -func BoolValue(v bool) Value { - return Value{ - vtype: BOOL, - numeric: boolToRaw(v), - } -} - -// BoolSliceValue creates a BOOLSLICE Value. -func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} -} - -// IntValue creates an INT64 Value. -func IntValue(v int) Value { - return Int64Value(int64(v)) -} - -// IntSliceValue creates an INTSLICE Value. -func IntSliceValue(v []int) Value { - var int64Val int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) - for i, val := range v { - cp.Elem().Index(i).SetInt(int64(val)) - } - return Value{ - vtype: INT64SLICE, - slice: cp.Elem().Interface(), - } -} - -// Int64Value creates an INT64 Value. -func Int64Value(v int64) Value { - return Value{ - vtype: INT64, - numeric: int64ToRaw(v), - } -} - -// Int64SliceValue creates an INT64SLICE Value. -func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} -} - -// Float64Value creates a FLOAT64 Value. -func Float64Value(v float64) Value { - return Value{ - vtype: FLOAT64, - numeric: float64ToRaw(v), - } -} - -// Float64SliceValue creates a FLOAT64SLICE Value. -func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} -} - -// StringValue creates a STRING Value. 
-func StringValue(v string) Value { - return Value{ - vtype: STRING, - stringly: v, - } -} - -// StringSliceValue creates a STRINGSLICE Value. -func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} -} - -// Type returns a type of the Value. -func (v Value) Type() Type { - return v.vtype -} - -// AsBool returns the bool value. Make sure that the Value's type is -// BOOL. -func (v Value) AsBool() bool { - return rawToBool(v.numeric) -} - -// AsBoolSlice returns the []bool value. Make sure that the Value's type is -// BOOLSLICE. -func (v Value) AsBoolSlice() []bool { - if v.vtype != BOOLSLICE { - return nil - } - return v.asBoolSlice() -} - -func (v Value) asBoolSlice() []bool { - return attribute.AsBoolSlice(v.slice) -} - -// AsInt64 returns the int64 value. Make sure that the Value's type is -// INT64. -func (v Value) AsInt64() int64 { - return rawToInt64(v.numeric) -} - -// AsInt64Slice returns the []int64 value. Make sure that the Value's type is -// INT64SLICE. -func (v Value) AsInt64Slice() []int64 { - if v.vtype != INT64SLICE { - return nil - } - return v.asInt64Slice() -} - -func (v Value) asInt64Slice() []int64 { - return attribute.AsInt64Slice(v.slice) -} - -// AsFloat64 returns the float64 value. Make sure that the Value's -// type is FLOAT64. -func (v Value) AsFloat64() float64 { - return rawToFloat64(v.numeric) -} - -// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is -// FLOAT64SLICE. -func (v Value) AsFloat64Slice() []float64 { - if v.vtype != FLOAT64SLICE { - return nil - } - return v.asFloat64Slice() -} - -func (v Value) asFloat64Slice() []float64 { - return attribute.AsFloat64Slice(v.slice) -} - -// AsString returns the string value. Make sure that the Value's type -// is STRING. -func (v Value) AsString() string { - return v.stringly -} - -// AsStringSlice returns the []string value. Make sure that the Value's type is -// STRINGSLICE. -func (v Value) AsStringSlice() []string { - if v.vtype != STRINGSLICE { - return nil - } - return v.asStringSlice() -} - -func (v Value) asStringSlice() []string { - return attribute.AsStringSlice(v.slice) -} - -type unknownValueType struct{} - -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { - switch v.Type() { - case BOOL: - return v.AsBool() - case BOOLSLICE: - return v.asBoolSlice() - case INT64: - return v.AsInt64() - case INT64SLICE: - return v.asInt64Slice() - case FLOAT64: - return v.AsFloat64() - case FLOAT64SLICE: - return v.asFloat64Slice() - case STRING: - return v.stringly - case STRINGSLICE: - return v.asStringSlice() - } - return unknownValueType{} -} - -// Emit returns a string representation of Value's data. 
-func (v Value) Emit() string { - switch v.Type() { - case BOOLSLICE: - return fmt.Sprint(v.asBoolSlice()) - case BOOL: - return strconv.FormatBool(v.AsBool()) - case INT64SLICE: - j, err := json.Marshal(v.asInt64Slice()) - if err != nil { - return fmt.Sprintf("invalid: %v", v.asInt64Slice()) - } - return string(j) - case INT64: - return strconv.FormatInt(v.AsInt64(), 10) - case FLOAT64SLICE: - j, err := json.Marshal(v.asFloat64Slice()) - if err != nil { - return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) - } - return string(j) - case FLOAT64: - return fmt.Sprint(v.AsFloat64()) - case STRINGSLICE: - j, err := json.Marshal(v.asStringSlice()) - if err != nil { - return fmt.Sprintf("invalid: %v", v.asStringSlice()) - } - return string(j) - case STRING: - return v.stringly - default: - return "unknown" - } -} - -// MarshalJSON returns the JSON encoding of the Value. -func (v Value) MarshalJSON() ([]byte, error) { - var jsonVal struct { - Type string - Value interface{} - } - jsonVal.Type = v.Type().String() - jsonVal.Value = v.AsInterface() - return json.Marshal(jsonVal) -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/codes/README.md b/openshift/tools/vendor/go.opentelemetry.io/otel/codes/README.md deleted file mode 100644 index 24c52b387d..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/codes/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Codes - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/codes/codes.go b/openshift/tools/vendor/go.opentelemetry.io/otel/codes/codes.go deleted file mode 100644 index 49a35b1225..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/codes/codes.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package codes // import "go.opentelemetry.io/otel/codes" - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" -) - -const ( - // Unset is the default status code. - Unset Code = 0 - - // Error indicates the operation contains an error. - // - // NOTE: The error code in OTLP is 2. - // The value of this enum is only relevant to the internals - // of the Go SDK. - Error Code = 1 - - // Ok indicates operation has been validated by an Application developers - // or Operator to have completed successfully, or contain no error. - // - // NOTE: The Ok code in OTLP is 1. - // The value of this enum is only relevant to the internals - // of the Go SDK. - Ok Code = 2 - - maxCode = 3 -) - -// Code is an 32-bit representation of a status state. -type Code uint32 - -var codeToStr = map[Code]string{ - Unset: "Unset", - Error: "Error", - Ok: "Ok", -} - -var strToCode = map[string]Code{ - `"Unset"`: Unset, - `"Error"`: Error, - `"Ok"`: Ok, -} - -// String returns the Code as a string. -func (c Code) String() string { - return codeToStr[c] -} - -// UnmarshalJSON unmarshals b into the Code. -// -// This is based on the functionality in the gRPC codes package: -// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. 
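Value is a tagged union over the nine Type variants, with Emit rendering scalars via strconv/fmt and slices as JSON. A small sketch of the constructor/accessor round trip, again assuming only the public attribute package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	vals := []attribute.Value{
		attribute.BoolValue(true),
		attribute.Int64Value(42),
		attribute.Float64Value(3.14),
		attribute.StringValue("hello"),
		attribute.StringSliceValue([]string{"a", "b"}),
	}
	for _, v := range vals {
		// Emit renders slice values as JSON, scalars via strconv/fmt.
		fmt.Printf("%-12s %s\n", v.Type(), v.Emit())
	}

	// MarshalJSON encodes the type name alongside the data.
	b, _ := json.Marshal(attribute.Int64Value(7))
	fmt.Println(string(b)) // {"Type":"INT64","Value":7}
}
```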
- if string(b) == "null" { - return nil - } - if c == nil { - return errors.New("nil receiver passed to UnmarshalJSON") - } - - var x interface{} - if err := json.Unmarshal(b, &x); err != nil { - return err - } - switch x.(type) { - case string: - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - case float64: - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) // nolint: gosec // Bit size of 32 check above. - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - default: - return fmt.Errorf("invalid code: %q", string(b)) - } -} - -// MarshalJSON returns c as the JSON encoding of c. -func (c *Code) MarshalJSON() ([]byte, error) { - if c == nil { - return []byte("null"), nil - } - str, ok := codeToStr[*c] - if !ok { - return nil, fmt.Errorf("invalid code: %d", *c) - } - return []byte(fmt.Sprintf("%q", str)), nil -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/codes/doc.go b/openshift/tools/vendor/go.opentelemetry.io/otel/codes/doc.go deleted file mode 100644 index ee8db448b8..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/codes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package codes defines the canonical error codes used by OpenTelemetry. - -It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). -*/ -package codes // import "go.opentelemetry.io/otel/codes" diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md deleted file mode 100644 index 2de1fc3c6b..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.26.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go deleted file mode 100644 index d8dc822b26..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go +++ /dev/null @@ -1,8996 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -import "go.opentelemetry.io/otel/attribute" - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). 
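The codes.Code JSON handling above accepts null, the quoted names, and bare integers below maxCode. A short round-trip sketch under that assumption:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

func main() {
	var c codes.Code

	// Both the string and the numeric spelling decode to the same Code.
	_ = json.Unmarshal([]byte(`"Error"`), &c)
	fmt.Println(c) // Error

	_ = json.Unmarshal([]byte(`2`), &c)
	fmt.Println(c) // Ok

	// Values >= maxCode (3) are rejected.
	if err := json.Unmarshal([]byte(`7`), &c); err != nil {
		fmt.Println("rejected:", err)
	}
}
```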
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: stable - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming - // to the "aspnetcore.diagnostics.exception.result" semantic conventions. - // It represents the aSP.NET Core exception middleware handling result - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'handled', 'unhandled' - AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") - - // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the - // "aspnetcore.routing.match_status" semantic conventions. It represents - // the match result - success or failure - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'success', 'failure' - AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -var ( - // Exception was handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") - // Exception was not handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") - // Exception handling was skipped because the response had started - AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") - // Exception handling didn't run because the request was aborted - AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") -) - -var ( - // Match succeeded - AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") - // Match failed - AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. 
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// Generic attributes for AWS services. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. 
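Both the pre-built enum members (e.g. AspnetcoreRateLimitingResultAcquired) and the helper functions return plain attribute.KeyValue pairs, so they compose directly into an attribute.Set. A hypothetical example with made-up values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Enum members and helper functions mix freely in one Set.
	attrs := attribute.NewSet(
		semconv.AspnetcoreRateLimitingResultAcquired,
		semconv.AspnetcoreRateLimitingPolicy("fixed"),
		semconv.AspnetcoreRequestIsUnhandled(false),
	)
	for _, kv := range attrs.ToSlice() {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}
```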
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. 
It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. 
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// Attributes for AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
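The DynamoDB helpers follow the same shape throughout: one function per request or response field, each returning an attribute.KeyValue. A sketch of how an instrumented Query might collect them; the table name and counts are placeholders:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// queryAttrs gathers the request/response attributes a DynamoDB Query
// instrumentation might record before attaching them to a span.
func queryAttrs(table string, limit, scanned, count int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBTableNames(table),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(limit),
		semconv.AWSDynamoDBScannedCount(scanned),
		semconv.AWSDynamoDBCount(count),
	}
}

func main() {
	for _, kv := range queryAttrs("Users", 25, 25, 10) {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}
```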
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Attributes for AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Attributes for AWS Logs. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
- // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Attributes for AWS Lambda. -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for AWS S3. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. 
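The ECS/EKS/log-group attributes are resource-level rather than span-level, so a detector would typically fold them into an SDK resource. A sketch assuming the go.opentelemetry.io/otel/sdk/resource package (not part of this vendor tree) and placeholder ARNs:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// A hypothetical ECS detector could describe the task like this.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
		semconv.AWSLogGroupNames("/aws/ecs/my-task"),
	)
	fmt.Println(res)
}
```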
- // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// The web browser attributes -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - 
// IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Attributes for CloudEvents. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. 
It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). 
-func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCPUStateKey is the attribute Key conforming to the - // "container.cpu.state" semantic conventions. It represents the CPU state - // for this data point. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'user', 'kernel' - ContainerCPUStateKey = attribute.Key("container.cpu.state") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -var ( - // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) - ContainerCPUStateUser = ContainerCPUStateKey.String("user") - // When CPU is used by the system (host OS) - ContainerCPUStateSystem = ContainerCPUStateKey.String("system") - // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) - ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. 
It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// This group defines the attributes used to describe telemetry in the context -// of databases. -const ( - // DBClientConnectionsPoolNameKey is the attribute Key conforming to the - // "db.client.connections.pool.name" semantic conventions. It represents - // the name of the connection pool; unique within the instrumented - // application. In case the connection pool implementation doesn't provide - // a name, instrumentation should use a combination of `server.address` and - // `server.port` attributes formatted as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myDataSource' - DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - - // DBClientConnectionsStateKey is the attribute Key conforming to the - // "db.client.connections.state" semantic conventions. It represents the - // state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle' - DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: If the collection name is parsed from the query, it SHOULD match - // the value provided in the query and may be qualified with the schema and - // database name. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" - // semantic conventions. It represents the name of the database, fully - // qualified within the server address and port. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'test.users' - // Note: If a database system has multiple namespace components, they - // SHOULD be concatenated (potentially using database system specific - // conventions) from most general to most specific namespace component, and - // more specific namespaces SHOULD NOT be captured without the more general - // namespaces, to ensure that "startswith" queries for the more general - // namespaces will be valid. 
- // Semantic conventions for individual database systems SHOULD document - // what `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationNameKey is the attribute Key conforming to the - // "db.operation.name" semantic conventions. It represents the name of the - // operation or command being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: It is RECOMMENDED to capture the value as provided by the - // application without attempting to do any case normalization. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey - // "WuValue"' - DBQueryTextKey = attribute.Key("db.query.text") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents the database management system (DBMS) product - // as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual DBMS may differ from the one identified by the client. - // For example, when using PostgreSQL client libraries to connect to a - // CockroachDB, the `db.system` is set to `postgresql` based on the - // instrumentation's best knowledge. - DBSystemKey = attribute.Key("db.system") -) - -var ( - // idle - DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") - // used - DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to -// the "db.client.connections.pool.name" semantic conventions. It represents -// the name of the connection pool; unique within the instrumented application. -// In case the connection pool implementation doesn't provide a name, -// instrumentation should use a combination of `server.address` and -// `server.port` attributes formatted as `server.address:server.port`. -func DBClientConnectionsPoolName(val string) attribute.KeyValue { - return DBClientConnectionsPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the -// "db.namespace" semantic conventions. It represents the name of the database, -// fully qualified within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the -// "db.query.text" semantic conventions. It represents the database query being -// executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// This group defines attributes for Cassandra. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. 
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// This group defines attributes for Azure Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// This group defines attributes for Elasticsearch. -const ( - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. 
-
-// This group defines attributes for Elasticsearch.
-const (
-	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
-	// identifier of an Elasticsearch cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
-	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
-	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.node.name" semantic conventions. It represents the
-	// human-readable identifier of the node/instance to which a
-	// request was routed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'instance-0000000001'
-	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
-	return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a request was
-// routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
-	return DBElasticsearchNodeNameKey.String(val)
-}
-
-// Attributes for software deployments.
-const (
-	// DeploymentEnvironmentKey is the attribute Key conforming to the
-	// "deployment.environment" semantic conventions. It represents the name of
-	// the [deployment
-	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
-	// deployment tier).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'staging', 'production'
-	// Note: `deployment.environment` does not affect the uniqueness
-	// constraints defined through
-	// the `service.namespace`, `service.name` and `service.instance.id`
-	// resource attributes.
-	// This implies that resources carrying the following attribute
-	// combinations MUST be
-	// considered to be identifying the same service:
-	//
-	// * `service.name=frontend`, `deployment.environment=production`
-	// * `service.name=frontend`, `deployment.environment=staging`.
-	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
-	return DeploymentEnvironmentKey.String(val)
-}
-
-// Attributes that represent an occurrence of a lifecycle transition on the
-// Android platform.
-const (
-	// AndroidStateKey is the attribute Key conforming to the "android.state"
-	// semantic conventions. It is deprecated; use the
-	// `device.app.lifecycle` event definition, including `android.state` as a
-	// payload field, instead.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: The Android lifecycle states are defined in [Activity lifecycle
-	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
-	// from which the `OS identifiers` are derived.
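-	//
-	// An illustrative sketch (not from the spec) of recording a transition,
-	// assuming an active trace.Span named `span` and the
-	// go.opentelemetry.io/otel/trace package imported as `trace`:
-	//
-	//	span.AddEvent("device.app.lifecycle",
-	//		trace.WithAttributes(AndroidStateForeground))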
- AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// These attributes may be used for any disk related operation. 
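-// For instance (an illustrative sketch; `diskIO` is assumed to be an
-// Int64Counter obtained from a go.opentelemetry.io/otel/metric Meter, `ctx`
-// a context.Context, and `n` the number of bytes transferred):
-//
-//	diskIO.Add(ctx, n, metric.WithAttributes(DiskIoDirectionRead))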
-const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report a DNS query. -const ( - // DNSQuestionNameKey is the attribute Key conforming to the - // "dns.question.name" semantic conventions. It represents the name being - // queried. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.example.com', 'opentelemetry.io' - // Note: If the name field contains non-printable characters (below 32 or - // above 126), those characters should be represented as escaped base 10 - // integers (\DDD). Back slashes and quotes should be escaped. Tabs, - // carriage returns, and line feeds should be converted to \t, \r, and \n - // respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Attributes for operations with an authenticated and/or authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
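-//
-// A hedged usage sketch (illustrative only; `span` is assumed to be an
-// active trace.Span, and the values mirror the examples in this group):
-//
-//	span.SetAttributes(
-//		EnduserID("username"),
-//		EnduserRole("admin"),
-//		EnduserScope("read:message, write:files"),
-//	)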
-func EnduserID(val string) attribute.KeyValue {
-	return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
-	return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
-	return EnduserScopeKey.String(val)
-}
-
-// The shared attributes used to report an error.
-const (
-	// ErrorTypeKey is the attribute Key conforming to the "error.type"
-	// semantic conventions. It describes a class of error the
-	// operation ended with.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'timeout', 'java.net.UnknownHostException',
-	// 'server_certificate_invalid', '500'
-	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
-	// cardinality.
-	//
-	// When `error.type` is set to a type (e.g., an exception type), its
-	// canonical class name identifying the type within the artifact SHOULD be
-	// used.
-	//
-	// Instrumentations SHOULD document the list of errors they report.
-	//
-	// The cardinality of `error.type` within one instrumentation library
-	// SHOULD be low.
-	// Telemetry consumers that aggregate data from multiple instrumentation
-	// libraries and applications
-	// should be prepared for `error.type` to have high cardinality at query
-	// time when no
-	// additional filters are applied.
-	//
-	// If the operation has completed successfully, instrumentations SHOULD NOT
-	// set `error.type`.
-	//
-	// If a specific domain defines its own set of error identifiers (such as
-	// HTTP or gRPC status codes),
-	// it's RECOMMENDED to:
-	//
-	// * Use a domain-specific attribute
-	// * Set `error.type` to capture all errors, regardless of whether they are
-	// defined within the domain-specific set or not.
-	ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
-	// A fallback error value to be used when the instrumentation doesn't define a custom value
-	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// Attributes for Events represented using Log Records.
-const (
-	// EventNameKey is the attribute Key conforming to the "event.name"
-	// semantic conventions. It identifies the class / type of
-	// event.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
-	// Note: Event names are subject to the same rules as [attribute
-	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
-	// Notably, event names are namespaced to avoid collisions and provide a
-	// clean separation of semantics for events in separate domains like
-	// browser, mobile, and kubernetes.
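-	//
-	// As an illustrative sketch only (assuming an active trace.Span named
-	// `span` and the go.opentelemetry.io/otel/trace package), the event name
-	// can accompany a span event:
-	//
-	//	span.AddEvent("browser.mouse.click",
-	//		trace.WithAttributes(EventName("browser.mouse.click")))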
-	EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of
-// event.
-func EventName(val string) attribute.KeyValue {
-	return EventNameKey.String(val)
-}
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
-	// ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It SHOULD be
-	// set to true if the exception event is recorded at a point where it is
-	// known that the exception is escaping the scope of the span.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Note: An exception is considered to have escaped (or left) the scope of
-	// a span,
-	// if that span is ended while the exception is still logically "in
-	// flight".
-	// This may be actually "in flight" in some languages (e.g. if the
-	// exception
-	// is passed to a Context manager's `__exit__` method in Python) but will
-	// usually be caught at the point of recording the exception in most
-	// languages.
-	//
-	// It is usually not possible to determine at the point where an exception
-	// is thrown
-	// whether it will escape the scope of a span.
-	// However, it is trivial to know that an exception
-	// will escape, if one checks for an active exception just before ending
-	// the span,
-	// as done in the [example for recording span
-	// exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
-	//
-	// It follows that an exception may still escape the scope of the span
-	// even if the `exception.escaped` attribute was not set or set to false,
-	// since the event might have been recorded at a time where it was not
-	// clear whether the exception will escape.
-	ExceptionEscapedKey = attribute.Key("exception.escaped")
-
-	// ExceptionMessageKey is the attribute Key conforming to the
-	// "exception.message" semantic conventions. It represents the exception
-	// message.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'Division by zero', "Can't convert 'int' object to str
-	// implicitly"
-	ExceptionMessageKey = attribute.Key("exception.message")
-
-	// ExceptionStacktraceKey is the attribute Key conforming to the
-	// "exception.stacktrace" semantic conventions. It represents a stacktrace
-	// as a string in the natural representation for the language runtime. The
-	// representation is to be determined and documented by each language SIG.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
-	// exception\\n at '
-	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
-	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
-	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
-	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
-	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
-	// semantic conventions. It represents the type of the exception (its
-	// fully-qualified class name, if applicable). The dynamic type of the
-	// exception should be preferred over the static type in languages that
-	// support it.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'java.net.ConnectException', 'OSError'
-	ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
-	return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
-	return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
-	return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
-	return ExceptionTypeKey.String(val)
-}
-
-// FaaS attributes
-const (
-	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
-	// semantic conventions. It represents a boolean that is true if the
-	// serverless function is executed for the first time (aka cold-start).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSColdstartKey = attribute.Key("faas.coldstart")
-
-	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
-	// conventions. It represents a string containing the schedule period as
-	// [Cron
-	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0/5 * * * ? *'
-	FaaSCronKey = attribute.Key("faas.cron")
-
-	// FaaSDocumentCollectionKey is the attribute Key conforming to the
-	// "faas.document.collection" semantic conventions. It represents the name
-	// of the source on which the triggering operation was performed. For
-	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
-	// Cosmos DB to the database name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myBucketName', 'myDBName'
-	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
-	// FaaSDocumentNameKey is the attribute Key conforming to the
-	// "faas.document.name" semantic conventions. It represents the document
-	// name/table subjected to the operation. For example, in Cloud Storage or
-	// S3 is the name of the file, and in Cosmos DB the table name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myFile.txt', 'myTableName'
-	FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
-	// FaaSDocumentOperationKey is the attribute Key conforming to the
-	// "faas.document.operation" semantic conventions. It describes
-	// the type of the operation that was performed on the data.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
-	// FaaSDocumentTimeKey is the attribute Key conforming to the
-	// "faas.document.time" semantic conventions. It represents a string
-	// containing the time when the data was accessed in the [ISO
-	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2020-01-23T13:47:06Z'
-	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
-	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
-	// semantic conventions. It represents the execution environment ID as a
-	// string, that will be potentially reused for other invocations to the
-	// same function/function version.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
-	// Note: * **AWS Lambda:** Use the (full) log stream name.
-	FaaSInstanceKey = attribute.Key("faas.instance")
-
-	// FaaSInvocationIDKey is the attribute Key conforming to the
-	// "faas.invocation_id" semantic conventions. It represents the invocation
-	// ID of the current function invocation.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
-	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
-	// FaaSInvokedNameKey is the attribute Key conforming to the
-	// "faas.invoked_name" semantic conventions. It represents the name of the
-	// invoked function.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-function'
-	// Note: SHOULD be equal to the `faas.name` resource attribute of the
-	// invoked function.
-	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
-	// FaaSInvokedProviderKey is the attribute Key conforming to the
-	// "faas.invoked_provider" semantic conventions. It represents the cloud
-	// provider of the invoked function.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
-	// invoked function.
-	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
-	// FaaSInvokedRegionKey is the attribute Key conforming to the
-	// "faas.invoked_region" semantic conventions. It represents the cloud
-	// region of the invoked function.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'eu-central-1'
-	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
-	// invoked function.
-	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
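-
-	// For example (an illustrative sketch, not normative; assumes "os" and
-	// "strconv" are imported and error handling is elided), the note on
-	// "faas.max_memory" below translates to:
-	//
-	//	mb, _ := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
-	//	kv := FaaSMaxMemory(mb * 1048576)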
-
-	// FaaSMaxMemoryKey is the attribute Key conforming to the
-	// "faas.max_memory" semantic conventions. It represents the amount of
-	// memory available to the serverless function converted to Bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 134217728
-	// Note: It's recommended to set this attribute since e.g. too little
-	// memory can easily stop a Java AWS Lambda function from working
-	// correctly. On AWS Lambda, the environment variable
-	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
-	// be multiplied by 1,048,576).
-	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
-	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
-	// conventions. It represents the name of the single function that this
-	// runtime instance executes.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
-	// Note: This is the name of the function as configured/deployed on the
-	// FaaS
-	// platform and is usually different from the name of the callback
-	// function (which may be stored in the
-	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
-	// span attributes).
-	//
-	// For some cloud providers, the above definition is ambiguous. The
-	// following
-	// definition of function name MUST be used for this attribute
-	// (and consequently the span name) for the listed cloud
-	// providers/products:
-	//
-	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
-	// followed by a forward slash followed by the function name (this form
-	// can also be seen in the resource JSON for the function).
-	// This means that a span attribute MUST be used, as an Azure function
-	// app can host multiple functions that would usually share
-	// a TracerProvider (see also the `cloud.resource_id` attribute).
-	FaaSNameKey = attribute.Key("faas.name")
-
-	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
-	// conventions. It represents a string containing the function invocation
-	// time in the [ISO
-	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2020-01-23T13:47:06Z'
-	FaaSTimeKey = attribute.Key("faas.time")
-
-	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
-	// semantic conventions. It represents the type of the trigger which caused
-	// this function invocation.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSTriggerKey = attribute.Key("faas.trigger")
-
-	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
-	// semantic conventions. It represents the immutable version of the
-	// function being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '26', 'pinkfroid-00002'
-	// Note: Depending on the cloud provider and platform, use:
-	//
-	// * **AWS Lambda:** The [function
-	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
-	// (an integer represented as a decimal string).
-	// * **Google Cloud Run (Services):** The
-	// [revision](https://cloud.google.com/run/docs/managing/revisions)
-	// (i.e., the function name plus the revision suffix).
-	// * **Google Cloud Functions:** The value of the
-	// [`K_REVISION` environment
-	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
-	// * **Azure Functions:** Not applicable.
Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. 
It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
-	return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
-	return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
-	return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
-	return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
-	return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
-	return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
-	return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
-	return FaaSVersionKey.String(val)
-}
-
-// Attributes for Feature Flags.
-const (
-	// FeatureFlagKeyKey is the attribute Key conforming to the
-	// "feature_flag.key" semantic conventions. It represents the unique
-	// identifier of the feature flag.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'logo-color'
-	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
-	// FeatureFlagProviderNameKey is the attribute Key conforming to the
-	// "feature_flag.provider_name" semantic conventions. It represents the
-	// name of the service provider that performs the flag evaluation.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Flag Manager'
-	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
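-
-	// An illustrative sketch of recording a flag evaluation (the values are
-	// the examples from this group; `span` is an assumed active trace.Span):
-	//
-	//	span.SetAttributes(
-	//		FeatureFlagKey("logo-color"),
-	//		FeatureFlagProviderName("Flag Manager"),
-	//		FeatureFlagVariant("red"),
-	//	)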
-
-	// FeatureFlagVariantKey is the attribute Key conforming to the
-	// "feature_flag.variant" semantic conventions. It SHOULD be
-	// a semantic identifier for a value. If one is unavailable, a stringified
-	// version of the value can be used.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'red', 'true', 'on'
-	// Note: A semantic identifier, commonly referred to as a variant, provides
-	// a means
-	// for referring to a value without including the value itself. This can
-	// provide additional context for understanding the meaning behind a value.
-	// For example, the variant `red` may be used for the value `#c05543`.
-	//
-	// A stringified version of the value can be used in situations where a
-	// semantic identifier is unavailable. String representation of the value
-	// should be determined by the implementer.
-	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
-	return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
-	return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It SHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
-	return FeatureFlagVariantKey.String(val)
-}
-
-// Describes file attributes.
-const (
-	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
-	// semantic conventions. It represents the directory where the file is
-	// located. It should include the drive letter, when appropriate.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/home/user', 'C:\\Program Files\\MyApp'
-	FileDirectoryKey = attribute.Key("file.directory")
-
-	// FileExtensionKey is the attribute Key conforming to the "file.extension"
-	// semantic conventions. It represents the file extension, excluding the
-	// leading dot.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'png', 'gz'
-	// Note: When the file name has multiple extensions (example.tar.gz), only
-	// the last one should be captured ("gz", not "tar.gz").
-	FileExtensionKey = attribute.Key("file.extension")
-
-	// FileNameKey is the attribute Key conforming to the "file.name" semantic
-	// conventions. It represents the name of the file including the extension,
-	// without the directory.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'example.png'
-	FileNameKey = attribute.Key("file.name")
-
-	// FilePathKey is the attribute Key conforming to the "file.path" semantic
-	// conventions. It represents the full path to the file, including the file
-	// name. It should include the drive letter, when appropriate.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/home/alice/example.png', 'C:\\Program
-	// Files\\MyApp\\myapp.exe'
-	FilePathKey = attribute.Key("file.path")
-
-	// FileSizeKey is the attribute Key conforming to the "file.size" semantic
-	// conventions. It represents the file size in bytes.
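-	//
-	// For example (an illustrative sketch; assumes "os" is imported and the
-	// error from os.Stat is handled elsewhere):
-	//
-	//	fi, _ := os.Stat("/home/alice/example.png")
-	//	attrs := []attribute.KeyValue{
-	//		FileName(fi.Name()),
-	//		FileSize(int(fi.Size())),
-	//	}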
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - FileSizeKey = attribute.Key("file.size") -) - -// FileDirectory returns an attribute KeyValue conforming to the -// "file.directory" semantic conventions. It represents the directory where the -// file is located. It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the -// "file.extension" semantic conventions. It represents the file extension, -// excluding the leading dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" -// semantic conventions. It represents the name of the file including the -// extension, without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" -// semantic conventions. It represents the full path to the file, including the -// file name. It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" -// semantic conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// Attributes for Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Attributes for Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// The attributes used to describe telemetry in the context of LLM (Large -// Language Models) requests and responses. -const ( - // GenAiCompletionKey is the attribute Key conforming to the - // "gen_ai.completion" semantic conventions. It represents the full - // response received from the LLM. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'assistant', 'content': 'The capital of France is - // Paris.'}]" - // Note: It's RECOMMENDED to format completions as JSON string matching - // [OpenAI messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiCompletionKey = attribute.Key("gen_ai.completion") - - // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" - // semantic conventions. It represents the full prompt sent to an LLM. 
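-	//
-	// A hedged sketch of annotating an LLM-call span with the request-side
-	// attributes in this group (illustrative; `span` is an assumed active
-	// trace.Span, and the values mirror the examples here):
-	//
-	//	span.SetAttributes(
-	//		GenAiSystemOpenai,
-	//		GenAiRequestModel("gpt-4"),
-	//		GenAiRequestMaxTokens(100),
-	//		GenAiRequestTemperature(0.0),
-	//	)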
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'user', 'content': 'What is the capital of - // France?'}]" - // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI - // messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiPromptKey = attribute.Key("gen_ai.prompt") - - // GenAiRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the - // maximum number of tokens the LLM generates for a request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAiRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of - // the LLM a request is being made to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4' - GenAiRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAiRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0.0 - GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAiRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p - // sampling setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0 - GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAiResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to - // each generation received. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'stop' - GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAiResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'chatcmpl-123' - GenAiResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAiResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of - // the LLM a response was generated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4-0613' - GenAiResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as - // identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'openai' - // Note: The actual GenAI product may differ from the one identified by the - // client. 
For example, when using OpenAI client libraries to communicate - // with Mistral, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge. - GenAiSystemKey = attribute.Key("gen_ai.system") - - // GenAiUsageCompletionTokensKey is the attribute Key conforming to the - // "gen_ai.usage.completion_tokens" semantic conventions. It represents the - // number of tokens used in the LLM response (completion). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 180 - GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") - - // GenAiUsagePromptTokensKey is the attribute Key conforming to the - // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the - // number of tokens used in the LLM prompt. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") -) - -var ( - // OpenAI - GenAiSystemOpenai = GenAiSystemKey.String("openai") -) - -// GenAiCompletion returns an attribute KeyValue conforming to the -// "gen_ai.completion" semantic conventions. It represents the full response -// received from the LLM. -func GenAiCompletion(val string) attribute.KeyValue { - return GenAiCompletionKey.String(val) -} - -// GenAiPrompt returns an attribute KeyValue conforming to the -// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to -// an LLM. -func GenAiPrompt(val string) attribute.KeyValue { - return GenAiPromptKey.String(val) -} - -// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the LLM generates for a request. -func GenAiRequestMaxTokens(val int) attribute.KeyValue { - return GenAiRequestMaxTokensKey.Int(val) -} - -// GenAiRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// LLM a request is being made to. -func GenAiRequestModel(val string) attribute.KeyValue { - return GenAiRequestModelKey.String(val) -} - -// GenAiRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the LLM request. -func GenAiRequestTemperature(val float64) attribute.KeyValue { - return GenAiRequestTemperatureKey.Float64(val) -} - -// GenAiRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p -// sampling setting for the LLM request. -func GenAiRequestTopP(val float64) attribute.KeyValue { - return GenAiRequestTopPKey.Float64(val) -} - -// GenAiResponseFinishReasons returns an attribute KeyValue conforming to -// the "gen_ai.response.finish_reasons" semantic conventions. It represents the -// array of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAiResponseFinishReasonsKey.StringSlice(val) -} - -// GenAiResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique -// identifier for the completion. 
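-//
-// For instance (illustrative; `span` is an assumed active trace.Span), the
-// response side of an LLM call could be recorded as:
-//
-//	span.SetAttributes(
-//		GenAiResponseID("chatcmpl-123"),
-//		GenAiResponseModel("gpt-4-0613"),
-//		GenAiResponseFinishReasons("stop"),
-//		GenAiUsagePromptTokens(100),
-//		GenAiUsageCompletionTokens(180),
-//	)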
-func GenAiResponseID(val string) attribute.KeyValue {
-	return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
-	return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
-	return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
-	return GenAiUsagePromptTokensKey.Int(val)
-}
-
-// Attributes for GraphQL.
-const (
-	// GraphqlDocumentKey is the attribute Key conforming to the
-	// "graphql.document" semantic conventions. It represents the GraphQL
-	// document being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
-	// Note: The value may be sanitized to exclude sensitive information.
-	GraphqlDocumentKey = attribute.Key("graphql.document")
-
-	// GraphqlOperationNameKey is the attribute Key conforming to the
-	// "graphql.operation.name" semantic conventions. It represents the name of
-	// the operation being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'findBookByID'
-	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
-	// GraphqlOperationTypeKey is the attribute Key conforming to the
-	// "graphql.operation.type" semantic conventions. It represents the type of
-	// the operation being executed.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'query', 'mutation', 'subscription'
-	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
-	// GraphQL query
-	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
-	// GraphQL mutation
-	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
-	// GraphQL subscription
-	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
-	return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
-	return GraphqlOperationNameKey.String(val)
-}
-
-// Attributes for the Heroku platform on which the application is running.
-const (
It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. 
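-//
-// Editorial example (not part of the upstream file): host attributes are
-// typically attached to a resource rather than a span. A minimal sketch,
-// assuming go.opentelemetry.io/otel/sdk/resource is imported, this package
-// is imported as semconv, and the package-level SchemaURL constant is in
-// scope; the values are hypothetical:
-//
-//	res := resource.NewWithAttributes(
-//		semconv.SchemaURL,
-//		semconv.HostName("opentelemetry-test"),
-//		semconv.HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"),
-//		semconv.HostArchAMD64,
-//	)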
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the HTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known method, it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive, SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so, MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Service Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPRequestSizeKey is the attribute Key conforming to the
- // "http.request.size" semantic conventions. It represents the total size
- // of the request in bytes. This should be the total number of bytes sent
- // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
- // and HTTP/3), headers, and request body if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPRequestSizeKey = attribute.Key("http.request.size")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseSizeKey is the attribute Key conforming to the
- // "http.response.size" semantic conventions. It represents the total size
- // of the response in bytes. This should be the total number of bytes sent
- // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
- // HTTP/3), headers, and response body and trailers if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPResponseSizeKey = attribute.Key("http.response.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
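- //
- // Editorial example (not part of the upstream file): a minimal sketch of
- // HTTP server span attributes, assuming this package is imported as
- // semconv and span is an active go.opentelemetry.io/otel/trace.Span; the
- // route and status code are hypothetical:
- //
- //	span.SetAttributes(
- //		semconv.HTTPRequestMethodGet,
- //		semconv.HTTPRoute("/users/{id}"),
- //		semconv.HTTPResponseStatusCode(200),
- //	)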
- HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // active state - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of -// the request in bytes. This should be the total number of bytes sent over the -// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. 
It represents the total size of -// the response in bytes. This should be the total number of bytes sent over -// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Java Virtual machine related attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") - - // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" - // semantic conventions. It represents the name of the garbage collector - // action. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'end of minor GC', 'end of major GC' - // Note: Garbage collector action is generally obtained via - // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). - JvmGcActionKey = attribute.Key("jvm.gc.action") - - // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" - // semantic conventions. It represents the name of the garbage collector. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Young Generation', 'G1 Old Generation' - // Note: Garbage collector name is generally obtained via - // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). - JvmGcNameKey = attribute.Key("jvm.gc.name") - - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. 
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
- // JvmThreadDaemonKey is the attribute Key conforming to the
- // "jvm.thread.daemon" semantic conventions. It represents whether the
- // thread is a daemon or not.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
- // JvmThreadStateKey is the attribute Key conforming to the
- // "jvm.thread.state" semantic conventions. It represents the state of the
- // thread.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'runnable', 'blocked'
- JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
- // A thread that has not yet started is in this state
- JvmThreadStateNew = JvmThreadStateKey.String("new")
- // A thread executing in the Java virtual machine is in this state
- JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
- // A thread that is blocked waiting for a monitor lock is in this state
- JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
- // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
- JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
- // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
- JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
- // A thread that has exited is in this state
- JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
- return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
- return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents whether the thread
-// is a daemon or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
- return JvmThreadDaemonKey.Bool(val)
-}
-
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key - // conforming to the "k8s.container.status.last_terminated_reason" semantic - // conventions. It represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Evicted', 'Error' - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. 
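-//
-// Editorial example (not part of the upstream file): Kubernetes attributes
-// usually describe the resource producing the telemetry. A minimal sketch,
-// assuming go.opentelemetry.io/otel/sdk/resource is imported, this package
-// is imported as semconv, and the package-level SchemaURL constant is in
-// scope; the names and count are hypothetical:
-//
-//	res := resource.NewWithAttributes(
-//		semconv.SchemaURL,
-//		semconv.K8SNamespaceName("default"),
-//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
-//		semconv.K8SContainerName("redis"),
-//		semconv.K8SContainerRestartCount(2),
-//	)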
-func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. 
-func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Attributes for a file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. 
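-//
-// Editorial example (not part of the upstream file): a minimal sketch of
-// collecting log-file attributes into a slice, for instance to attach to a
-// log record via a bridge API; assumes this package is imported as semconv
-// and the values are hypothetical:
-//
-//	attrs := []attribute.KeyValue{
-//		semconv.LogFileName("audit.log"),
-//		semconv.LogFilePath("/var/log/mysql/audit.log"),
-//		semconv.LogIostreamStdout,
-//	}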
-func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// The generic attributes that may be used in any Log Record. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. 
It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to - // the "messaging.destination.partition.id" semantic conventions. It - // represents the identifier of the partition messages are sent to or - // received from, unique within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1' - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. 
It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack', 'nack', 'send' - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents the messaging - // system as identified by the client instrumentation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate - // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on - // the instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") - // One or more messages are settled - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). 
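A minimal sketch of how these enum members and the constructor helpers below might be attached to a producer span, assuming this vendored file is the v1.26.0 semconv package (imported here as semconv) and the standard go.opentelemetry.io/otel tracer API; the tracer name, topic, and span name are illustrative:

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path for this vendored package
	"go.opentelemetry.io/otel/trace"
)

func publishSpan(ctx context.Context) {
	tracer := otel.Tracer("example/messaging")
	// Producer spans carry the system, operation type, and destination together.
	_, span := tracer.Start(ctx, "publish MyTopic",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,          // messaging.system=kafka
			semconv.MessagingOperationTypePublish, // messaging.operation.type=publish
			semconv.MessagingDestinationName("MyTopic"),
		))
	defer span.End()
}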
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming -// to the "messaging.destination.partition.id" semantic conventions. It -// represents the identifier of the partition messages are sent to or received -// from, unique within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. 
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// This group describes attributes specific to Apache Kafka. -const ( - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka, which are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, its string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka, which are used for grouping alike messages to ensure they're -// processed on the same partition.
They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// This group describes attributes specific to RabbitMQ. -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming - // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. - // It represents the rabbitMQ message delivery tag - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 123 - MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic -// conventions. It represents the rabbitMQ message delivery tag -func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitmqMessageDeliveryTagKey.Int(val) -} - -// This group describes attributes specific to RocketMQ. -const ( - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. 
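A sketch of applying the Kafka helpers above on the consumer side; span is assumed to come from tracer.Start on the receive path, with the same imports as the publish sketch earlier, and the values taken from the Kafka client record:

func annotateKafkaConsumer(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaMessageKey("myKey"), // omit entirely when the key is null
		semconv.MessagingKafkaMessageOffset(42),
		semconv.MessagingKafkaMessageTombstone(false),
	)
}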
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to the consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the message group, which is essential for FIFO messages. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of the message, another way to mark a message besides the message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of the message besides the topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources; resources in different namespaces are - // individual.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to the consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the message group, which is essential for FIFO messages. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of the message, another way to mark a message besides the message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of the message besides the topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions.
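A sketch of annotating a RocketMQ FIFO consumer span with the helpers above (same imports as the earlier sketches; values illustrative):

func annotateRocketmqFifo(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,                // messaging.rocketmq.message.type=fifo
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"), // FIFO ordering is scoped to this group
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
		semconv.MessagingRocketmqMessageTag("tagA"),
	)
}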
It represents the -// namespace of RocketMQ resources; resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// This group describes attributes specific to GCP Pub/Sub. -const ( - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. - // It represents the ack deadline in seconds set for the modify ack - // deadline request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It - // represents the ack id for a given message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack_id' - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key - // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" - // semantic conventions. It represents the delivery attempt for a given - // message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") -) - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic -// conventions. It represents the ack deadline in seconds set for the modify -// ack deadline request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It -// represents the ack id for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key.
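A sketch of the GCP Pub/Sub helpers on a settlement span (same imports as above; values illustrative):

func annotatePubsubModAck(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingGCPPubsubMessageAckID("ack_id"),
		semconv.MessagingGCPPubsubMessageAckDeadline(10), // seconds
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(2),
		semconv.MessagingGCPPubsubMessageOrderingKey("ordering_key"), // omit when the message has no ordering key
	)
}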
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// This group describes attributes specific to Azure Service Bus. -const ( - // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key - // conforming to the "messaging.servicebus.destination.subscription_name" - // semantic conventions. It represents the name of the subscription in the - // topic that messages are received from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mySubscription' - MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") - - // MessagingServicebusDispositionStatusKey is the attribute Key conforming - // to the "messaging.servicebus.disposition_status" semantic conventions. - // It describes the [settlement - // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServicebusMessageDeliveryCountKey is the attribute Key - // conforming to the "messaging.servicebus.message.delivery_count" semantic - // conventions. It represents the number of deliveries that have been - // attempted for this message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key - // conforming to the "messaging.servicebus.message.enqueued_time" semantic - // conventions. It represents the UTC epoch seconds at which the message - // has been accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") -) - -var ( - // Message is completed - MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") - // Message is abandoned - MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") - // Message is deferred - MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") -) - -// MessagingServicebusDestinationSubscriptionName returns an attribute -// KeyValue conforming to the -// "messaging.servicebus.destination.subscription_name" semantic conventions. -// It represents the name of the subscription in the topic that messages are -// received from. -func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingServicebusDestinationSubscriptionNameKey.String(val) -} - -// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message.
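A sketch of the Azure Service Bus attributes on a settle span (same imports; values illustrative):

func annotateServicebusSettle(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
		semconv.MessagingServicebusDispositionStatusComplete, // settlement type: complete
		semconv.MessagingServicebusMessageDeliveryCount(2),
	)
}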
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// This group describes attributes specific to Azure Event Hubs. -const ( - // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to - // the "messaging.eventhubs.consumer.group" semantic conventions. It - // represents the name of the consumer group the event consumer is - // associated with. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'indexer' - MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming - // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. - // It represents the UTC epoch seconds at which the message has been - // accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") -) - -// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming -// to the "messaging.eventhubs.consumer.group" semantic conventions. It -// represents the name of the consumer group the event consumer is associated -// with. -func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { - return MessagingEventhubsConsumerGroupKey.String(val) -} - -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.eventhubs.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. 
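Since the enqueued-time attributes take UTC epoch seconds rather than a timestamp type, a client-supplied time.Time needs converting; a sketch, additionally importing time:

func annotateEventhubs(span trace.Span, enqueued time.Time) {
	span.SetAttributes(
		semconv.MessagingEventhubsConsumerGroup("indexer"),
		semconv.MessagingEventhubsMessageEnqueuedTime(int(enqueued.Unix())), // e.g. 1701393730
	)
}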
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It describes more - // details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // actual version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.1', '2' - // Note: If protocol version is subject to negotiation (for example using - // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute - // SHOULD be set to the negotiated version. If the actual protocol version - // is not known, this attribute SHOULD NOT be set. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
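Per the note on network.transport above, a port number is ambiguous without the transport, so the two are best set together; a sketch (same imports; values illustrative):

func annotateConnection(span trace.Span) {
	span.SetAttributes(
		semconv.NetworkTransportTCP, // 12345/tcp is not 12345/udp
		semconv.NetworkTypeIpv4,
		semconv.NetworkPeerAddress("10.1.2.80"),
		semconv.NetworkPeerPort(12345),
		semconv.NetworkProtocolName("http"), // normalized to lowercase, per the notes above
		semconv.NetworkProtocolVersion("1.1"),
	)
}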
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of - // the OCI image manifest. For container images specifically, this is the digest - // by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically, this is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// Attributes used by the OpenTracing Shim layer. -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span.
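For the OCI digest attribute just defined, a sketch of attaching it to a telemetry resource, assuming the go.opentelemetry.io/otel/sdk/resource package and the SchemaURL constant this semconv package exposes; in practice the digest would come from the container runtime rather than a literal:

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path
	"go.opentelemetry.io/otel/sdk/resource"
)

func containerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.OciManifestDigest("sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"),
	)
}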
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, e.g. as reported by the `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions.
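A sketch of combining the OS attributes above into a resource, with the same imports as the container-resource sketch; the values are illustrative and would normally come from runtime detection:

func osResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.OSTypeLinux, // os.type=linux
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("18.04.1"),
		semconv.OSBuildID("22621"),
	)
}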
It represents the human readable (not -// intended to be parsed) OS version information, e.g. as reported by the `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Attributes reserved for OpenTelemetry -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions.
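Instrumentation normally doesn't set otel.status_code by hand; it calls SetStatus on the span, and non-OTLP exporters surface that as otel.status_code and otel.status_description. A sketch, additionally importing go.opentelemetry.io/otel/codes:

func recordFailure(span trace.Span) {
	// Surfaces as otel.status_code=ERROR, otel.status_description="resource not found".
	span.SetStatus(codes.Error, "resource not found")
}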
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It - // specifies whether the context switches for this data point were - // voluntary or involuntary.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and - // time the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:25:34.853Z' - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the - // "process.exit.code" semantic conventions. It represents the exit code of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the - // "process.exit.time" semantic conventions. It represents the date and - // time the process exited, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:26:12.315Z' - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID - // of the process's group leader. This is also the process group ID (PGID) - // of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents whether - // the process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions.
It represents the type - // of page fault for this data point. Type `major` is for major/hard page - // faults, and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user - // ID (RUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the - // username of the real user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved - // user ID (SUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the - // username of the saved user. 
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'operator'
-	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
-	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
-	// "process.session_leader.pid" semantic conventions. It represents the PID
-	// of the process's session leader. This is also the session ID (SID) of
-	// the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 14
-	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
-	// ProcessUserIDKey is the attribute Key conforming to the
-	// "process.user.id" semantic conventions. It represents the effective user
-	// ID (EUID) of the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1001
-	ProcessUserIDKey = attribute.Key("process.user.id")
-
-	// ProcessUserNameKey is the attribute Key conforming to the
-	// "process.user.name" semantic conventions. It represents the username of
-	// the effective user of the process.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessUserNameKey = attribute.Key("process.user.name")
-
-	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
-	// semantic conventions. It represents the virtual process identifier.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 12
-	// Note: The process ID within a PID namespace. This is not necessarily
-	// unique across all processes on the host but it is unique within the
-	// process namespace that the process exists within.
-	ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
-	// voluntary
-	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
-	// involuntary
-	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
-	// major
-	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
-	// minor
-	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
-	return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
-	return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
-	return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
-	return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
-	return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
-	return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
-	return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
-	return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
-	return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
-	return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
-	return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
-	return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
-	return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
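Editor's note (illustrative, not part of the change set): the Process* helpers above all follow the same pattern, a typed setter over the corresponding attribute.Key. A minimal, hedged sketch of how they might be combined when building resource attributes; the semconv import path is an assumption based on this vendored package's version, and the attribute values are examples only.

	import (
		"os"
		"runtime"

		"go.opentelemetry.io/otel/attribute"
		semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed path
	)

	// processAttrs collects process identity attributes for a resource or span.
	func processAttrs() []attribute.KeyValue {
		return []attribute.KeyValue{
			semconv.ProcessPID(os.Getpid()),                  // process.pid
			semconv.ProcessCommandArgs(os.Args...),           // process.command_args
			semconv.ProcessRuntimeName(runtime.Compiler),     // e.g. "gc"
			semconv.ProcessRuntimeVersion(runtime.Version()), // e.g. "go1.22.0"
		}
	}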
-func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user -// ID (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username -// of the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the -// "process.vpid" semantic conventions. It represents the virtual process -// identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// Attributes for process CPU -const ( - // ProcessCPUStateKey is the attribute Key conforming to the - // "process.cpu.state" semantic conventions. It represents the CPU state of - // the process. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessCPUStateKey = attribute.Key("process.cpu.state") -) - -var ( - // system - ProcessCPUStateSystem = ProcessCPUStateKey.String("system") - // user - ProcessCPUStateUser = ProcessCPUStateKey.String("user") - // wait - ProcessCPUStateWait = ProcessCPUStateKey.String("wait") -) - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. 
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
-	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
-	// semantic conventions. It represents a message identifier that MUST be
-	// calculated as two different counters starting from `1`, one for sent
-	// messages and one for received messages.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: This way we guarantee that the values will be consistent between
-	// different implementations.
-	RPCMessageIDKey = attribute.Key("rpc.message.id")
-
-	// RPCMessageTypeKey is the attribute Key conforming to the
-	// "rpc.message.type" semantic conventions. It represents whether this
-	// is a received or sent message.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
-	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
-	// "rpc.message.uncompressed_size" semantic conventions. It represents the
-	// uncompressed size of the message in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
-	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
-	// semantic conventions. It represents the name of the (logical) method
-	// being called, must be equal to the $method part in the span name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'exampleMethod'
-	// Note: This is the logical name of the method from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// method/function. The `code.function` attribute may be used to store the
-	// latter (e.g., method actually executing the call on the server side, RPC
-	// client stub method on the client side).
-	RPCMethodKey = attribute.Key("rpc.method")
-
-	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
-	// semantic conventions. It represents the full (logical) name of the
-	// service being called, including its package name, if applicable.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myservice.EchoService'
-	// Note: This is the logical name of the service from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// class. The `code.namespace` attribute may be used to store the latter
-	// (despite the attribute name, it may include a class name; e.g., class
-	// with method actually executing the call on the server side, RPC client
-	// stub class on the client side).
-	RPCServiceKey = attribute.Key("rpc.service")
-
-	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
-	// semantic conventions. It represents a string identifying the remoting
-	// system. See below for a list of well-known identifiers.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // sent - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf")
-	// Apache Dubbo
-	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
-	// Connect RPC
-	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
-	return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
-	return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
-	return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
-	return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
-// "rpc.message.compressed_size" semantic conventions. It represents the
-// compressed size of the message in bytes.
-func RPCMessageCompressedSize(val int) attribute.KeyValue {
-	return RPCMessageCompressedSizeKey.Int(val)
-}
-
-// RPCMessageID returns an attribute KeyValue conforming to the
-// "rpc.message.id" semantic conventions. It represents a message identifier
-// that MUST be calculated as two different counters starting from `1`, one
-// for sent messages and one for received messages.
-func RPCMessageID(val int) attribute.KeyValue {
-	return RPCMessageIDKey.Int(val)
-}
-
-// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
-// the "rpc.message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func RPCMessageUncompressedSize(val int) attribute.KeyValue {
-	return RPCMessageUncompressedSizeKey.Int(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
-	return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
-	return RPCServiceKey.String(val)
-}
-
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection).
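Editor's note (illustrative, not part of the change set): the RPC helpers and enum members above are typically attached to an RPC span. A hedged sketch, where the gRPC values mirror the Examples documented above and the import paths are assumptions:

	import (
		semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed path
		"go.opentelemetry.io/otel/trace"
	)

	// annotateGRPCCall records the logical service/method of a gRPC call
	// that completed with status OK on the given span.
	func annotateGRPCCall(span trace.Span) {
		span.SetAttributes(
			semconv.RPCSystemGRPC,                       // rpc.system = "grpc"
			semconv.RPCService("myservice.EchoService"), // rpc.service
			semconv.RPCMethod("exampleMethod"),          // rpc.method
			semconv.RPCGRPCStatusCodeOk,                 // rpc.grpc.status_code = 0
		)
	}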
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to - // distinguish instances of the same service that exist at the same time - // (e.g. instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random - // Version 1 or Version 4 [RFC - // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an - // inherent unique ID as the source of - // this value if stability is desirable. In that case, the ID SHOULD be - // used as source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the - // purposes of identifying a service instance is - // needed. 
Similar to what can be seen in the man page for the - // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) - // file, the underlying - // data, such as pod name and namespace should be treated as confidential, - // being the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we - // do not recommend using one identifier - // for all processes participating in the application. Instead, it's - // recommended each division (e.g. a worker - // thread in unicorn) to have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it - // can't unambiguously determine the - // service instance that is generating that telemetry. For instance, - // creating an UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container - // within that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, - // as they know the target address and - // port. - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If - // `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. 
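Editor's note (illustrative, not part of the change set): service.* attributes are resource-level identity, so they are normally set once on the SDK resource rather than per span. A hedged sketch using the Service* helpers defined here; the values are taken from the Examples above, and the sdk/resource import and SchemaURL constant are assumptions:

	import (
		"go.opentelemetry.io/otel/sdk/resource"
		semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed path
	)

	// serviceResource identifies one horizontally scaled instance of a service.
	func serviceResource() *resource.Resource {
		return resource.NewWithAttributes(
			semconv.SchemaURL, // schema URL for this semconv version
			semconv.ServiceName("shoppingcart"),
			semconv.ServiceNamespace("Shop"),
			semconv.ServiceVersion("2.0.0"),
			semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
		)
	}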
-func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Describes System attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem
-// mode.
-func SystemFilesystemMode(val string) attribute.KeyValue {
-	return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path.
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
-	return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network attributes
-const (
-	// SystemNetworkStateKey is the attribute Key conforming to the
-	// "system.network.state" semantic conventions. It represents the state of
-	// a network connection; a stateless protocol MUST NOT set this attribute.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'close_wait'
-	SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
-	// close
-	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
-	// close_wait
-	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
-	// closing
-	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
-	// delete
-	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
-	// established
-	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
-	// fin_wait_1
-	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
-	// fin_wait_2
-	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
-	// last_ack
-	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
-	// listen
-	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
-	// syn_recv
-	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
-	// syn_sent
-	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
-	// time_wait
-	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process attributes
-const (
-	// SystemProcessStatusKey is the attribute Key conforming to the
-	// "system.process.status" semantic conventions. It represents the process
-	// state, e.g., [Linux Process State
-	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'running'
-	SystemProcessStatusKey = attribute.Key("system.process.status")
-)
-
-var (
-	// running
-	SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
-	// sleeping
-	SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
-	// stopped
-	SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
-	// defunct
-	SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
-)
-
-// Attributes for telemetry SDK.
-const (
-	// TelemetrySDKLanguageKey is the attribute Key conforming to the
-	// "telemetry.sdk.language" semantic conventions. It represents the
-	// language of the telemetry SDK.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: stable
-	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
-	// TelemetrySDKNameKey is the attribute Key conforming to the
-	// "telemetry.sdk.name" semantic conventions. It represents the name of the
-	// telemetry SDK as defined above.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: stable
-	// Examples: 'opentelemetry'
-	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
-	// to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. 
-func TelemetrySDKVersion(val string) attribute.KeyValue {
-	return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
-	return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
-	return TelemetryDistroVersionKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
-	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
-	// conventions. It represents the current "managed" thread ID (as opposed
-	// to OS thread ID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 42
-	ThreadIDKey = attribute.Key("thread.id")
-
-	// ThreadNameKey is the attribute Key conforming to the "thread.name"
-	// semantic conventions. It represents the current thread name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'main'
-	ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
-	return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
-	return ThreadNameKey.String(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
-	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
-	// semantic conventions. It represents the string indicating the
-	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
-	// used during the current connection.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
-	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
-	// Note: The values allowed for `tls.cipher` MUST be one of the
-	// `Descriptions` of the [registered TLS Cipher
-	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
-	TLSCipherKey = attribute.Key("tls.cipher")
-
-	// TLSClientCertificateKey is the attribute Key conforming to the
-	// "tls.client.certificate" semantic conventions. It represents the
-	// PEM-encoded stand-alone certificate offered by the client. This is
-	// usually mutually-exclusive of `client.certificate_chain` since this
-	// value also exists in that list.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...'
-	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
-	// TLSClientCertificateChainKey is the attribute Key conforming to the
-	// "tls.client.certificate_chain" semantic conventions. It represents the
-	// array of PEM-encoded certificates that make up the certificate chain
-	// offered by the client.
This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. 
It represents the
- // date/time indicating when the client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the server
- // name (also called SNI) indicating the hostname the client is
- // attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions.
It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/Time indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. 
It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
-	return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
-	return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
-	return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
-	return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
-	return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
-	return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/time
-// indicating when the client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
-	return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/time
-// indicating when the client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
-	return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the server
-// name (also called SNI) indicating the hostname the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. 
-func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" - // semantic conventions. It represents the domain extracted from the - // `url.full`, such as "opentelemetry.io". 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', - // '[1080:0:0:0:8:800:200C:417A]' - // Note: In some cases a URL may refer to an IP and/or port directly, - // without a domain name. In this case, the IP address would go to the - // domain field. If the URL contains a [literal IPv6 - // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by - // `[` and `]`, the `[` and `]` characters should also be captured in the - // domain field. - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from - // the `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: The file extension is only set if it exists, as not every url has - // a file extension. When the file name has multiple extensions - // `example.tar.gz`, only the last one should be captured `gz`, not - // `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed). Sensitive content provided in `url.full` SHOULD be - // scrubbed when instrumentations can identify it. - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" - // semantic conventions. It represents the unmodified original URL as seen - // in the event source. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // 'search?q=OpenTelemetry' - // Note: In network monitoring, the observed URL may be a full URL, whereas - // in access logs, the URL is often just represented as a path. This field - // is meant to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the - // same. 
- URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full` - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com', 'foo.co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). For example, the registered domain for - // `foo.example.com` is `example.com`. Trying to approximate this by simply - // taking the last two labels will not work well for TLDs such as `co.uk`. - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name - // under the registered_domain. In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain - // contains all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'east', 'sub2.sub1' - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If - // the domain has multiple levels of subdomain, such as - // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, - // with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" - // semantic conventions. It represents the low-cardinality template of an - // [absolute path - // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/users/{id}', '/users/:id', '/users?id={id}' - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective - // top level domain (eTLD), also known as the domain suffix, is the last - // part of the domain name. For example, the top level domain for - // example.com is `com`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com', 'co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the -// `url.full`, such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the -// "url.extension" semantic conventions. It represents the file extension -// extracted from the `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the -// "url.original" semantic conventions. It represents the unmodified original -// URL as seen in the event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" -// semantic conventions. It represents the port extracted from the `url.full` -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. 
It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the -// "url.subdomain" semantic conventions. It represents the subdomain portion of -// a fully qualified domain name includes all of the names except the host name -// under the registered_domain. In a partially qualified domain, or if the -// qualification level of the full name cannot be determined, subdomain -// contains all of the names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the -// "url.template" semantic conventions. It represents the low-cardinality -// template of an [absolute path -// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of -// the domain name. For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentNameKey is the attribute Key conforming to the - // "user_agent.name" semantic conventions. It represents the name of the - // user-agent extracted from original. Usually refers to the browser's - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Safari', 'YourApp' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's name - // from original string. In the case of using a user-agent for non-browser - // products, such as microservices with multiple names/versions inside the - // `user_agent.original`, the most significant name SHOULD be selected. In - // such a scenario it should align with `user_agent.version` - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 - // grpc-java-okhttp/1.27.2' - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of - // the user-agent extracted from original. Usually refers to the browser's - // version - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.1.2', '1.0.0' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's - // version from original string. 
In the case of using a user-agent for - // non-browser products, such as microservices with multiple names/versions - // inside the `user_agent.original`, the most significant version SHOULD be - // selected. In such a scenario it should align with `user_agent.name` - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// The attributes used to describe the packaged software running the -// application code. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. 
-func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go deleted file mode 100644 index d031bbea78..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.26.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go deleted file mode 100644 index bfaee0d56e..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go deleted file mode 100644 index fcdb9f4859..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: Experimental - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." - - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." 
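The attribute helpers deleted above all follow the same shape: a typed wrapper that turns a raw value into an `attribute.KeyValue` under the conventional key, so a wrong value type fails at compile time. A minimal sketch of how such helpers are typically attached to a span, assuming the standard `go.opentelemetry.io/otel` tracing API (the tracer and span names here are illustrative, not part of this diff):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func handshakeSpan(ctx context.Context) {
	// "example" is an illustrative instrumentation scope name.
	_, span := otel.Tracer("example").Start(ctx, "tls.handshake")
	defer span.End()

	// Each helper wraps its attribute.Key with the matching value type.
	span.SetAttributes(
		semconv.TLSEstablished(true),
		semconv.TLSProtocolVersion("1.3"),
		semconv.TLSClientServerName("opentelemetry.io"),
		semconv.UserAgentOriginal("Mozilla/5.0 (example)"),
	)
}
```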
- - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. 
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
- DBClientConnectionTimeoutsUnit = "{timeout}"
- DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionCreateTime is the metric conforming to the
- // "db.client.connection.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionCreateTimeName = "db.client.connection.create_time"
- DBClientConnectionCreateTimeUnit = "s"
- DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionWaitTime is the metric conforming to the
- // "db.client.connection.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
- DBClientConnectionWaitTimeUnit = "s"
- DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionUseTime is the metric conforming to the
- // "db.client.connection.use_time" semantic conventions. It represents the time
- // between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionUseTimeName = "db.client.connection.use_time"
- DBClientConnectionUseTimeUnit = "s"
- DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // DBClientConnectionsUsage is the metric conforming to the
- // "db.client.connections.usage" semantic conventions. It is deprecated;
- // use `db.client.connection.count` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
-
- // DBClientConnectionsIdleMax is the metric conforming to the
- // "db.client.connections.idle.max" semantic conventions. It is deprecated;
- // use `db.client.connection.idle.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
-
- // DBClientConnectionsIdleMin is the metric conforming to the
- // "db.client.connections.idle.min" semantic conventions. It is deprecated;
- // use `db.client.connection.idle.min` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
-
- // DBClientConnectionsMax is the metric conforming to the
- // "db.client.connections.max" semantic conventions. It is deprecated;
- // use `db.client.connection.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
- // "db.client.connections.pending_requests" semantic conventions. It is
- // deprecated; use `db.client.connection.pending_requests` instead.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
-
- // DBClientConnectionsTimeouts is the metric conforming to the
- // "db.client.connections.timeouts" semantic conventions. It is deprecated;
- // use `db.client.connection.timeouts` instead.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
-
- // DBClientConnectionsCreateTime is the metric conforming to the
- // "db.client.connections.create_time" semantic conventions. It is deprecated;
- // use `db.client.connection.create_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsWaitTime is the metric conforming to the
- // "db.client.connections.wait_time" semantic conventions. It is deprecated;
- // use `db.client.connection.wait_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsUseTime is the metric conforming to the
- // "db.client.connections.use_time" semantic conventions. It is deprecated;
- // use `db.client.connection.use_time` instead. Note: the unit also
- // changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It measures the time taken to perform a
- // DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
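Each metric entry in the deleted `metric.go` pairs the conventional name with its unit and description, so instruments can be declared without re-typing registry strings. A short sketch of the assumed usage, registering the pool-count UpDownCounter described above with the standard metric API (the meter name and the bare `state` attribute key are assumptions, not part of this diff):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func trackIdleConnection(ctx context.Context) error {
	meter := otel.Meter("example") // illustrative instrumentation scope name

	// Name, unit, and description all come from the generated constants,
	// keeping the instrument aligned with the semantic conventions.
	pool, err := meter.Int64UpDownCounter(
		semconv.DBClientConnectionCountName,
		metric.WithUnit(semconv.DBClientConnectionCountUnit),
		metric.WithDescription(semconv.DBClientConnectionCountDescription),
	)
	if err != nil {
		return err
	}

	// The bare "state" key mirrors the description's wording and is an
	// assumption; consult the semconv registry for the exact attribute name.
	pool.Add(ctx, 1, metric.WithAttributes(attribute.String("state", "idle")))
	return nil
}
```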
- - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Stable - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Stable - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. 
- // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. - // Instrument: counter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Stable - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. 
It represents the - // number of connections that are currently upgraded (WebSockets). - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)." - - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." - - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. - // Instrument: updowncounter - // Unit: {handshake} - // Stability: Stable - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." - - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. It represents the - // number of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It represents the measures the duration of the - // function's logic execution. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It represents the measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts.
- // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
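The stable HTTP duration metrics above are histograms recorded in seconds. A hedged sketch of recording http.server.request.duration for one request, assuming the standard go.opentelemetry.io/otel metric and attribute APIs (the function name and attribute values are illustrative; real code would create the histogram once at startup rather than per request):

package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// recordServerDuration records one request's elapsed wall time, in seconds,
// against an http.server.request.duration histogram created elsewhere.
func recordServerDuration(ctx context.Context, hist metric.Float64Histogram, start time.Time) {
	hist.Record(ctx, time.Since(start).Seconds(),
		metric.WithAttributes(
			attribute.String("http.request.method", "GET"),  // illustrative attribute
			attribute.Int("http.response.status_code", 200), // illustrative attribute
		),
	)
}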
- - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. 
- // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." - - // JvmMemoryUsedAfterLastGc is the metric conforming to the - // "jvm.memory.used_after_last_gc" semantic conventions. It represents the - // measure of memory used, as measured after the most recent garbage collection - // event on this pool. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" - JvmMemoryUsedAfterLastGcUnit = "By" - JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." - - // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic - // conventions. It represents the duration of JVM garbage collection actions. - // Instrument: histogram - // Unit: s - // Stability: Stable - JvmGcDurationName = "jvm.gc.duration" - JvmGcDurationUnit = "s" - JvmGcDurationDescription = "Duration of JVM garbage collection actions." - - // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic - // conventions. It represents the number of executing platform threads. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Stable - JvmThreadCountName = "jvm.thread.count" - JvmThreadCountUnit = "{thread}" - JvmThreadCountDescription = "Number of executing platform threads." - - // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic - // conventions. It represents the number of classes loaded since JVM start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassLoadedName = "jvm.class.loaded" - JvmClassLoadedUnit = "{class}" - JvmClassLoadedDescription = "Number of classes loaded since JVM start." - - // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" - // semantic conventions. It represents the number of classes unloaded since JVM - // start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassUnloadedName = "jvm.class.unloaded" - JvmClassUnloadedUnit = "{class}" - JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." - - // JvmClassCount is the metric conforming to the "jvm.class.count" semantic - // conventions. It represents the number of classes currently loaded. - // Instrument: updowncounter - // Unit: {class} - // Stability: Stable - JvmClassCountName = "jvm.class.count" - JvmClassCountUnit = "{class}" - JvmClassCountDescription = "Number of classes currently loaded." - - // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic - // conventions. It represents the number of processors available to the Java - // virtual machine. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Stable - JvmCPUCountName = "jvm.cpu.count" - JvmCPUCountUnit = "{cpu}" - JvmCPUCountDescription = "Number of processors available to the Java virtual machine." - - // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic - // conventions. It represents the CPU time used by the process as reported by - // the JVM. - // Instrument: counter - // Unit: s - // Stability: Stable - JvmCPUTimeName = "jvm.cpu.time" - JvmCPUTimeUnit = "s" - JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." - - // JvmCPURecentUtilization is the metric conforming to the - // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent - // CPU utilization for the process as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." - - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // measures the duration of publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // measures the duration of receive operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingProcessDuration is the metric conforming to the - // "messaging.process.duration" semantic conventions. It represents the - // measures the duration of process operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingProcessDurationName = "messaging.process.duration" - MessagingProcessDurationUnit = "s" - MessagingProcessDurationDescription = "Measures the duration of process operation." - - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // measures the number of received messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingProcessMessages is the metric conforming to the - // "messaging.process.messages" semantic conventions. It represents the - // measures the number of processed messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingProcessMessagesName = "messaging.process.messages" - MessagingProcessMessagesUnit = "{message}" - MessagingProcessMessagesDescription = "Measures the number of processed messages." - - // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic - // conventions. It represents the total CPU seconds broken down by different - // states. - // Instrument: counter - // Unit: s - // Stability: Experimental - ProcessCPUTimeName = "process.cpu.time" - ProcessCPUTimeUnit = "s" - ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." - - // ProcessCPUUtilization is the metric conforming to the - // "process.cpu.utilization" semantic conventions. 
It represents the difference - // in process.cpu.time since the last measurement, divided by the elapsed time - // and number of CPUs available to the process. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - ProcessCPUUtilizationName = "process.cpu.utilization" - ProcessCPUUtilizationUnit = "1" - ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." - - // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" - // semantic conventions. It represents the amount of physical memory in use. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryUsageName = "process.memory.usage" - ProcessMemoryUsageUnit = "By" - ProcessMemoryUsageDescription = "The amount of physical memory in use." - - // ProcessMemoryVirtual is the metric conforming to the - // "process.memory.virtual" semantic conventions. It represents the amount of - // committed virtual memory. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryVirtualName = "process.memory.virtual" - ProcessMemoryVirtualUnit = "By" - ProcessMemoryVirtualDescription = "The amount of committed virtual memory." - - // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic - // conventions. It represents the disk bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessDiskIoName = "process.disk.io" - ProcessDiskIoUnit = "By" - ProcessDiskIoDescription = "Disk bytes transferred." - - // ProcessNetworkIo is the metric conforming to the "process.network.io" - // semantic conventions. It represents the network bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessNetworkIoName = "process.network.io" - ProcessNetworkIoUnit = "By" - ProcessNetworkIoDescription = "Network bytes transferred." - - // ProcessThreadCount is the metric conforming to the "process.thread.count" - // semantic conventions. It represents the process threads count. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Experimental - ProcessThreadCountName = "process.thread.count" - ProcessThreadCountUnit = "{thread}" - ProcessThreadCountDescription = "Process threads count." - - // ProcessOpenFileDescriptorCount is the metric conforming to the - // "process.open_file_descriptor.count" semantic conventions. It represents the - // number of file descriptors in use by the process. - // Instrument: updowncounter - // Unit: {count} - // Stability: Experimental - ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" - ProcessOpenFileDescriptorCountUnit = "{count}" - ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." - - // ProcessContextSwitches is the metric conforming to the - // "process.context_switches" semantic conventions. It represents the number of - // times the process has been context switched. - // Instrument: counter - // Unit: {count} - // Stability: Experimental - ProcessContextSwitchesName = "process.context_switches" - ProcessContextSwitchesUnit = "{count}" - ProcessContextSwitchesDescription = "Number of times the process has been context switched." - - // ProcessPagingFaults is the metric conforming to the "process.paging.faults" - // semantic conventions. It represents the number of page faults the process - // has made. 
- // Instrument: counter - // Unit: {fault} - // Stability: Experimental - ProcessPagingFaultsName = "process.paging.faults" - ProcessPagingFaultsUnit = "{fault}" - ProcessPagingFaultsDescription = "Number of page faults the process has made." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. 
- // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." - - // SystemMemoryShared is the metric conforming to the "system.memory.shared" - // semantic conventions. It represents the shared memory used (mostly by - // tmpfs). - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemorySharedName = "system.memory.shared" - SystemMemorySharedUnit = "By" - SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." - - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. 
- // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessCount is the metric conforming to the "system.process.count" - // semantic conventions. It represents the total number of processes in each - // state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessCountName = "system.process.count" - SystemProcessCountUnit = "{process}" - SystemProcessCountDescription = "Total number of processes in each state" - - // SystemProcessCreated is the metric conforming to the - // "system.process.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. - // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessCreatedName = "system.process.created" - SystemProcessCreatedUnit = "{process}" - SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go deleted file mode 100644 index 4c87c7adcc..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. 
Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/LICENSE b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/README.md b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/README.md deleted file mode 100644 index 58ccaba69b..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Trace API - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/auto.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/auto.go deleted file mode 100644 index d90af8f673..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/auto.go +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - "encoding/json" - "fmt" - "math" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - "unicode/utf8" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" - "go.opentelemetry.io/otel/trace/embedded" - "go.opentelemetry.io/otel/trace/internal/telemetry" -) - -// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider]. -// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument -// the process using the returned TracerProvider, all of the telemetry it -// produces will be processed and handled by that Instrumentation. By default, -// if no Instrumentation instruments the TracerProvider it will not generate -// any trace telemetry. -func newAutoTracerProvider() TracerProvider { return tracerProviderInstance } - -var tracerProviderInstance = new(autoTracerProvider) - -type autoTracerProvider struct{ embedded.TracerProvider } - -var _ TracerProvider = autoTracerProvider{} - -func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { - cfg := NewTracerConfig(opts...) - return autoTracer{ - name: name, - version: cfg.InstrumentationVersion(), - schemaURL: cfg.SchemaURL(), - } -} - -type autoTracer struct { - embedded.Tracer - - name, schemaURL, version string -} - -var _ Tracer = autoTracer{} - -func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { - var psc, sc SpanContext - sampled := true - span := new(autoSpan) - - // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &sc) - - span.sampled.Store(sampled) - span.spanContext = sc - - ctx = ContextWithSpan(ctx, span) - - if sampled { - // Only build traces if sampled. - cfg := NewSpanStartConfig(opts...) 
- span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) - } - - return ctx, span -} - -// Expected to be implemented in eBPF. -// -//go:noinline -func (t *autoTracer) start( - ctx context.Context, - spanPtr *autoSpan, - psc *SpanContext, - sampled *bool, - sc *SpanContext, -) { - start(ctx, spanPtr, psc, sampled, sc) -} - -// start is used for testing. -var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {} - -func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) { - span := &telemetry.Span{ - TraceID: telemetry.TraceID(sc.TraceID()), - SpanID: telemetry.SpanID(sc.SpanID()), - Flags: uint32(sc.TraceFlags()), - TraceState: sc.TraceState().String(), - ParentSpanID: telemetry.SpanID(psc.SpanID()), - Name: name, - Kind: spanKind(cfg.SpanKind()), - } - - span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) - - links := cfg.Links() - if limit := maxSpan.Links; limit == 0 { - n := int64(len(links)) - if n > 0 { - span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. - } - } else { - if limit > 0 { - n := int64(max(len(links)-limit, 0)) - span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. - links = links[n:] - } - span.Links = convLinks(links) - } - - if t := cfg.Timestamp(); !t.IsZero() { - span.StartTime = cfg.Timestamp() - } else { - span.StartTime = time.Now() - } - - return &telemetry.Traces{ - ResourceSpans: []*telemetry.ResourceSpans{ - { - ScopeSpans: []*telemetry.ScopeSpans{ - { - Scope: &telemetry.Scope{ - Name: t.name, - Version: t.version, - }, - Spans: []*telemetry.Span{span}, - SchemaURL: t.schemaURL, - }, - }, - }, - }, - }, span -} - -func spanKind(kind SpanKind) telemetry.SpanKind { - switch kind { - case SpanKindInternal: - return telemetry.SpanKindInternal - case SpanKindServer: - return telemetry.SpanKindServer - case SpanKindClient: - return telemetry.SpanKindClient - case SpanKindProducer: - return telemetry.SpanKindProducer - case SpanKindConsumer: - return telemetry.SpanKindConsumer - } - return telemetry.SpanKind(0) // undefined. -} - -type autoSpan struct { - embedded.Span - - spanContext SpanContext - sampled atomic.Bool - - mu sync.Mutex - traces *telemetry.Traces - span *telemetry.Span -} - -func (s *autoSpan) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - // s.spanContext is immutable, do not acquire lock s.mu. - return s.spanContext -} - -func (s *autoSpan) IsRecording() bool { - if s == nil { - return false - } - - return s.sampled.Load() -} - -func (s *autoSpan) SetStatus(c codes.Code, msg string) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - if s.span.Status == nil { - s.span.Status = new(telemetry.Status) - } - - s.span.Status.Message = msg - - switch c { - case codes.Unset: - s.span.Status.Code = telemetry.StatusCodeUnset - case codes.Error: - s.span.Status.Code = telemetry.StatusCodeError - case codes.Ok: - s.span.Status.Code = telemetry.StatusCodeOK - } -} - -func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - limit := maxSpan.Attrs - if limit == 0 { - // No attributes allowed. - n := int64(len(attrs)) - if n > 0 { - s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. 
-		}
-		return
-	}
-
-	m := make(map[string]int)
-	for i, a := range s.span.Attrs {
-		m[a.Key] = i
-	}
-
-	for _, a := range attrs {
-		val := convAttrValue(a.Value)
-		if val.Empty() {
-			s.span.DroppedAttrs++
-			continue
-		}
-
-		if idx, ok := m[string(a.Key)]; ok {
-			s.span.Attrs[idx] = telemetry.Attr{
-				Key:   string(a.Key),
-				Value: val,
-			}
-		} else if limit < 0 || len(s.span.Attrs) < limit {
-			s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
-				Key:   string(a.Key),
-				Value: val,
-			})
-			m[string(a.Key)] = len(s.span.Attrs) - 1
-		} else {
-			s.span.DroppedAttrs++
-		}
-	}
-}
-
-// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
-// number of dropped attributes is also returned.
-func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
-	n := len(attrs)
-	if limit == 0 {
-		var out uint32
-		if n > 0 {
-			out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked.
-		}
-		return nil, out
-	}
-
-	if limit < 0 {
-		// Unlimited.
-		return convAttrs(attrs), 0
-	}
-
-	if n < 0 {
-		n = 0
-	}
-
-	limit = min(n, limit)
-	return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked.
-}
-
-func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
-	if len(attrs) == 0 {
-		// Avoid allocations if not necessary.
-		return nil
-	}
-
-	out := make([]telemetry.Attr, 0, len(attrs))
-	for _, attr := range attrs {
-		key := string(attr.Key)
-		val := convAttrValue(attr.Value)
-		if val.Empty() {
-			continue
-		}
-		out = append(out, telemetry.Attr{Key: key, Value: val})
-	}
-	return out
-}
-
-func convAttrValue(value attribute.Value) telemetry.Value {
-	switch value.Type() {
-	case attribute.BOOL:
-		return telemetry.BoolValue(value.AsBool())
-	case attribute.INT64:
-		return telemetry.Int64Value(value.AsInt64())
-	case attribute.FLOAT64:
-		return telemetry.Float64Value(value.AsFloat64())
-	case attribute.STRING:
-		v := truncate(maxSpan.AttrValueLen, value.AsString())
-		return telemetry.StringValue(v)
-	case attribute.BOOLSLICE:
-		slice := value.AsBoolSlice()
-		out := make([]telemetry.Value, 0, len(slice))
-		for _, v := range slice {
-			out = append(out, telemetry.BoolValue(v))
-		}
-		return telemetry.SliceValue(out...)
-	case attribute.INT64SLICE:
-		slice := value.AsInt64Slice()
-		out := make([]telemetry.Value, 0, len(slice))
-		for _, v := range slice {
-			out = append(out, telemetry.Int64Value(v))
-		}
-		return telemetry.SliceValue(out...)
-	case attribute.FLOAT64SLICE:
-		slice := value.AsFloat64Slice()
-		out := make([]telemetry.Value, 0, len(slice))
-		for _, v := range slice {
-			out = append(out, telemetry.Float64Value(v))
-		}
-		return telemetry.SliceValue(out...)
-	case attribute.STRINGSLICE:
-		slice := value.AsStringSlice()
-		out := make([]telemetry.Value, 0, len(slice))
-		for _, v := range slice {
-			v = truncate(maxSpan.AttrValueLen, v)
-			out = append(out, telemetry.StringValue(v))
-		}
-		return telemetry.SliceValue(out...)
-	}
-	return telemetry.Value{}
-}
-
-// truncate returns a truncated version of s such that it contains at most
-// the limit number of characters. Truncation is applied by returning the limit
-// number of valid characters contained in s.
-//
-// If limit is negative, it returns the original string.
-//
-// UTF-8 is supported. When truncating, all invalid characters are dropped
-// before applying truncation.
-//
-// If s already contains no more than the limit number of bytes, it is returned
-// unchanged. No invalid characters are removed.
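
As an aside on the truncate contract documented above (the vendored implementation follows below): a minimal, standalone sketch of the same rune-counting behavior using only the standard library. The name truncateRunes is illustrative and not part of the vendored package.

    package main

    import (
    	"fmt"
    	"unicode/utf8"
    )

    // truncateRunes mirrors the documented contract of the vendored truncate
    // helper: a negative limit means unlimited, strings already within the
    // byte limit are returned unchanged, and invalid UTF-8 bytes are dropped
    // before the rune count is applied.
    func truncateRunes(limit int, s string) string {
    	if limit < 0 || len(s) <= limit {
    		return s
    	}
    	out := make([]byte, 0, limit)
    	count := 0
    	for i := 0; i < len(s) && count < limit; {
    		r, size := utf8.DecodeRuneInString(s[i:])
    		if r == utf8.RuneError && size == 1 {
    			i++ // Invalid byte: drop it without counting.
    			continue
    		}
    		out = utf8.AppendRune(out, r)
    		i += size
    		count++
    	}
    	return string(out)
    }

    func main() {
    	fmt.Println(truncateRunes(4, "héllo"))    // héll
    	fmt.Println(truncateRunes(4, "h\xffé!!")) // hé!! (invalid byte dropped)
    }
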
-func truncate(limit int, s string) string {
-	// This prioritizes performance in the following order based on the most
-	// common expected use-cases.
-	//
-	//  - Short values less than the default limit (128).
-	//  - Strings with valid encodings that exceed the limit.
-	//  - No limit.
-	//  - Strings with invalid encodings that exceed the limit.
-	if limit < 0 || len(s) <= limit {
-		return s
-	}
-
-	// Optimistically, assume all valid UTF-8.
-	var b strings.Builder
-	count := 0
-	for i, c := range s {
-		if c != utf8.RuneError {
-			count++
-			if count > limit {
-				return s[:i]
-			}
-			continue
-		}
-
-		_, size := utf8.DecodeRuneInString(s[i:])
-		if size == 1 {
-			// Invalid encoding.
-			b.Grow(len(s) - 1)
-			_, _ = b.WriteString(s[:i])
-			s = s[i:]
-			break
-		}
-	}
-
-	// Fast-path, no invalid input.
-	if b.Cap() == 0 {
-		return s
-	}
-
-	// Truncate while validating UTF-8.
-	for i := 0; i < len(s) && count < limit; {
-		c := s[i]
-		if c < utf8.RuneSelf {
-			// Optimization for single byte runes (common case).
-			_ = b.WriteByte(c)
-			i++
-			count++
-			continue
-		}
-
-		_, size := utf8.DecodeRuneInString(s[i:])
-		if size == 1 {
-			// We checked for all 1-byte runes above, this is a RuneError.
-			i++
-			continue
-		}
-
-		_, _ = b.WriteString(s[i : i+size])
-		i += size
-		count++
-	}
-
-	return b.String()
-}
-
-func (s *autoSpan) End(opts ...SpanEndOption) {
-	if s == nil || !s.sampled.Swap(false) {
-		return
-	}
-
-	// s.end exists so the lock (s.mu) is not held while s.ended is called.
-	s.ended(s.end(opts))
-}
-
-func (s *autoSpan) end(opts []SpanEndOption) []byte {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	cfg := NewSpanEndConfig(opts...)
-	if t := cfg.Timestamp(); !t.IsZero() {
-		s.span.EndTime = cfg.Timestamp()
-	} else {
-		s.span.EndTime = time.Now()
-	}
-
-	b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
-	return b
-}
-
-// Expected to be implemented in eBPF.
-//
-//go:noinline
-func (*autoSpan) ended(buf []byte) { ended(buf) }
-
-// ended is used for testing.
-var ended = func([]byte) {}
-
-func (s *autoSpan) RecordError(err error, opts ...EventOption) {
-	if s == nil || err == nil || !s.sampled.Load() {
-		return
-	}
-
-	cfg := NewEventConfig(opts...)
-
-	attrs := cfg.Attributes()
-	attrs = append(attrs,
-		semconv.ExceptionType(typeStr(err)),
-		semconv.ExceptionMessage(err.Error()),
-	)
-	if cfg.StackTrace() {
-		buf := make([]byte, 2048)
-		n := runtime.Stack(buf, false)
-		attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
-	}
-
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
-}
-
-func typeStr(i any) string {
-	t := reflect.TypeOf(i)
-	if t.PkgPath() == "" && t.Name() == "" {
-		// Likely a builtin type.
-		return t.String()
-	}
-	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
-}
-
-func (s *autoSpan) AddEvent(name string, opts ...EventOption) {
-	if s == nil || !s.sampled.Load() {
-		return
-	}
-
-	cfg := NewEventConfig(opts...)
-
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
-}
-
-// addEvent adds an event with name and attrs at tStamp to the span. The span
-// lock (s.mu) needs to be held by the caller.
-func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
-	limit := maxSpan.Events
-
-	if limit == 0 {
-		s.span.DroppedEvents++
-		return
-	}
-
-	if limit > 0 && len(s.span.Events) == limit {
-		// Drop head while avoiding allocation of more capacity.
-		copy(s.span.Events[:limit-1], s.span.Events[1:])
-		s.span.Events = s.span.Events[:limit-1]
-		s.span.DroppedEvents++
-	}
-
-	e := &telemetry.SpanEvent{Time: tStamp, Name: name}
-	e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
-
-	s.span.Events = append(s.span.Events, e)
-}
-
-func (s *autoSpan) AddLink(link Link) {
-	if s == nil || !s.sampled.Load() {
-		return
-	}
-
-	l := maxSpan.Links
-
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	if l == 0 {
-		s.span.DroppedLinks++
-		return
-	}
-
-	if l > 0 && len(s.span.Links) == l {
-		// Drop head while avoiding allocation of more capacity.
-		copy(s.span.Links[:l-1], s.span.Links[1:])
-		s.span.Links = s.span.Links[:l-1]
-		s.span.DroppedLinks++
-	}
-
-	s.span.Links = append(s.span.Links, convLink(link))
-}
-
-func convLinks(links []Link) []*telemetry.SpanLink {
-	out := make([]*telemetry.SpanLink, 0, len(links))
-	for _, link := range links {
-		out = append(out, convLink(link))
-	}
-	return out
-}
-
-func convLink(link Link) *telemetry.SpanLink {
-	l := &telemetry.SpanLink{
-		TraceID:    telemetry.TraceID(link.SpanContext.TraceID()),
-		SpanID:     telemetry.SpanID(link.SpanContext.SpanID()),
-		TraceState: link.SpanContext.TraceState().String(),
-		Flags:      uint32(link.SpanContext.TraceFlags()),
-	}
-	l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
-
-	return l
-}
-
-func (s *autoSpan) SetName(name string) {
-	if s == nil || !s.sampled.Load() {
-		return
-	}
-
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	s.span.Name = name
-}
-
-func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() }
-
-// maxSpan are the span limits resolved during startup.
-var maxSpan = newSpanLimits()
-
-type spanLimits struct {
-	// Attrs is the number of allowed attributes for a span.
-	//
-	// This is resolved from the environment variable value for the
-	// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
-	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
-	// that is not set, is used.
-	Attrs int
-	// AttrValueLen is the maximum attribute value length allowed for a span.
-	//
-	// This is resolved from the environment variable value for the
-	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
-	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
-	// if that is not set, is used.
-	AttrValueLen int
-	// Events is the number of allowed events for a span.
-	//
-	// This is resolved from the environment variable value for the
-	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
-	Events int
-	// EventAttrs is the number of allowed attributes for a span event.
-	//
-	// This is resolved from the environment variable value for the
-	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
-	EventAttrs int
-	// Links is the number of allowed Links for a span.
-	//
-	// This is resolved from the environment variable value for the
-	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
-	Links int
-	// LinkAttrs is the number of allowed attributes for a span link.
-	//
-	// This is resolved from the environment variable value for the
-	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
-	LinkAttrs int
-}
-
-func newSpanLimits() spanLimits {
-	return spanLimits{
-		Attrs: firstEnv(
-			128,
-			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
-			"OTEL_ATTRIBUTE_COUNT_LIMIT",
-		),
-		AttrValueLen: firstEnv(
-			-1, // Unlimited.
- "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", - "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", - ), - Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), - EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), - Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), - LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), - } -} - -// firstEnv returns the parsed integer value of the first matching environment -// variable from keys. The defaultVal is returned if the value is not an -// integer or no match is found. -func firstEnv(defaultVal int, keys ...string) int { - for _, key := range keys { - strV := os.Getenv(key) - if strV == "" { - continue - } - - v, err := strconv.Atoi(strV) - if err == nil { - return v - } - // Ignore invalid environment variable. - } - - return defaultVal -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/config.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/config.go deleted file mode 100644 index 9c0b720a4d..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/config.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// TracerConfig is a group of options for a Tracer. -type TracerConfig struct { - instrumentationVersion string - // Schema URL of the telemetry emitted by the Tracer. - schemaURL string - attrs attribute.Set -} - -// InstrumentationVersion returns the version of the library providing instrumentation. -func (t *TracerConfig) InstrumentationVersion() string { - return t.instrumentationVersion -} - -// InstrumentationAttributes returns the attributes associated with the library -// providing instrumentation. -func (t *TracerConfig) InstrumentationAttributes() attribute.Set { - return t.attrs -} - -// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. -func (t *TracerConfig) SchemaURL() string { - return t.schemaURL -} - -// NewTracerConfig applies all the options to a returned TracerConfig. -func NewTracerConfig(options ...TracerOption) TracerConfig { - var config TracerConfig - for _, option := range options { - config = option.apply(config) - } - return config -} - -// TracerOption applies an option to a TracerConfig. -type TracerOption interface { - apply(TracerConfig) TracerConfig -} - -type tracerOptionFunc func(TracerConfig) TracerConfig - -func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { - return fn(cfg) -} - -// SpanConfig is a group of options for a Span. -type SpanConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - links []Link - newRoot bool - spanKind SpanKind - stackTrace bool -} - -// Attributes describe the associated qualities of a Span. -func (cfg *SpanConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in a Span life-cycle. -func (cfg *SpanConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *SpanConfig) StackTrace() bool { - return cfg.stackTrace -} - -// Links are the associations a Span has with other Spans. -func (cfg *SpanConfig) Links() []Link { - return cfg.links -} - -// NewRoot identifies a Span as the root Span for a new trace. This is -// commonly used when an existing trace crosses trust boundaries and the -// remote parent span context should be ignored for security. 
-func (cfg *SpanConfig) NewRoot() bool { - return cfg.newRoot -} - -// SpanKind is the role a Span has in a trace. -func (cfg *SpanConfig) SpanKind() SpanKind { - return cfg.spanKind -} - -// NewSpanStartConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanStart(c) - } - return c -} - -// NewSpanEndConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanEnd(c) - } - return c -} - -// SpanStartOption applies an option to a SpanConfig. These options are applicable -// only when the span is created. -type SpanStartOption interface { - applySpanStart(SpanConfig) SpanConfig -} - -type spanOptionFunc func(SpanConfig) SpanConfig - -func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { - return fn(cfg) -} - -// SpanEndOption applies an option to a SpanConfig. These options are -// applicable only when the span is ended. -type SpanEndOption interface { - applySpanEnd(SpanConfig) SpanConfig -} - -// EventConfig is a group of options for an Event. -type EventConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - stackTrace bool -} - -// Attributes describe the associated qualities of an Event. -func (cfg *EventConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in an Event life-cycle. -func (cfg *EventConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *EventConfig) StackTrace() bool { - return cfg.stackTrace -} - -// NewEventConfig applies all the EventOptions to a returned EventConfig. If no -// timestamp option is passed, the returned EventConfig will have a Timestamp -// set to the call time, otherwise no validation is performed on the returned -// EventConfig. -func NewEventConfig(options ...EventOption) EventConfig { - var c EventConfig - for _, option := range options { - c = option.applyEvent(c) - } - if c.timestamp.IsZero() { - c.timestamp = time.Now() - } - return c -} - -// EventOption applies span event options to an EventConfig. -type EventOption interface { - applyEvent(EventConfig) EventConfig -} - -// SpanOption are options that can be used at both the beginning and end of a span. -type SpanOption interface { - SpanStartOption - SpanEndOption -} - -// SpanStartEventOption are options that can be used at the start of a span, or with an event. -type SpanStartEventOption interface { - SpanStartOption - EventOption -} - -// SpanEndEventOption are options that can be used at the end of a span, or with an event. -type SpanEndEventOption interface { - SpanEndOption - EventOption -} - -type attributeOption []attribute.KeyValue - -func (o attributeOption) applySpan(c SpanConfig) SpanConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) 
- return c -} -func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o attributeOption) applyEvent(c EventConfig) EventConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} - -var _ SpanStartEventOption = attributeOption{} - -// WithAttributes adds the attributes related to a span life-cycle event. -// These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start event. Otherwise, these -// attributes provide additional information about the event being recorded -// (e.g. error, state change, processing progress, system event). -// -// If multiple of these options are passed the attributes of each successive -// option will extend the attributes instead of overwriting. There is no -// guarantee of uniqueness in the resulting attributes. -func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { - return attributeOption(attributes) -} - -// SpanEventOption are options that can be used with an event or a span. -type SpanEventOption interface { - SpanOption - EventOption -} - -type timestampOption time.Time - -func (o timestampOption) applySpan(c SpanConfig) SpanConfig { - c.timestamp = time.Time(o) - return c -} -func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applyEvent(c EventConfig) EventConfig { - c.timestamp = time.Time(o) - return c -} - -var _ SpanEventOption = timestampOption{} - -// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. -// started, stopped, errored). -func WithTimestamp(t time.Time) SpanEventOption { - return timestampOption(t) -} - -type stackTraceOption bool - -func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { - c.stackTrace = bool(o) - return c -} - -func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } - -// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). -func WithStackTrace(b bool) SpanEndEventOption { - return stackTraceOption(b) -} - -// WithLinks adds links to a Span. The links are added to the existing Span -// links, i.e. this does not overwrite. Links with invalid span context are ignored. -func WithLinks(links ...Link) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.links = append(cfg.links, links...) - return cfg - }) -} - -// WithNewRoot specifies that the Span should be treated as a root Span. Any -// existing parent span context will be ignored when defining the Span's trace -// identifiers. -func WithNewRoot() SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.newRoot = true - return cfg - }) -} - -// WithSpanKind sets the SpanKind of a Span. -func WithSpanKind(kind SpanKind) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.spanKind = kind - return cfg - }) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.instrumentationVersion = version - return cfg - }) -} - -// WithInstrumentationAttributes sets the instrumentation attributes. -// -// The passed attributes will be de-duplicated. 
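
The option types above are all small value types that fold themselves into a config struct via NewSpanStartConfig and NewSpanEndConfig. As a usage sketch of how they read at a call site through the public trace API (this is illustrative caller code, not code from this repository):

    package main

    import (
    	"context"
    	"time"

    	"go.opentelemetry.io/otel"
    	"go.opentelemetry.io/otel/attribute"
    	"go.opentelemetry.io/otel/trace"
    )

    func main() {
    	tracer := otel.Tracer("example/options")

    	// Start-time options: kind, attributes, and an explicit timestamp all
    	// funnel through NewSpanStartConfig.
    	ctx, span := tracer.Start(context.Background(), "checkout",
    		trace.WithSpanKind(trace.SpanKindClient),
    		trace.WithAttributes(attribute.String("cart.id", "c-42")),
    		trace.WithTimestamp(time.Now()),
    	)
    	_ = ctx

    	// End-time options go through NewSpanEndConfig.
    	span.End(trace.WithTimestamp(time.Now().Add(5 * time.Millisecond)))
    }
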
-func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { - return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) - return config - }) -} - -// WithSchemaURL sets the schema URL for the Tracer. -func WithSchemaURL(schemaURL string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.schemaURL = schemaURL - return cfg - }) -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/context.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/context.go deleted file mode 100644 index 8c45a7107f..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/context.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import "context" - -type traceContextKeyType int - -const currentSpanKey traceContextKeyType = iota - -// ContextWithSpan returns a copy of parent with span set as the current Span. -func ContextWithSpan(parent context.Context, span Span) context.Context { - return context.WithValue(parent, currentSpanKey, span) -} - -// ContextWithSpanContext returns a copy of parent with sc as the current -// Span. The Span implementation that wraps sc is non-recording and performs -// no operations other than to return sc as the SpanContext from the -// SpanContext method. -func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { - return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) -} - -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly -// as a remote SpanContext and as the current Span. The Span implementation -// that wraps rsc is non-recording and performs no operations other than to -// return rsc as the SpanContext from the SpanContext method. -func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { - return ContextWithSpanContext(parent, rsc.WithRemote(true)) -} - -// SpanFromContext returns the current Span from ctx. -// -// If no Span is currently set in ctx an implementation of a Span that -// performs no operations is returned. -func SpanFromContext(ctx context.Context) Span { - if ctx == nil { - return noopSpanInstance - } - if span, ok := ctx.Value(currentSpanKey).(Span); ok { - return span - } - return noopSpanInstance -} - -// SpanContextFromContext returns the current Span's SpanContext. -func SpanContextFromContext(ctx context.Context) SpanContext { - return SpanFromContext(ctx).SpanContext() -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/doc.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/doc.go deleted file mode 100644 index cdbf41d6d7..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/doc.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package trace provides an implementation of the tracing part of the -OpenTelemetry API. - -To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. In its simplest form: - - var tracer trace.Tracer - - func init() { - tracer = otel.Tracer("instrumentation/package/name") - } - - func operation(ctx context.Context) { - var span trace.Span - ctx, span = tracer.Start(ctx, "operation") - defer span.End() - // ... - } - -A Tracer is unique to the instrumentation and is used to create Spans. 
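
A side note on the context helpers deleted above (context.go): propagating a remote SpanContext and reading it back is a short round trip through the public API. A usage sketch; the trace ID and span ID hex values are arbitrary examples.

    package main

    import (
    	"context"
    	"fmt"

    	"go.opentelemetry.io/otel/trace"
    )

    func main() {
    	// Build a remote parent SpanContext, e.g. parsed from incoming headers.
    	tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
    	sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
    	sc := trace.NewSpanContext(trace.SpanContextConfig{
    		TraceID:    tid,
    		SpanID:     sid,
    		TraceFlags: trace.FlagsSampled,
    	})

    	// ContextWithRemoteSpanContext stores a non-recording span wrapping sc.
    	ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)

    	// SpanContextFromContext reads it back; IsRemote is now true.
    	got := trace.SpanContextFromContext(ctx)
    	fmt.Println(got.TraceID(), got.IsRemote())
    }
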
-Instrumentation should be designed to accept a TracerProvider from which it -can create its own unique Tracer. Alternatively, the registered global -TracerProvider from the go.opentelemetry.io/otel package can be used as -a default. - - const ( - name = "instrumentation/package/name" - version = "0.1.0" - ) - - type Instrumentation struct { - tracer trace.Tracer - } - - func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { - if tp == nil { - tp = otel.TracerProvider() - } - return &Instrumentation{ - tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), - } - } - - func operation(ctx context.Context, inst *Instrumentation) { - var span trace.Span - ctx, span = inst.tracer.Start(ctx, "operation") - defer span.End() - // ... - } - -# API Implementations - -This package does not conform to the standard Go versioning policy; all of its -interfaces may have methods added to them without a package major version bump. -This non-standard API evolution could surprise an uninformed implementation -author. They could unknowingly build their implementation in a way that would -result in a runtime panic for their users that update to the new API. - -The API is designed to help inform an instrumentation author about this -non-standard API evolution. It requires them to choose a default behavior for -unimplemented interface methods. There are three behavior choices they can -make: - - - Compilation failure - - Panic - - Default to another implementation - -All interfaces in this API embed a corresponding interface from -[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default -behavior of their implementations to be a compilation failure, signaling to -their users they need to update to the latest version of that implementation, -they need to embed the corresponding interface from -[go.opentelemetry.io/otel/trace/embedded] in their implementation. For -example, - - import "go.opentelemetry.io/otel/trace/embedded" - - type TracerProvider struct { - embedded.TracerProvider - // ... - } - -If an author wants the default behavior of their implementations to panic, they -can embed the API interface directly. - - import "go.opentelemetry.io/otel/trace" - - type TracerProvider struct { - trace.TracerProvider - // ... - } - -This option is not recommended. It will lead to publishing packages that -contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a transitive -dependency. - -Finally, an author can embed another implementation in theirs. The embedded -implementation will be used for methods not defined by the author. For example, -an author who wants to default to silently dropping the call can use -[go.opentelemetry.io/otel/trace/noop]: - - import "go.opentelemetry.io/otel/trace/noop" - - type TracerProvider struct { - noop.TracerProvider - // ... - } - -It is strongly recommended that authors only embed -[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. -That implementation is the only one OpenTelemetry authors can guarantee will -fully implement all the API interfaces when a user updates their API. 
-*/ -package trace // import "go.opentelemetry.io/otel/trace" diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/embedded/README.md deleted file mode 100644 index 7754a239ee..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/embedded/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Trace Embedded - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go deleted file mode 100644 index 3e359a00bf..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package embedded provides interfaces embedded within the [OpenTelemetry -// trace API]. -// -// Implementers of the [OpenTelemetry trace API] can embed the relevant type -// from this package into their implementation directly. Doing so will result -// in a compilation error for users when the [OpenTelemetry trace API] is -// extended (which is something that can happen without a major version bump of -// the API package). -// -// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace -package embedded // import "go.opentelemetry.io/otel/trace/embedded" - -// TracerProvider is embedded in -// [go.opentelemetry.io/otel/trace.TracerProvider]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to -// experience a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type TracerProvider interface{ tracerProvider() } - -// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Tracer interface{ tracer() } - -// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is -// extended (which is something that can happen without a major version bump of -// the API package). 
-type Span interface{ span() }
diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
deleted file mode 100644
index f663547b4e..0000000000
--- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
-
-// Attr is a key-value pair.
-type Attr struct {
-	Key   string `json:"key,omitempty"`
-	Value Value  `json:"value,omitempty"`
-}
-
-// String returns an Attr for a string value.
-func String(key, value string) Attr {
-	return Attr{key, StringValue(value)}
-}
-
-// Int64 returns an Attr for an int64 value.
-func Int64(key string, value int64) Attr {
-	return Attr{key, Int64Value(value)}
-}
-
-// Int returns an Attr for an int value.
-func Int(key string, value int) Attr {
-	return Int64(key, int64(value))
-}
-
-// Float64 returns an Attr for a float64 value.
-func Float64(key string, value float64) Attr {
-	return Attr{key, Float64Value(value)}
-}
-
-// Bool returns an Attr for a bool value.
-func Bool(key string, value bool) Attr {
-	return Attr{key, BoolValue(value)}
-}
-
-// Bytes returns an Attr for a []byte value.
-// The passed slice must not be changed after it is passed.
-func Bytes(key string, value []byte) Attr {
-	return Attr{key, BytesValue(value)}
-}
-
-// Slice returns an Attr for a []Value value.
-// The passed slice must not be changed after it is passed.
-func Slice(key string, value ...Value) Attr {
-	return Attr{key, SliceValue(value...)}
-}
-
-// Map returns an Attr for a map value.
-// The passed slice must not be changed after it is passed.
-func Map(key string, value ...Attr) Attr {
-	return Attr{key, MapValue(value...)}
-}
-
-// Equal reports whether a is equal to b.
-func (a Attr) Equal(b Attr) bool {
-	return a.Key == b.Key && a.Value.Equal(b.Value)
-}
diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
deleted file mode 100644
index 5debe90bbb..0000000000
--- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package telemetry provides lightweight representations of OpenTelemetry
-telemetry that are compatible with the OTLP JSON protobuf encoding.
-*/
-package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
deleted file mode 100644
index 7b1ae3c4ea..0000000000
--- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
-
-import (
-	"encoding/hex"
-	"errors"
-	"fmt"
-)
-
-const (
-	traceIDSize = 16
-	spanIDSize  = 8
-)
-
-// TraceID is a custom data type that is used for all trace IDs.
-type TraceID [traceIDSize]byte
-
-// String returns the hex string representation form of a TraceID.
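
A quick illustration of the Attr constructors deleted above and the OTLP JSON shape they ultimately serialize to. Since the vendored package is internal, this is a standalone mirror with illustrative types, not the real telemetry.Attr/Value implementation:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // attr mirrors the deleted telemetry.Attr shape for illustration only;
    // the real Value type is a tagged union covering strings, ints, doubles,
    // bools, byte slices, slices, and maps.
    type attr struct {
    	Key   string `json:"key,omitempty"`
    	Value any    `json:"value,omitempty"`
    }

    func main() {
    	// OTLP JSON wraps each value in a type-discriminating object, e.g.
    	// {"stringValue": ...} or {"intValue": ...}; 64-bit ints encode as
    	// strings in OTLP JSON.
    	attrs := []attr{
    		{Key: "service.name", Value: map[string]string{"stringValue": "checkout"}},
    		{Key: "retry.count", Value: map[string]string{"intValue": "3"}},
    	}
    	b, _ := json.Marshal(attrs)
    	fmt.Println(string(b))
    }
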
-func (tid TraceID) String() string {
-	return hex.EncodeToString(tid[:])
-}
-
-// IsEmpty returns true if the trace ID is all zeros.
-func (tid TraceID) IsEmpty() bool {
-	return tid == [traceIDSize]byte{}
-}
-
-// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
-func (tid TraceID) MarshalJSON() ([]byte, error) {
-	if tid.IsEmpty() {
-		return []byte(`""`), nil
-	}
-	return marshalJSON(tid[:])
-}
-
-// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
-// quotes.
-func (tid *TraceID) UnmarshalJSON(data []byte) error {
-	*tid = [traceIDSize]byte{}
-	return unmarshalJSON(tid[:], data)
-}
-
-// SpanID is a custom data type that is used for all span IDs.
-type SpanID [spanIDSize]byte
-
-// String returns the hex string representation form of a SpanID.
-func (sid SpanID) String() string {
-	return hex.EncodeToString(sid[:])
-}
-
-// IsEmpty returns true if the span ID is all zeros.
-func (sid SpanID) IsEmpty() bool {
-	return sid == [spanIDSize]byte{}
-}
-
-// MarshalJSON converts span ID into a hex string enclosed in quotes.
-func (sid SpanID) MarshalJSON() ([]byte, error) {
-	if sid.IsEmpty() {
-		return []byte(`""`), nil
-	}
-	return marshalJSON(sid[:])
-}
-
-// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
-func (sid *SpanID) UnmarshalJSON(data []byte) error {
-	*sid = [spanIDSize]byte{}
-	return unmarshalJSON(sid[:], data)
-}
-
-// marshalJSON converts id into a hex string enclosed in quotes.
-func marshalJSON(id []byte) ([]byte, error) {
-	// Plus 2 quote chars at the start and end.
-	hexLen := hex.EncodedLen(len(id)) + 2
-
-	b := make([]byte, hexLen)
-	hex.Encode(b[1:hexLen-1], id)
-	b[0], b[hexLen-1] = '"', '"'
-
-	return b, nil
-}
-
-// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-func unmarshalJSON(dst []byte, src []byte) error {
-	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
-		src = src[1 : l-1]
-	}
-	nLen := len(src)
-	if nLen == 0 {
-		return nil
-	}
-
-	if len(dst) != hex.DecodedLen(nLen) {
-		return errors.New("invalid length for ID")
-	}
-
-	_, err := hex.Decode(dst, src)
-	if err != nil {
-		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
-	}
-	return nil
-}
diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
deleted file mode 100644
index f5e3a8cec9..0000000000
--- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
-
-import (
-	"encoding/json"
-	"strconv"
-)
-
-// protoInt64 represents the protobuf encoding of integers which can be either
-// strings or integers.
-type protoInt64 int64
-
-// Int64 returns the protoInt64 as an int64.
-func (i *protoInt64) Int64() int64 { return int64(*i) }
-
-// UnmarshalJSON decodes both strings and integers.
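
The id.go helpers above, and the protoInt64/protoUint64 decoder that follows, exist because OTLP JSON encodes 64-bit integers as either JSON numbers or quoted strings. A minimal standalone sketch of that dual decoding; flexInt64 is an illustrative name, not the vendored type:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"strconv"
    )

    // flexInt64 accepts both 123 and "123", matching the behavior of the
    // protoInt64 decoder in the deleted code below.
    type flexInt64 int64

    func (i *flexInt64) UnmarshalJSON(data []byte) error {
    	if len(data) > 0 && data[0] == '"' {
    		var s string
    		if err := json.Unmarshal(data, &s); err != nil {
    			return err
    		}
    		v, err := strconv.ParseInt(s, 10, 64)
    		if err != nil {
    			return err
    		}
    		*i = flexInt64(v)
    		return nil
    	}
    	var v int64
    	if err := json.Unmarshal(data, &v); err != nil {
    		return err
    	}
    	*i = flexInt64(v)
    	return nil
    }

    func main() {
    	var a, b flexInt64
    	_ = json.Unmarshal([]byte(`123`), &a)
    	_ = json.Unmarshal([]byte(`"456"`), &b)
    	fmt.Println(a, b) // 123 456
    }
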
-func (i *protoInt64) UnmarshalJSON(data []byte) error {
-	if data[0] == '"' {
-		var str string
-		if err := json.Unmarshal(data, &str); err != nil {
-			return err
-		}
-		parsedInt, err := strconv.ParseInt(str, 10, 64)
-		if err != nil {
-			return err
-		}
-		*i = protoInt64(parsedInt)
-	} else {
-		var parsedInt int64
-		if err := json.Unmarshal(data, &parsedInt); err != nil {
-			return err
-		}
-		*i = protoInt64(parsedInt)
-	}
-	return nil
-}
-
-// protoUint64 represents the protobuf encoding of integers which can be either
-// strings or integers.
-type protoUint64 uint64
-
-// Uint64 returns the protoUint64 as a uint64.
-func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
-
-// UnmarshalJSON decodes both strings and integers.
-func (i *protoUint64) UnmarshalJSON(data []byte) error {
-	if data[0] == '"' {
-		var str string
-		if err := json.Unmarshal(data, &str); err != nil {
-			return err
-		}
-		parsedUint, err := strconv.ParseUint(str, 10, 64)
-		if err != nil {
-			return err
-		}
-		*i = protoUint64(parsedUint)
-	} else {
-		var parsedUint uint64
-		if err := json.Unmarshal(data, &parsedUint); err != nil {
-			return err
-		}
-		*i = protoUint64(parsedUint)
-	}
-	return nil
-}
diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
deleted file mode 100644
index 1798a702d4..0000000000
--- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-)
-
-// Resource information.
-type Resource struct {
-	// Attrs are the set of attributes that describe the resource. Attribute
-	// keys MUST be unique (it is not allowed to have more than one attribute
-	// with the same key).
-	Attrs []Attr `json:"attributes,omitempty"`
-	// DroppedAttrs is the number of dropped attributes. If the value
-	// is 0, then no attributes were dropped.
-	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
-}
-
-// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
-func (r *Resource) UnmarshalJSON(data []byte) error {
-	decoder := json.NewDecoder(bytes.NewReader(data))
-
-	t, err := decoder.Token()
-	if err != nil {
-		return err
-	}
-	if t != json.Delim('{') {
-		return errors.New("invalid Resource type")
-	}
-
-	for decoder.More() {
-		keyIface, err := decoder.Token()
-		if err != nil {
-			if errors.Is(err, io.EOF) {
-				// Empty.
-				return nil
-			}
-			return err
-		}
-
-		key, ok := keyIface.(string)
-		if !ok {
-			return fmt.Errorf("invalid Resource field: %#v", keyIface)
-		}
-
-		switch key {
-		case "attributes":
-			err = decoder.Decode(&r.Attrs)
-		case "droppedAttributesCount", "dropped_attributes_count":
-			err = decoder.Decode(&r.DroppedAttrs)
-		default:
-			// Skip unknown.
-		}
-
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
deleted file mode 100644
index c2b4c635b7..0000000000
--- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-)
-
-// Scope is the identifying values of the instrumentation scope.
-type Scope struct {
-	Name         string `json:"name,omitempty"`
-	Version      string `json:"version,omitempty"`
-	Attrs        []Attr `json:"attributes,omitempty"`
-	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
-}
-
-// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
-func (s *Scope) UnmarshalJSON(data []byte) error {
-	decoder := json.NewDecoder(bytes.NewReader(data))
-
-	t, err := decoder.Token()
-	if err != nil {
-		return err
-	}
-	if t != json.Delim('{') {
-		return errors.New("invalid Scope type")
-	}
-
-	for decoder.More() {
-		keyIface, err := decoder.Token()
-		if err != nil {
-			if errors.Is(err, io.EOF) {
-				// Empty.
-				return nil
-			}
-			return err
-		}
-
-		key, ok := keyIface.(string)
-		if !ok {
-			return fmt.Errorf("invalid Scope field: %#v", keyIface)
-		}
-
-		switch key {
-		case "name":
-			err = decoder.Decode(&s.Name)
-		case "version":
-			err = decoder.Decode(&s.Version)
-		case "attributes":
-			err = decoder.Decode(&s.Attrs)
-		case "droppedAttributesCount", "dropped_attributes_count":
-			err = decoder.Decode(&s.DroppedAttrs)
-		default:
-			// Skip unknown.
-		}
-
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
deleted file mode 100644
index e7ca62c660..0000000000
--- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
-
-import (
-	"bytes"
-	"encoding/hex"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"time"
-)
-
-// A Span represents a single operation performed by a single component of the
-// system.
-type Span struct {
-	// A unique identifier for a trace. All spans from the same trace share
-	// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
-	// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
-	// is zero-length and thus is also invalid).
-	//
-	// This field is required.
-	TraceID TraceID `json:"traceId,omitempty"`
-	// A unique identifier for a span within a trace, assigned when the span
-	// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
-	// other than 8 bytes is considered invalid (empty string in OTLP/JSON
-	// is zero-length and thus is also invalid).
-	//
-	// This field is required.
-	SpanID SpanID `json:"spanId,omitempty"`
-	// trace_state conveys information about request position in multiple distributed tracing graphs.
- // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - TraceState string `json:"traceState,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanID SpanID `json:"parentSpanId,omitempty"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. - Flags uint32 `json:"flags,omitempty"` - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // Empty value is equivalent to an unknown span name. - // - // This field is required. - Name string `json:"name"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind SpanKind `json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTime time.Time `json:"startTimeUnixNano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTime time.Time `json:"endTimeUnixNano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. 
Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` - // events is a collection of Event items. - Events []*SpanEvent `json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - Links []*SpanLink `json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - Status *Status `json:"status,omitempty"` -} - -// MarshalJSON encodes s into OTLP formatted JSON. -func (s Span) MarshalJSON() ([]byte, error) { - startT := s.StartTime.UnixNano() - if s.StartTime.IsZero() || startT < 0 { - startT = 0 - } - - endT := s.EndTime.UnixNano() - if s.EndTime.IsZero() || endT < 0 { - endT = 0 - } - - // Override non-empty default SpanID marshal and omitempty. - var parentSpanId string - if !s.ParentSpanID.IsEmpty() { - b := make([]byte, hex.EncodedLen(spanIDSize)) - hex.Encode(b, s.ParentSpanID[:]) - parentSpanId = string(b) - } - - type Alias Span - return json.Marshal(struct { - Alias - ParentSpanID string `json:"parentSpanId,omitempty"` - StartTime uint64 `json:"startTimeUnixNano,omitempty"` - EndTime uint64 `json:"endTimeUnixNano,omitempty"` - }{ - Alias: Alias(s), - ParentSpanID: parentSpanId, - StartTime: uint64(startT), // nolint:gosec // >0 checked above. - EndTime: uint64(endT), // nolint:gosec // >0 checked above. - }) -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. -func (s *Span) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Span type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Span field: %#v", keyIface) - } - - switch key { - case "traceId", "trace_id": - err = decoder.Decode(&s.TraceID) - case "spanId", "span_id": - err = decoder.Decode(&s.SpanID) - case "traceState", "trace_state": - err = decoder.Decode(&s.TraceState) - case "parentSpanId", "parent_span_id": - err = decoder.Decode(&s.ParentSpanID) - case "flags": - err = decoder.Decode(&s.Flags) - case "name": - err = decoder.Decode(&s.Name) - case "kind": - err = decoder.Decode(&s.Kind) - case "startTimeUnixNano", "start_time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. - s.StartTime = time.Unix(0, v) - case "endTimeUnixNano", "end_time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. - s.EndTime = time.Unix(0, v) - case "attributes": - err = decoder.Decode(&s.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&s.DroppedAttrs) - case "events": - err = decoder.Decode(&s.Events) - case "droppedEventsCount", "dropped_events_count": - err = decoder.Decode(&s.DroppedEvents) - case "links": - err = decoder.Decode(&s.Links) - case "droppedLinksCount", "dropped_links_count": - err = decoder.Decode(&s.DroppedLinks) - case "status": - err = decoder.Decode(&s.Status) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// SpanFlags represents constants used to interpret the -// Span.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -// -// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. -// -// Note that Span flags were introduced in version 1.1 of the -// OpenTelemetry protocol. Older Span producers do not set this -// field, consequently consumers should not rely on the absence of a -// particular flag bit to indicate the presence of a particular feature. -type SpanFlags int32 - -const ( - // SpanFlagsTraceFlagsMask is a mask for trace-flags. - // - // Bits 0-7 are used for trace flags. - SpanFlagsTraceFlagsMask SpanFlags = 255 - // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is - // remote. - SpanFlagsContextIsRemoteMask SpanFlags = 512 -) - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type SpanKind int32 - -const ( - // SpanKindInternal indicates that the span represents an internal - // operation within an application, as opposed to an operation happening at - // the boundaries. - SpanKindInternal SpanKind = 1 - // SpanKindServer indicates that the span covers server-side handling of an - // RPC or other remote network request. 
- SpanKindServer SpanKind = 2 - // SpanKindClient indicates that the span describes a request to some - // remote service. - SpanKindClient SpanKind = 3 - // SpanKindProducer indicates that the span describes a producer sending a - // message to a broker. Unlike SpanKindClient and SpanKindServer, there is - // often no direct critical path latency relationship between producer and - // consumer spans. A SpanKindProducer span ends when the message was - // accepted by the broker while the logical processing of the message might - // span a much longer time. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer indicates that the span describes a consumer receiving - // a message from a broker. Like SpanKindProducer, there is often no direct - // critical path latency relationship between producer and consumer spans. - SpanKindConsumer SpanKind = 5 -) - -// SpanEvent is a time-stamped annotation of the span, consisting of -// user-supplied text description and key-value pairs. -type SpanEvent struct { - // time_unix_nano is the time the event occurred. - Time time.Time `json:"timeUnixNano,omitempty"` - // name of the event. - // This field is semantically required to be set to non-empty string. - Name string `json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` -} - -// MarshalJSON encodes e into OTLP formatted JSON. -func (e SpanEvent) MarshalJSON() ([]byte, error) { - t := e.Time.UnixNano() - if e.Time.IsZero() || t < 0 { - t = 0 - } - - type Alias SpanEvent - return json.Marshal(struct { - Alias - Time uint64 `json:"timeUnixNano,omitempty"` - }{ - Alias: Alias(e), - Time: uint64(t), // nolint: gosec // >0 checked above - }) -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. -func (se *SpanEvent) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid SpanEvent type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) - } - - switch key { - case "timeUnixNano", "time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. - se.Time = time.Unix(0, v) - case "name": - err = decoder.Decode(&se.Name) - case "attributes": - err = decoder.Decode(&se.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&se.DroppedAttrs) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// SpanLink is a reference from the current span to another span in the same -// trace or in a different trace. For example, this can be used in batching -// operations, where a single batch handler processes multiple requests from -// different traces or when the handler receives a request from a different -// project. 
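For context, the SpanEvent codec above shows the OTLP JSON mapping these vendored types implement: the timestamp is flattened to a uint64 `timeUnixNano` on encode, and both camelCase and snake_case keys are accepted on decode. A minimal round-trip sketch; this is hypothetical test code, and since `telemetry` is an internal package it compiles only within that module:

```go
package telemetry

import (
	"encoding/json"
	"fmt"
	"time"
)

// Round-trip a SpanEvent through the codec defined above. The marshaled
// form carries the timestamp as a uint64 nanosecond count.
func ExampleSpanEvent_roundTrip() {
	ev := SpanEvent{Name: "retry", Time: time.Unix(0, 1700000000000000000)}

	b, err := json.Marshal(ev) // dispatches to SpanEvent.MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"name":"retry","timeUnixNano":1700000000000000000}

	var got SpanEvent
	if err := json.Unmarshal(b, &got); err != nil { // SpanEvent.UnmarshalJSON
		panic(err)
	}
	fmt.Println(got.Name, got.Time.UnixNano()) // retry 1700000000000000000
}
```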
-type SpanLink struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceID TraceID `json:"traceId,omitempty"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanID SpanID `json:"spanId,omitempty"` - // The trace_state associated with the link. - TraceState string `json:"traceState,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - Flags uint32 `json:"flags,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. -func (sl *SpanLink) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid SpanLink type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid SpanLink field: %#v", keyIface) - } - - switch key { - case "traceId", "trace_id": - err = decoder.Decode(&sl.TraceID) - case "spanId", "span_id": - err = decoder.Decode(&sl.SpanID) - case "traceState", "trace_state": - err = decoder.Decode(&sl.TraceState) - case "attributes": - err = decoder.Decode(&sl.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&sl.DroppedAttrs) - case "flags": - err = decoder.Decode(&sl.Flags) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go deleted file mode 100644 index 1039bf40cd..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -// StatusCode is the status of a Span. 
-// -// For the semantics of status codes see -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status -type StatusCode int32 - -const ( - // StatusCodeUnset is the default status. - StatusCodeUnset StatusCode = 0 - // StatusCodeOK is used when the Span has been validated by an Application - // developer or Operator to have completed successfully. - StatusCodeOK StatusCode = 1 - // StatusCodeError is used when the Span contains an error. - StatusCodeError StatusCode = 2 -) - -var statusCodeStrings = []string{ - "Unset", - "OK", - "Error", -} - -func (s StatusCode) String() string { - if s >= 0 && int(s) < len(statusCodeStrings) { - return statusCodeStrings[s] - } - return "" -} - -// Status defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -type Status struct { - // A developer-facing human readable error message. - Message string `json:"message,omitempty"` - // The status code. - Code StatusCode `json:"code,omitempty"` -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go deleted file mode 100644 index e5f10767ca..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// Traces represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type Traces struct { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. -func (td *Traces) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid TracesData type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid TracesData field: %#v", keyIface) - } - - switch key { - case "resourceSpans", "resource_spans": - err = decoder.Decode(&td.ResourceSpans) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// ResourceSpans is a collection of ScopeSpans from a Resource. -type ResourceSpans struct { - // The resource for the spans in this message. - // If this field is not set then no resource info is known. 
- Resource Resource `json:"resource"` - // A list of ScopeSpans that originate from a resource. - ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - SchemaURL string `json:"schemaUrl,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. -func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid ResourceSpans type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) - } - - switch key { - case "resource": - err = decoder.Decode(&rs.Resource) - case "scopeSpans", "scope_spans": - err = decoder.Decode(&rs.ScopeSpans) - case "schemaUrl", "schema_url": - err = decoder.Decode(&rs.SchemaURL) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// ScopeSpans is a collection of Spans produced by an InstrumentationScope. -type ScopeSpans struct { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope *Scope `json:"scope"` - // A list of Spans that originate from an instrumentation scope. - Spans []*Span `json:"spans,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. - SchemaURL string `json:"schemaUrl,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. -func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid ScopeSpans type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) - } - - switch key { - case "scope": - err = decoder.Decode(&ss.Scope) - case "spans": - err = decoder.Decode(&ss.Spans) - case "schemaUrl", "schema_url": - err = decoder.Decode(&ss.SchemaURL) - default: - // Skip unknown. 
- } - - if err != nil { - return err - } - } - return nil -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go deleted file mode 100644 index ae9ce102a9..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "bytes" - "cmp" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "slices" - "strconv" - "unsafe" -) - -// A Value represents a structured value. -// A zero value is valid and represents an empty value. -type Value struct { - // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. - - // num holds the value for Int64, Float64, and Bool. It holds the length - // for String, Bytes, Slice, Map. - num uint64 - // any holds either the KindBool, KindInt64, KindFloat64, stringptr, - // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 - // then the value of Value is in num as described above. Otherwise, it - // contains the value wrapped in the appropriate type. - any any -} - -type ( - // stringptr represents a value in Value.any for KindString Values. - stringptr *byte - // bytesptr represents a value in Value.any for KindBytes Values. - bytesptr *byte - // sliceptr represents a value in Value.any for KindSlice Values. - sliceptr *Value - // mapptr represents a value in Value.any for KindMap Values. - mapptr *Attr -) - -// ValueKind is the kind of a [Value]. -type ValueKind int - -// ValueKind values. -const ( - ValueKindEmpty ValueKind = iota - ValueKindBool - ValueKindFloat64 - ValueKindInt64 - ValueKindString - ValueKindBytes - ValueKindSlice - ValueKindMap -) - -var valueKindStrings = []string{ - "Empty", - "Bool", - "Float64", - "Int64", - "String", - "Bytes", - "Slice", - "Map", -} - -func (k ValueKind) String() string { - if k >= 0 && int(k) < len(valueKindStrings) { - return valueKindStrings[k] - } - return "" -} - -// StringValue returns a new [Value] for a string. -func StringValue(v string) Value { - return Value{ - num: uint64(len(v)), - any: stringptr(unsafe.StringData(v)), - } -} - -// IntValue returns a [Value] for an int. -func IntValue(v int) Value { return Int64Value(int64(v)) } - -// Int64Value returns a [Value] for an int64. -func Int64Value(v int64) Value { - return Value{ - num: uint64(v), // nolint: gosec // Store raw bytes. - any: ValueKindInt64, - } -} - -// Float64Value returns a [Value] for a float64. -func Float64Value(v float64) Value { - return Value{num: math.Float64bits(v), any: ValueKindFloat64} -} - -// BoolValue returns a [Value] for a bool. -func BoolValue(v bool) Value { //nolint:revive // Not a control flag. - var n uint64 - if v { - n = 1 - } - return Value{num: n, any: ValueKindBool} -} - -// BytesValue returns a [Value] for a byte slice. The passed slice must not be -// changed after it is passed. -func BytesValue(v []byte) Value { - return Value{ - num: uint64(len(v)), - any: bytesptr(unsafe.SliceData(v)), - } -} - -// SliceValue returns a [Value] for a slice of [Value]. The passed slice must -// not be changed after it is passed.
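The constructors above pack scalar payloads into `num` and use `any` as the kind discriminator, mirroring the design of `log/slog`'s Value. A brief usage sketch; hypothetical example code that, like the previous sketch, only compiles inside this internal package:

```go
package telemetry

import "fmt"

// Demonstrates the constructor/accessor pairing: each XxxValue function
// sets the discriminator that the corresponding AsXxx accessor checks.
func ExampleValue_constructors() {
	fmt.Println(StringValue("hello").AsString()) // hello
	fmt.Println(Int64Value(42).AsInt64())        // 42
	fmt.Println(BoolValue(true).AsBool())        // true
	fmt.Println(Float64Value(2.5).Kind())        // Float64
}
```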
-func SliceValue(vs ...Value) Value { - return Value{ - num: uint64(len(vs)), - any: sliceptr(unsafe.SliceData(vs)), - } -} - -// MapValue returns a new [Value] for a slice of key-value pairs. The passed -// slice must not be changed after it is passed. -func MapValue(kvs ...Attr) Value { - return Value{ - num: uint64(len(kvs)), - any: mapptr(unsafe.SliceData(kvs)), - } -} - -// AsString returns the value held by v as a string. -func (v Value) AsString() string { - if sp, ok := v.any.(stringptr); ok { - return unsafe.String(sp, v.num) - } - // TODO: error handle - return "" -} - -// asString returns the value held by v as a string. It will panic if the Value -// is not KindString. -func (v Value) asString() string { - return unsafe.String(v.any.(stringptr), v.num) -} - -// AsInt64 returns the value held by v as an int64. -func (v Value) AsInt64() int64 { - if v.Kind() != ValueKindInt64 { - // TODO: error handle - return 0 - } - return v.asInt64() -} - -// asInt64 returns the value held by v as an int64. If v is not of KindInt64, -// this will return garbage. -func (v Value) asInt64() int64 { - // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec -} - -// AsBool returns the value held by v as a bool. -func (v Value) AsBool() bool { - if v.Kind() != ValueKindBool { - // TODO: error handle - return false - } - return v.asBool() -} - -// asBool returns the value held by v as a bool. If v is not of KindBool, this -// will return garbage. -func (v Value) asBool() bool { return v.num == 1 } - -// AsFloat64 returns the value held by v as a float64. -func (v Value) AsFloat64() float64 { - if v.Kind() != ValueKindFloat64 { - // TODO: error handle - return 0 - } - return v.asFloat64() -} - -// asFloat64 returns the value held by v as a float64. If v is not of -// KindFloat64, this will return garbage. -func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } - -// AsBytes returns the value held by v as a []byte. -func (v Value) AsBytes() []byte { - if sp, ok := v.any.(bytesptr); ok { - return unsafe.Slice((*byte)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asBytes returns the value held by v as a []byte. It will panic if the Value -// is not KindBytes. -func (v Value) asBytes() []byte { - return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) -} - -// AsSlice returns the value held by v as a []Value. -func (v Value) AsSlice() []Value { - if sp, ok := v.any.(sliceptr); ok { - return unsafe.Slice((*Value)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asSlice returns the value held by v as a []Value. It will panic if the Value -// is not KindSlice. -func (v Value) asSlice() []Value { - return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) -} - -// AsMap returns the value held by v as a []Attr. -func (v Value) AsMap() []Attr { - if sp, ok := v.any.(mapptr); ok { - return unsafe.Slice((*Attr)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asMap returns the value held by v as a []Attr. It will panic if the -// Value is not KindMap. -func (v Value) asMap() []Attr { - return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) -} - -// Kind returns the Kind of v. -func (v Value) Kind() ValueKind { - switch x := v.any.(type) { - case ValueKind: - return x - case stringptr: - return ValueKindString - case bytesptr: - return ValueKindBytes - case sliceptr: - return ValueKindSlice - case mapptr: - return ValueKindMap - default: - return ValueKindEmpty - } -} - -// Empty returns if v does not hold any value. 
-func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } - -// Equal returns if v is equal to w. -func (v Value) Equal(w Value) bool { - k1 := v.Kind() - k2 := w.Kind() - if k1 != k2 { - return false - } - switch k1 { - case ValueKindInt64, ValueKindBool: - return v.num == w.num - case ValueKindString: - return v.asString() == w.asString() - case ValueKindFloat64: - return v.asFloat64() == w.asFloat64() - case ValueKindSlice: - return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) - case ValueKindMap: - sv := sortMap(v.asMap()) - sw := sortMap(w.asMap()) - return slices.EqualFunc(sv, sw, Attr.Equal) - case ValueKindBytes: - return bytes.Equal(v.asBytes(), w.asBytes()) - case ValueKindEmpty: - return true - default: - // TODO: error handle - return false - } -} - -func sortMap(m []Attr) []Attr { - sm := make([]Attr, len(m)) - copy(sm, m) - slices.SortFunc(sm, func(a, b Attr) int { - return cmp.Compare(a.Key, b.Key) - }) - - return sm -} - -// String returns Value's value as a string, formatted like [fmt.Sprint]. - // -// The returned string is meant for debugging; -// the string representation is not stable. -func (v Value) String() string { - switch v.Kind() { - case ValueKindString: - return v.asString() - case ValueKindInt64: - // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec - case ValueKindFloat64: - return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) - case ValueKindBool: - return strconv.FormatBool(v.asBool()) - case ValueKindBytes: - return string(v.asBytes()) - case ValueKindMap: - return fmt.Sprint(v.asMap()) - case ValueKindSlice: - return fmt.Sprint(v.asSlice()) - case ValueKindEmpty: - return "" - default: - // Try to handle this as gracefully as possible. - // - // Don't panic here. The goal here is to have developers find this - // first if a ValueKind is not handled. It is - // preferable to have users open issues asking why their attributes - // have an "unhandled: " prefix than to find that their code is panicking. - return fmt.Sprintf("<unhandled: (%s)>", v.Kind()) - } -} - -// MarshalJSON encodes v into OTLP formatted JSON. -func (v *Value) MarshalJSON() ([]byte, error) { - switch v.Kind() { - case ValueKindString: - return json.Marshal(struct { - Value string `json:"stringValue"` - }{v.asString()}) - case ValueKindInt64: - return json.Marshal(struct { - Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes. - case ValueKindFloat64: - return json.Marshal(struct { - Value float64 `json:"doubleValue"` - }{v.asFloat64()}) - case ValueKindBool: - return json.Marshal(struct { - Value bool `json:"boolValue"` - }{v.asBool()}) - case ValueKindBytes: - return json.Marshal(struct { - Value []byte `json:"bytesValue"` - }{v.asBytes()}) - case ValueKindMap: - return json.Marshal(struct { - Value struct { - Values []Attr `json:"values"` - } `json:"kvlistValue"` - }{struct { - Values []Attr `json:"values"` - }{v.asMap()}}) - case ValueKindSlice: - return json.Marshal(struct { - Value struct { - Values []Value `json:"values"` - } `json:"arrayValue"` - }{struct { - Values []Value `json:"values"` - }{v.asSlice()}}) - case ValueKindEmpty: - return nil, nil - default: - return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) - } -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
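One detail of the MarshalJSON implementation above worth calling out: OTLP JSON encodes 64-bit integers as JSON strings, which is why the intValue branch goes through strconv instead of emitting a number. A small hypothetical sketch (internal package, as before):

```go
package telemetry

import "fmt"

// Shows the OTLP JSON envelopes produced by Value.MarshalJSON above.
func ExampleValue_MarshalJSON() {
	v := Int64Value(42)
	b, _ := v.MarshalJSON()
	fmt.Println(string(b)) // {"intValue":"42"} (note the quoted number)

	s := StringValue("hi")
	b, _ = s.MarshalJSON()
	fmt.Println(string(b)) // {"stringValue":"hi"}
}
```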
-func (v *Value) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Value type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Value key: %#v", keyIface) - } - - switch key { - case "stringValue", "string_value": - var val string - err = decoder.Decode(&val) - *v = StringValue(val) - case "boolValue", "bool_value": - var val bool - err = decoder.Decode(&val) - *v = BoolValue(val) - case "intValue", "int_value": - var val protoInt64 - err = decoder.Decode(&val) - *v = Int64Value(val.Int64()) - case "doubleValue", "double_value": - var val float64 - err = decoder.Decode(&val) - *v = Float64Value(val) - case "bytesValue", "bytes_value": - var val64 string - if err := decoder.Decode(&val64); err != nil { - return err - } - var val []byte - val, err = base64.StdEncoding.DecodeString(val64) - *v = BytesValue(val) - case "arrayValue", "array_value": - var val struct{ Values []Value } - err = decoder.Decode(&val) - *v = SliceValue(val.Values...) - case "kvlistValue", "kvlist_value": - var val struct{ Values []Attr } - err = decoder.Decode(&val) - *v = MapValue(val.Values...) - default: - // Skip unknown. - continue - } - // Use first valid. Ignore the rest. - return err - } - - // Only unknown fields. Return nil without unmarshaling any value. - return nil -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/nonrecording.go deleted file mode 100644 index c00221e7be..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. -type nonRecordingSpan struct { - noopSpan - - sc SpanContext -} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/noop.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/noop.go deleted file mode 100644 index 0f56e4dbb3..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/noop.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" -) - -// NewNoopTracerProvider returns an implementation of TracerProvider that -// performs no operations. The Tracer and Spans created from the returned -// TracerProvider also perform no operations. -// -// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] -// instead. -func NewNoopTracerProvider() TracerProvider { - return noopTracerProvider{} -} - -type noopTracerProvider struct{ embedded.TracerProvider } - -var _ TracerProvider = noopTracerProvider{} - -// Tracer returns noop implementation of Tracer. 
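Since NewNoopTracerProvider above is deprecated, callers migrating off this vendored copy would use the public noop package instead; a minimal sketch, where the scope and span names are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// Replacement for the deprecated trace.NewNoopTracerProvider.
	tp := noop.NewTracerProvider()
	tracer := tp.Tracer("example")

	// Spans from a noop tracer record nothing and are never sampled.
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()
}
```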
-func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { - return noopTracer{} -} - -// noopTracer is an implementation of Tracer that performs no operations. -type noopTracer struct{ embedded.Tracer } - -var _ Tracer = noopTracer{} - -// Start carries forward a non-recording Span, if one is present in the context, otherwise it -// creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { - span := SpanFromContext(ctx) - if _, ok := span.(nonRecordingSpan); !ok { - // span is likely already a noopSpan, but let's be sure - span = noopSpanInstance - } - return ContextWithSpan(ctx, span), span -} - -// noopSpan is an implementation of Span that performs no operations. -type noopSpan struct{ embedded.Span } - -var noopSpanInstance Span = noopSpan{} - -// SpanContext returns an empty span context. -func (noopSpan) SpanContext() SpanContext { return SpanContext{} } - -// IsRecording always returns false. -func (noopSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (noopSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (noopSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (noopSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (noopSpan) End(...SpanEndOption) {} - -// RecordError does nothing. -func (noopSpan) RecordError(error, ...EventOption) {} - -// AddEvent does nothing. -func (noopSpan) AddEvent(string, ...EventOption) {} - -// AddLink does nothing. -func (noopSpan) AddLink(Link) {} - -// SetName does nothing. -func (noopSpan) SetName(string) {} - -// TracerProvider returns a no-op TracerProvider. -func (s noopSpan) TracerProvider() TracerProvider { - return s.tracerProvider(autoInstEnabled) -} - -// autoInstEnabled defines if the auto-instrumentation SDK is enabled. -// -// The auto-instrumentation is expected to overwrite this value to true when it -// attaches to the process. -var autoInstEnabled = new(bool) - -// tracerProvider returns a noopTracerProvider if autoEnabled is false, -// otherwise it will return a TracerProvider from the sdk package used in -// auto-instrumentation. -// -//go:noinline -func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { - if *autoEnabled { - return newAutoTracerProvider() - } - return noopTracerProvider{} -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/provider.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/provider.go deleted file mode 100644 index ef85cb70c6..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/provider.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import "go.opentelemetry.io/otel/trace/embedded" - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides; it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code.
-// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/span.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/span.go deleted file mode 100644 index d3aa476ee1..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/span.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" -) - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. 
This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided -// ctx.
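The batch-processing scenario described for Link above is easiest to see in code. A hedged sketch against the public trace API, where the scope name and the batching function are illustrative:

```go
package batcher

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// processBatch handles requests from several traces under one span,
// linking each originating SpanContext rather than picking one parent.
func processBatch(ctx context.Context, requestCtxs []context.Context) {
	links := make([]trace.Link, 0, len(requestCtxs))
	for _, reqCtx := range requestCtxs {
		links = append(links, trace.LinkFromContext(reqCtx))
	}

	tracer := otel.Tracer("example/batcher")
	ctx, span := tracer.Start(ctx, "process-batch", trace.WithLinks(links...))
	defer span.End()

	_ = ctx // ... process each request under the batch span ...
}
```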
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/trace.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/trace.go deleted file mode 100644 index d49adf671b..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/trace.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "bytes" - "encoding/hex" - "encoding/json" -) - -const ( - // FlagsSampled is a bitmask with the sampled bit set. A SpanContext - // with the sampling bit set means the span is sampled. 
- FlagsSampled = TraceFlags(0x01) - - errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" - - errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" - errNilTraceID errorConst = "trace-id can't be all zero" - - errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" - errNilSpanID errorConst = "span-id can't be all zero" -) - -type errorConst string - -func (e errorConst) Error() string { - return string(e) -} - -// TraceID is a unique identity of a trace. -// nolint:revive // revive complains about stutter of `trace.TraceID`. -type TraceID [16]byte - -var ( - nilTraceID TraceID - _ json.Marshaler = nilTraceID -) - -// IsValid checks whether the trace TraceID is valid. A valid trace ID does -// not consist of zeros only. -func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) -} - -// MarshalJSON implements a custom marshal function to encode TraceID -// as a hex string. -func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String returns the hex string representation form of a TraceID. -func (t TraceID) String() string { - return hex.EncodeToString(t[:]) -} - -// SpanID is a unique identity of a span in a trace. -type SpanID [8]byte - -var ( - nilSpanID SpanID - _ json.Marshaler = nilSpanID -) - -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist -// of zeros only. -func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) -} - -// MarshalJSON implements a custom marshal function to encode SpanID -// as a hex string. -func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) -} - -// String returns the hex string representation form of a SpanID. -func (s SpanID) String() string { - return hex.EncodeToString(s[:]) -} - -// TraceIDFromHex returns a TraceID from a hex string if it is compliant with -// the W3C trace-context specification. See more at -// https://www.w3.org/TR/trace-context/#trace-id -// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. -func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} - if len(h) != 32 { - return t, errInvalidTraceIDLength - } - - if err := decodeHex(h, t[:]); err != nil { - return t, err - } - - if !t.IsValid() { - return t, errNilTraceID - } - return t, nil -} - -// SpanIDFromHex returns a SpanID from a hex string if it is compliant -// with the w3c trace-context specification. -// See more at https://www.w3.org/TR/trace-context/#parent-id -func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} - if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err - } - - if !s.IsValid() { - return s, errNilSpanID - } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } - } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err - } - - copy(b, decoded) - return nil -} - -// TraceFlags contains flags that can be set on a SpanContext. -type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. - -// IsSampled returns if the sampling bit is set in the TraceFlags. 
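The hex parsing rules above (fixed length, lowercase hex, non-zero) are straightforward to exercise. A quick sketch using the W3C specification's example IDs:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// 32 and 16 lowercase hex digits respectively, both non-zero.
	tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	fmt.Println(tid, err) // 4bf92f3577b34da6a3ce929d0e0e4736 <nil>

	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	fmt.Println(sid, err) // 00f067aa0ba902b7 <nil>

	// Uppercase hex is rejected by decodeHex's character check.
	_, err = trace.TraceIDFromHex("4BF92F3577B34DA6A3CE929D0E0E4736")
	fmt.Println(err != nil) // true

	// An all-zero ID decodes as hex but fails the IsValid check.
	_, err = trace.SpanIDFromHex("0000000000000000")
	fmt.Println(err) // span-id can't be all zero
}
```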
-func (tf TraceFlags) IsSampled() bool { - return tf&FlagsSampled == FlagsSampled -} - -// WithSampled sets the sampling bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. - if sampled { - return tf | FlagsSampled - } - - return tf &^ FlagsSampled -} - -// MarshalJSON implements a custom marshal function to encode TraceFlags -// as a hex string. -func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) -} - -// String returns the hex string representation form of TraceFlags. -func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) -} - -// SpanContextConfig contains mutable fields usable for constructing -// an immutable SpanContext. -type SpanContextConfig struct { - TraceID TraceID - SpanID SpanID - TraceFlags TraceFlags - TraceState TraceState - Remote bool -} - -// NewSpanContext constructs a SpanContext using values from the provided -// SpanContextConfig. -func NewSpanContext(config SpanContextConfig) SpanContext { - return SpanContext{ - traceID: config.TraceID, - spanID: config.SpanID, - traceFlags: config.TraceFlags, - traceState: config.TraceState, - remote: config.Remote, - } -} - -// SpanContext contains identifying trace information about a Span. -type SpanContext struct { - traceID TraceID - spanID SpanID - traceFlags TraceFlags - traceState TraceState - remote bool -} - -var _ json.Marshaler = SpanContext{} - -// IsValid returns if the SpanContext is valid. A valid span context has a -// valid TraceID and SpanID. -func (sc SpanContext) IsValid() bool { - return sc.HasTraceID() && sc.HasSpanID() -} - -// IsRemote indicates whether the SpanContext represents a remotely-created Span. -func (sc SpanContext) IsRemote() bool { - return sc.remote -} - -// WithRemote returns a copy of sc with the Remote property set to remote. -func (sc SpanContext) WithRemote(remote bool) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: remote, - } -} - -// TraceID returns the TraceID from the SpanContext. -func (sc SpanContext) TraceID() TraceID { - return sc.traceID -} - -// HasTraceID checks if the SpanContext has a valid TraceID. -func (sc SpanContext) HasTraceID() bool { - return sc.traceID.IsValid() -} - -// WithTraceID returns a new SpanContext with the TraceID replaced. -func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { - return SpanContext{ - traceID: traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// SpanID returns the SpanID from the SpanContext. -func (sc SpanContext) SpanID() SpanID { - return sc.spanID -} - -// HasSpanID checks if the SpanContext has a valid SpanID. -func (sc SpanContext) HasSpanID() bool { - return sc.spanID.IsValid() -} - -// WithSpanID returns a new SpanContext with the SpanID replaced. -func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceFlags returns the flags from the SpanContext. -func (sc SpanContext) TraceFlags() TraceFlags { - return sc.traceFlags -} - -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. 
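Putting the SpanContext pieces together, a brief sketch of constructing one from the config struct, for example when continuing a trace received from another process (the hex IDs reuse the W3C examples above):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")

	// Assemble an immutable SpanContext from the mutable config struct.
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
		Remote:     true,
	})
	fmt.Println(sc.IsValid(), sc.IsSampled(), sc.IsRemote()) // true true true

	// Attach it as the remote parent for spans started from this context.
	ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)
	_ = ctx
}
```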
-func (sc SpanContext) IsSampled() bool { - return sc.traceFlags.IsSampled() -} - -// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. -func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: flags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceState returns the TraceState from the SpanContext. -func (sc SpanContext) TraceState() TraceState { - return sc.traceState -} - -// WithTraceState returns a new SpanContext with the TraceState replaced. -func (sc SpanContext) WithTraceState(state TraceState) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: state, - remote: sc.remote, - } -} - -// Equal is a predicate that determines whether two SpanContext values are equal. -func (sc SpanContext) Equal(other SpanContext) bool { - return sc.traceID == other.traceID && - sc.spanID == other.spanID && - sc.traceFlags == other.traceFlags && - sc.traceState.String() == other.traceState.String() && - sc.remote == other.remote -} - -// MarshalJSON implements a custom marshal function to encode a SpanContext. -func (sc SpanContext) MarshalJSON() ([]byte, error) { - return json.Marshal(SpanContextConfig{ - TraceID: sc.traceID, - SpanID: sc.spanID, - TraceFlags: sc.traceFlags, - TraceState: sc.traceState, - Remote: sc.remote, - }) -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go deleted file mode 100644 index 77952d2a0b..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/trace/embedded" -) - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. 
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} diff --git a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/openshift/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go deleted file mode 100644 index dc5e34cad0..0000000000 --- a/openshift/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "encoding/json" - "fmt" - "strings" -) - -const ( - maxListMembers = 32 - - listDelimiters = "," - memberDelimiter = "=" - - errInvalidKey errorConst = "invalid tracestate key" - errInvalidValue errorConst = "invalid tracestate value" - errInvalidMember errorConst = "invalid tracestate list-member" - errMemberNumber errorConst = "too many list-members in tracestate" - errDuplicate errorConst = "duplicate list-member in tracestate" -) - -type member struct { - Key string - Value string -} - -// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) ) -// means (chr = %x20-2B / %x2D-3C / %x3E-7E) . -func checkValueChar(v byte) bool { - return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' -} - -// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) . -func checkValueLast(v byte) bool { - return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' -} - -// based on the W3C Trace Context specification -// -// value = (0*255(chr)) nblk-chr -// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E -// chr = %x20 / nblk-chr -// -// see https://www.w3.org/TR/trace-context-1/#value -func checkValue(val string) bool { - n := len(val) - if n == 0 || n > 256 { - return false - } - for i := 0; i < n-1; i++ { - if !checkValueChar(val[i]) { - return false - } - } - return checkValueLast(val[n-1]) -} - -func checkKeyRemain(key string) bool { - // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) - for _, v := range key { - if isAlphaNum(byte(v)) { - continue - } - switch v { - case '_', '-', '*', '/': - continue - } - return false - } - return true -} - -// according to -// -// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// -// param n is remain part length, should be 255 in simple-key or 13 in system-id. -func checkKeyPart(key string, n int) bool { - if len(key) == 0 { - return false - } - first := key[0] // key's first char - ret := len(key[1:]) <= n - ret = ret && first >= 'a' && first <= 'z' - return ret && checkKeyRemain(key[1:]) -} - -func isAlphaNum(c byte) bool { - if c >= 'a' && c <= 'z' { - return true - } - return c >= '0' && c <= '9' -} - -// according to -// -// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) -// -// param n is remain part length, should be 240 exactly. -func checkKeyTenant(key string, n int) bool { - if len(key) == 0 { - return false - } - return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) -} - -// based on the W3C Trace Context specification -// -// key = simple-key / multi-tenant-key -// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// multi-tenant-key = tenant-id "@" system-id -// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// lcalpha = %x61-7A ; a-z -// -// see https://www.w3.org/TR/trace-context-1/#tracestate-header. 
-func checkKey(key string) bool { - tenant, system, ok := strings.Cut(key, "@") - if !ok { - return checkKeyPart(key, 255) - } - return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) -} - -func newMember(key, value string) (member, error) { - if !checkKey(key) { - return member{}, errInvalidKey - } - if !checkValue(value) { - return member{}, errInvalidValue - } - return member{Key: key, Value: value}, nil -} - -func parseMember(m string) (member, error) { - key, val, ok := strings.Cut(m, memberDelimiter) - if !ok { - return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) - } - key = strings.TrimLeft(key, " \t") - val = strings.TrimRight(val, " \t") - result, e := newMember(key, val) - if e != nil { - return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) - } - return result, nil -} - -// String encodes member into a string compliant with the W3C Trace Context -// specification. -func (m member) String() string { - return m.Key + "=" + m.Value -} - -// TraceState provides additional vendor-specific trace identification -// information across different distributed tracing systems. It represents an -// immutable list consisting of key/value pairs, each pair is referred to as a -// list-member. -// -// TraceState conforms to the W3C Trace Context specification -// (https://www.w3.org/TR/trace-context-1). All operations that create or copy -// a TraceState do so by validating all input and will only produce TraceState -// that conform to the specification. Specifically, this means that all -// list-member's key/value pairs are valid, no duplicate list-members exist, -// and the maximum number of list-members (32) is not exceeded. -type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` - // list is the members in order. - list []member -} - -var _ json.Marshaler = TraceState{} - -// ParseTraceState attempts to decode a TraceState from the passed -// string. It returns an error if the input is invalid according to the W3C -// Trace Context specification. -func ParseTraceState(ts string) (TraceState, error) { - if ts == "" { - return TraceState{}, nil - } - - wrapErr := func(err error) error { - return fmt.Errorf("failed to parse tracestate: %w", err) - } - - var members []member - found := make(map[string]struct{}) - for ts != "" { - var memberStr string - memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if len(memberStr) == 0 { - continue - } - - m, err := parseMember(memberStr) - if err != nil { - return TraceState{}, wrapErr(err) - } - - if _, ok := found[m.Key]; ok { - return TraceState{}, wrapErr(errDuplicate) - } - found[m.Key] = struct{}{} - - members = append(members, m) - if n := len(members); n > maxListMembers { - return TraceState{}, wrapErr(errMemberNumber) - } - } - - return TraceState{list: members}, nil -} - -// MarshalJSON marshals the TraceState into JSON. -func (ts TraceState) MarshalJSON() ([]byte, error) { - return json.Marshal(ts.String()) -} - -// String encodes the TraceState into a string compliant with the W3C -// Trace Context specification. The returned string will be invalid if the -// TraceState contains any invalid members. 
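A short sketch of the TraceState API defined above, using the example list-members from the W3C specification (the `mytenant@sys` key is illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("rojo=00f067aa0ba902b7,congo=t61rcWkgMzE")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Get("rojo")) // 00f067aa0ba902b7

	// Insert prepends the new member, as the specification requires;
	// tenant@system keys are accepted by the checkKey logic above.
	ts, _ = ts.Insert("mytenant@sys", "v1")
	fmt.Println(ts.String()) // mytenant@sys=v1,rojo=00f067aa0ba902b7,congo=t61rcWkgMzE
}
```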
-func (ts TraceState) String() string { - if len(ts.list) == 0 { - return "" - } - var n int - n += len(ts.list) // member delimiters: '=' - n += len(ts.list) - 1 // list delimiters: ',' - for _, mem := range ts.list { - n += len(mem.Key) - n += len(mem.Value) - } - - var sb strings.Builder - sb.Grow(n) - _, _ = sb.WriteString(ts.list[0].Key) - _ = sb.WriteByte('=') - _, _ = sb.WriteString(ts.list[0].Value) - for i := 1; i < len(ts.list); i++ { - _ = sb.WriteByte(listDelimiters[0]) - _, _ = sb.WriteString(ts.list[i].Key) - _ = sb.WriteByte('=') - _, _ = sb.WriteString(ts.list[i].Value) - } - return sb.String() -} - -// Get returns the value paired with key from the corresponding TraceState -// list-member if it exists, otherwise an empty string is returned. -func (ts TraceState) Get(key string) string { - for _, member := range ts.list { - if member.Key == key { - return member.Value - } - } - - return "" -} - -// Walk walks all key value pairs in the TraceState by calling f -// Iteration stops if f returns false. -func (ts TraceState) Walk(f func(key, value string) bool) { - for _, m := range ts.list { - if !f(m.Key, m.Value) { - break - } - } -} - -// Insert adds a new list-member defined by the key/value pair to the -// TraceState. If a list-member already exists for the given key, that -// list-member's value is updated. The new or updated list-member is always -// moved to the beginning of the TraceState as specified by the W3C Trace -// Context specification. -// -// If key or value are invalid according to the W3C Trace Context -// specification an error is returned with the original TraceState. -// -// If adding a new list-member means the TraceState would have more members -// then is allowed, the new list-member will be inserted and the right-most -// list-member will be dropped in the returned TraceState. -func (ts TraceState) Insert(key, value string) (TraceState, error) { - m, err := newMember(key, value) - if err != nil { - return ts, err - } - n := len(ts.list) - found := n - for i := range ts.list { - if ts.list[i].Key == key { - found = i - } - } - cTS := TraceState{} - if found == n && n < maxListMembers { - cTS.list = make([]member, n+1) - } else { - cTS.list = make([]member, n) - } - cTS.list[0] = m - // When the number of members exceeds capacity, drop the "right-most". - copy(cTS.list[1:], ts.list[0:found]) - if found < n { - copy(cTS.list[1+found:], ts.list[found+1:]) - } - return cTS, nil -} - -// Delete returns a copy of the TraceState with the list-member identified by -// key removed. -func (ts TraceState) Delete(key string) TraceState { - members := make([]member, ts.Len()) - copy(members, ts.list) - for i, member := range ts.list { - if member.Key == key { - members = append(members[:i], members[i+1:]...) - // TraceState should contain no duplicate members. - break - } - } - return TraceState{list: members} -} - -// Len returns the number of list-members in the TraceState. 
-func (ts TraceState) Len() int { - return len(ts.list) -} diff --git a/openshift/tools/vendor/go.uber.org/multierr/.codecov.yml b/openshift/tools/vendor/go.uber.org/multierr/.codecov.yml deleted file mode 100644 index 6d4d1be7b5..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/openshift/tools/vendor/go.uber.org/multierr/.gitignore b/openshift/tools/vendor/go.uber.org/multierr/.gitignore deleted file mode 100644 index b9a05e3da0..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -cover.html -cover.out -/bin diff --git a/openshift/tools/vendor/go.uber.org/multierr/CHANGELOG.md b/openshift/tools/vendor/go.uber.org/multierr/CHANGELOG.md deleted file mode 100644 index f8177b978c..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/CHANGELOG.md +++ /dev/null @@ -1,95 +0,0 @@ -Releases -======== - -v1.11.0 (2023-03-28) -==================== -- `Errors` now supports any error that implements multiple-error - interface. -- Add `Every` function to allow checking if all errors in the chain - satisfies `errors.Is` against the target error. - -v1.10.0 (2023-03-08) -==================== - -- Comply with Go 1.20's multiple-error interface. -- Drop Go 1.18 support. - Per the support policy, only Go 1.19 and 1.20 are supported now. -- Drop all non-test external dependencies. - -v1.9.0 (2022-12-12) -=================== - -- Add `AppendFunc` that allow passsing functions to similar to - `AppendInvoke`. - -- Bump up yaml.v3 dependency to 3.0.1. - -v1.8.0 (2022-02-28) -=================== - -- `Combine`: perform zero allocations when there are no errors. - - -v1.7.0 (2021-05-06) -=================== - -- Add `AppendInvoke` to append into errors from `defer` blocks. - - -v1.6.0 (2020-09-14) -=================== - -- Actually drop library dependency on development-time tooling. - - -v1.5.0 (2020-02-24) -=================== - -- Drop library dependency on development-time tooling. - - -v1.4.0 (2019-11-04) -=================== - -- Add `AppendInto` function to more ergonomically build errors inside a - loop. - - -v1.3.0 (2019-10-29) -=================== - -- Switch to Go modules. - - -v1.2.0 (2019-09-26) -=================== - -- Support extracting and matching against wrapped errors with `errors.As` - and `errors.Is`. - - -v1.1.0 (2017-06-30) -=================== - -- Added an `Errors(error) []error` function to extract the underlying list of - errors for a multierr error. - - -v1.0.0 (2017-05-31) -=================== - -No changes since v0.2.0. This release is committing to making no breaking -changes to the current API in the 1.X series. - - -v0.2.0 (2017-04-11) -=================== - -- Repeatedly appending to the same error is now faster due to fewer - allocations. 
- - -v0.1.0 (2017-31-03) -=================== - -- Initial release diff --git a/openshift/tools/vendor/go.uber.org/multierr/LICENSE.txt b/openshift/tools/vendor/go.uber.org/multierr/LICENSE.txt deleted file mode 100644 index 413e30f7ce..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2017-2021 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/openshift/tools/vendor/go.uber.org/multierr/Makefile b/openshift/tools/vendor/go.uber.org/multierr/Makefile deleted file mode 100644 index dcb6fe723c..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# Directory to put `go install`ed binaries in. -export GOBIN ?= $(shell pwd)/bin - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) - -.PHONY: golint -golint: - @cd tools && go install golang.org/x/lint/golint - @$(GOBIN)/golint ./... - -.PHONY: staticcheck -staticcheck: - @cd tools && go install honnef.co/go/tools/cmd/staticcheck - @$(GOBIN)/staticcheck ./... - -.PHONY: lint -lint: gofmt golint staticcheck - -.PHONY: cover -cover: - go test -race -coverprofile=cover.out -coverpkg=./... -v ./... - go tool cover -html=cover.out -o cover.html diff --git a/openshift/tools/vendor/go.uber.org/multierr/README.md b/openshift/tools/vendor/go.uber.org/multierr/README.md deleted file mode 100644 index 5ab6ac40f4..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -`multierr` allows combining one or more Go `error`s together. - -## Features - -- **Idiomatic**: - multierr follows best practices in Go, and keeps your code idiomatic. - - It keeps the underlying error type hidden, - allowing you to deal in `error` values exclusively. - - It provides APIs to safely append into an error from a `defer` statement. -- **Performant**: - multierr is optimized for performance: - - It avoids allocations where possible. - - It utilizes slice resizing semantics to optimize common cases - like appending into the same error object from a loop. 
-- **Interoperable**: - multierr interoperates with the Go standard library's error APIs seamlessly: - - The `errors.Is` and `errors.As` functions *just work*. -- **Lightweight**: - multierr comes with virtually no dependencies. - -## Installation - -```bash -go get -u go.uber.org/multierr@latest -``` - -## Status - -Stable: No breaking changes will be made before 2.0. - -------------------------------------------------------------------------------- - -Released under the [MIT License]. - -[MIT License]: LICENSE.txt -[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr -[doc]: https://pkg.go.dev/go.uber.org/multierr -[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg -[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml -[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/openshift/tools/vendor/go.uber.org/multierr/error.go b/openshift/tools/vendor/go.uber.org/multierr/error.go deleted file mode 100644 index 3a828b2dff..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/error.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright (c) 2017-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package multierr allows combining one or more errors together. -// -// # Overview -// -// Errors can be combined with the use of the Combine function. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) -// -// If only two errors are being combined, the Append function may be used -// instead. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The underlying list of errors for a returned error object may be retrieved -// with the Errors function. -// -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } -// -// # Appending from a loop -// -// You sometimes need to append into an error from a loop. -// -// var err error -// for _, item := range items { -// err = multierr.Append(err, process(item)) -// } -// -// Cases like this may require knowledge of whether an individual instance -// failed. This usually requires introduction of a new variable. 
-// -// var err error -// for _, item := range items { -// if perr := process(item); perr != nil { -// log.Warn("skipping item", item) -// err = multierr.Append(err, perr) -// } -// } -// -// multierr includes AppendInto to simplify cases like this. -// -// var err error -// for _, item := range items { -// if multierr.AppendInto(&err, process(item)) { -// log.Warn("skipping item", item) -// } -// } -// -// This will append the error into the err variable, and return true if that -// individual error was non-nil. -// -// See [AppendInto] for more information. -// -// # Deferred Functions -// -// Go makes it possible to modify the return value of a function in a defer -// block if the function was using named returns. This makes it possible to -// record resource cleanup failures from deferred blocks. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } -// -// multierr provides the Invoker type and AppendInvoke function to make cases -// like the above simpler and obviate the need for a closure. The following is -// roughly equivalent to the example above. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(conn)) -// // ... -// } -// -// See [AppendInvoke] and [Invoker] for more information. -// -// NOTE: If you're modifying an error from inside a defer, you MUST use a named -// return value for that function. -// -// # Advanced Usage -// -// Errors returned by Combine and Append MAY implement the following -// interface. -// -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } -// -// Note that if you need access to list of errors behind a multierr error, you -// should prefer using the Errors function. That said, if you need cheap -// read-only access to the underlying errors slice, you can attempt to cast -// the error to this interface. You MUST handle the failure case gracefully -// because errors returned by Combine and Append are not guaranteed to -// implement this interface. -// -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } -package multierr // import "go.uber.org/multierr" - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" - "sync" - "sync/atomic" -) - -var ( - // Separator for single-line error messages. - _singlelineSeparator = []byte("; ") - - // Prefix for multi-line messages - _multilinePrefix = []byte("the following errors occurred:") - - // Prefix for the first and following lines of an item in a list of - // multi-line error messages. - // - // For example, if a single item is: - // - // foo - // bar - // - // It will become, - // - // - foo - // bar - _multilineSeparator = []byte("\n - ") - _multilineIndent = []byte(" ") -) - -// _bufferPool is a pool of bytes.Buffers. -var _bufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, -} - -type errorGroup interface { - Errors() []error -} - -// Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, a nil slice is returned. 
-// -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) -// -// If the error is not composed of other errors, the returned slice contains -// just the error that was passed in. -// -// Callers of this function are free to modify the returned slice. -func Errors(err error) []error { - return extractErrors(err) -} - -// multiError is an error that holds one or more errors. -// -// An instance of this is guaranteed to be non-empty and flattened. That is, -// none of the errors inside multiError are other multiErrors. -// -// multiError formats to a semi-colon delimited list of error messages with -// %v and with a more readable multi-line format with %+v. -type multiError struct { - copyNeeded atomic.Bool - errors []error -} - -// Errors returns the list of underlying errors. -// -// This slice MUST NOT be modified. -func (merr *multiError) Errors() []error { - if merr == nil { - return nil - } - return merr.errors -} - -func (merr *multiError) Error() string { - if merr == nil { - return "" - } - - buff := _bufferPool.Get().(*bytes.Buffer) - buff.Reset() - - merr.writeSingleline(buff) - - result := buff.String() - _bufferPool.Put(buff) - return result -} - -// Every compares every error in the given err against the given target error -// using [errors.Is], and returns true only if every comparison returned true. -func Every(err error, target error) bool { - for _, e := range extractErrors(err) { - if !errors.Is(e, target) { - return false - } - } - return true -} - -func (merr *multiError) Format(f fmt.State, c rune) { - if c == 'v' && f.Flag('+') { - merr.writeMultiline(f) - } else { - merr.writeSingleline(f) - } -} - -func (merr *multiError) writeSingleline(w io.Writer) { - first := true - for _, item := range merr.errors { - if first { - first = false - } else { - w.Write(_singlelineSeparator) - } - io.WriteString(w, item.Error()) - } -} - -func (merr *multiError) writeMultiline(w io.Writer) { - w.Write(_multilinePrefix) - for _, item := range merr.errors { - w.Write(_multilineSeparator) - writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) - } -} - -// Writes s to the writer with the given prefix added before each line after -// the first. -func writePrefixLine(w io.Writer, prefix []byte, s string) { - first := true - for len(s) > 0 { - if first { - first = false - } else { - w.Write(prefix) - } - - idx := strings.IndexByte(s, '\n') - if idx < 0 { - idx = len(s) - 1 - } - - io.WriteString(w, s[:idx+1]) - s = s[idx+1:] - } -} - -type inspectResult struct { - // Number of top-level non-nil errors - Count int - - // Total number of errors including multiErrors - Capacity int - - // Index of the first non-nil error in the list. Value is meaningless if - // Count is zero. - FirstErrorIdx int - - // Whether the list contains at least one multiError - ContainsMultiError bool -} - -// Inspects the given slice of errors so that we can efficiently allocate -// space for it. -func inspect(errors []error) (res inspectResult) { - first := true - for i, err := range errors { - if err == nil { - continue - } - - res.Count++ - if first { - first = false - res.FirstErrorIdx = i - } - - if merr, ok := err.(*multiError); ok { - res.Capacity += len(merr.errors) - res.ContainsMultiError = true - } else { - res.Capacity++ - } - } - return -} - -// fromSlice converts the given list of errors into a single error. -func fromSlice(errors []error) error { - // Don't pay to inspect small slices. 
- switch len(errors) { - case 0: - return nil - case 1: - return errors[0] - } - - res := inspect(errors) - switch res.Count { - case 0: - return nil - case 1: - // only one non-nil entry - return errors[res.FirstErrorIdx] - case len(errors): - if !res.ContainsMultiError { - // Error list is flat. Make a copy of it - // Otherwise "errors" escapes to the heap - // unconditionally for all other cases. - // This lets us optimize for the "no errors" case. - out := append(([]error)(nil), errors...) - return &multiError{errors: out} - } - } - - nonNilErrs := make([]error, 0, res.Capacity) - for _, err := range errors[res.FirstErrorIdx:] { - if err == nil { - continue - } - - if nested, ok := err.(*multiError); ok { - nonNilErrs = append(nonNilErrs, nested.errors...) - } else { - nonNilErrs = append(nonNilErrs, err) - } - } - - return &multiError{errors: nonNilErrs} -} - -// Combine combines the passed errors into a single error. -// -// If zero arguments were passed or if all items are nil, a nil error is -// returned. -// -// Combine(nil, nil) // == nil -// -// If only a single error was passed, it is returned as-is. -// -// Combine(err) // == err -// -// Combine skips over nil arguments so this function may be used to combine -// together errors from operations that fail independently of each other. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) -// -// If any of the passed errors is a multierr error, it will be flattened along -// with the other errors. -// -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) -// -// The returned error formats into a readable multi-line error message if -// formatted with %+v. -// -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) -func Combine(errors ...error) error { - return fromSlice(errors) -} - -// Append appends the given errors together. Either value may be nil. -// -// This function is a specialization of Combine for the common case where -// there are only two errors. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The following pattern may also be used to record failure of deferred -// operations without losing information about the original error. -// -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() -// -// Note that the variable MUST be a named return to append an error to it from -// the defer statement. See also [AppendInvoke]. -func Append(left error, right error) error { - switch { - case left == nil: - return right - case right == nil: - return left - } - - if _, ok := right.(*multiError); !ok { - if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { - // Common case where the error on the left is constantly being - // appended to. - errs := append(l.errors, right) - return &multiError{errors: errs} - } else if !ok { - // Both errors are single errors. - return &multiError{errors: []error{left, right}} - } - } - - // Either right or both, left and right, are multiErrors. Rely on usual - // expensive logic. - errors := [2]error{left, right} - return fromSlice(errors[0:]) -} - -// AppendInto appends an error into the destination of an error pointer and -// returns whether the error being appended was non-nil. 
-// -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) -// -// The above is equivalent to, -// -// err := multierr.Append(r.Close(), w.Close()) -// -// As AppendInto reports whether the provided error was non-nil, it may be -// used to build a multierr error in a loop more ergonomically. For example: -// -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } -// -// Compare this with a version that relies solely on Append: -// -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } -func AppendInto(into *error, err error) (errored bool) { - if into == nil { - // We panic if 'into' is nil. This is not documented above - // because suggesting that the pointer must be non-nil may - // confuse users into thinking that the error that it points - // to must be non-nil. - panic("misuse of multierr.AppendInto: into pointer must not be nil") - } - - if err == nil { - return false - } - *into = Append(*into, err) - return true -} - -// Invoker is an operation that may fail with an error. Use it with -// AppendInvoke to append the result of calling the function into an error. -// This allows you to conveniently defer capture of failing operations. -// -// See also, [Close] and [Invoke]. -type Invoker interface { - Invoke() error -} - -// Invoke wraps a function which may fail with an error to match the Invoker -// interface. Use it to supply functions matching this signature to -// AppendInvoke. -// -// For example, -// -// func processReader(r io.Reader) (err error) { -// scanner := bufio.NewScanner(r) -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// for scanner.Scan() { -// // ... -// } -// // ... -// } -// -// In this example, the following line will construct the Invoker right away, -// but defer the invocation of scanner.Err() until the function returns. -// -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// -// Note that the error you're appending to from the defer statement MUST be a -// named return. -type Invoke func() error - -// Invoke calls the supplied function and returns its result. -func (i Invoke) Invoke() error { return i() } - -// Close builds an Invoker that closes the provided io.Closer. Use it with -// AppendInvoke to close io.Closers and append their results into an error. -// -// For example, -// -// func processFile(path string) (err error) { -// f, err := os.Open(path) -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// return processReader(f) -// } -// -// In this example, multierr.Close will construct the Invoker right away, but -// defer the invocation of f.Close until the function returns. -// -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// -// Note that the error you're appending to from the defer statement MUST be a -// named return. -func Close(closer io.Closer) Invoker { - return Invoke(closer.Close) -} - -// AppendInvoke appends the result of calling the given Invoker into the -// provided error pointer. Use it with named returns to safely defer -// invocation of fallible operations until a function returns, and capture the -// resulting errors. -// -// func doSomething(...) (err error) { -// // ... -// f, err := openFile(..) 
-// if err != nil { -// return err -// } -// -// // multierr will call f.Close() when this function returns and -// // if the operation fails, its append its error into the -// // returned error. -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// -// scanner := bufio.NewScanner(f) -// // Similarly, this scheduled scanner.Err to be called and -// // inspected when the function returns and append its error -// // into the returned error. -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// -// // ... -// } -// -// NOTE: If used with a defer, the error variable MUST be a named return. -// -// Without defer, AppendInvoke behaves exactly like AppendInto. -// -// err := // ... -// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) -// -// // ...is roughly equivalent to... -// -// err := // ... -// multierr.AppendInto(&err, foo()) -// -// The advantage of the indirection introduced by Invoker is to make it easy -// to defer the invocation of a function. Without this indirection, the -// invoked function will be evaluated at the time of the defer block rather -// than when the function returns. -// -// // BAD: This is likely not what the caller intended. This will evaluate -// // foo() right away and append its result into the error when the -// // function returns. -// defer multierr.AppendInto(&err, foo()) -// -// // GOOD: This will defer invocation of foo unutil the function returns. -// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) -// -// multierr provides a few Invoker implementations out of the box for -// convenience. See [Invoker] for more information. -func AppendInvoke(into *error, invoker Invoker) { - AppendInto(into, invoker.Invoke()) -} - -// AppendFunc is a shorthand for [AppendInvoke]. -// It allows using function or method value directly -// without having to wrap it into an [Invoker] interface. -// -// func doSomething(...) (err error) { -// w, err := startWorker(...) -// if err != nil { -// return err -// } -// -// // multierr will call w.Stop() when this function returns and -// // if the operation fails, it appends its error into the -// // returned error. -// defer multierr.AppendFunc(&err, w.Stop) -// } -func AppendFunc(into *error, fn func() error) { - AppendInvoke(into, Invoke(fn)) -} diff --git a/openshift/tools/vendor/go.uber.org/multierr/error_post_go120.go b/openshift/tools/vendor/go.uber.org/multierr/error_post_go120.go deleted file mode 100644 index a173f9c251..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/error_post_go120.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2017-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build go1.20 -// +build go1.20 - -package multierr - -// Unwrap returns a list of errors wrapped by this multierr. -func (merr *multiError) Unwrap() []error { - return merr.Errors() -} - -type multipleErrors interface { - Unwrap() []error -} - -func extractErrors(err error) []error { - if err == nil { - return nil - } - - // check if the given err is an Unwrapable error that - // implements multipleErrors interface. - eg, ok := err.(multipleErrors) - if !ok { - return []error{err} - } - - return append(([]error)(nil), eg.Unwrap()...) -} diff --git a/openshift/tools/vendor/go.uber.org/multierr/error_pre_go120.go b/openshift/tools/vendor/go.uber.org/multierr/error_pre_go120.go deleted file mode 100644 index 93872a3fcd..0000000000 --- a/openshift/tools/vendor/go.uber.org/multierr/error_pre_go120.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2017-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build !go1.20 -// +build !go1.20 - -package multierr - -import "errors" - -// Versions of Go before 1.20 did not support the Unwrap() []error method. -// This provides a similar behavior by implementing the Is(..) and As(..) -// methods. -// See the errors.Join proposal for details: -// https://github.com/golang/go/issues/53435 - -// As attempts to find the first error in the error list that matches the type -// of the value that target points to. -// -// This function allows errors.As to traverse the values stored on the -// multierr error. -func (merr *multiError) As(target interface{}) bool { - for _, err := range merr.Errors() { - if errors.As(err, target) { - return true - } - } - return false -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored on the -// multierr error. -func (merr *multiError) Is(target error) bool { - for _, err := range merr.Errors() { - if errors.Is(err, target) { - return true - } - } - return false -} - -func extractErrors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. 
Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - return append(([]error)(nil), eg.Errors()...) -} diff --git a/openshift/tools/vendor/go.yaml.in/yaml/v2/.travis.yml b/openshift/tools/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 0000000000..7348c50c0c --- /dev/null +++ b/openshift/tools/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/openshift/tools/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/openshift/tools/vendor/go.yaml.in/yaml/v2/LICENSE similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE rename to openshift/tools/vendor/go.yaml.in/yaml/v2/LICENSE diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/openshift/tools/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml rename to openshift/tools/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/openshift/tools/vendor/go.yaml.in/yaml/v2/NOTICE similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE rename to openshift/tools/vendor/go.yaml.in/yaml/v2/NOTICE diff --git a/openshift/tools/vendor/go.yaml.in/yaml/v2/README.md b/openshift/tools/vendor/go.yaml.in/yaml/v2/README.md new file mode 100644 index 0000000000..c9388da425 --- /dev/null +++ b/openshift/tools/vendor/go.yaml.in/yaml/v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *go.yaml.in/yaml/v2*. + +To install it, run: + + go get go.yaml.in/yaml/v2 + +API documentation +----------------- + +See: + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/apic.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/apic.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/decode.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/decode.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/emitterc.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/emitterc.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/encode.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/encode.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/parserc.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/parserc.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/readerc.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/readerc.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/resolve.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/resolve.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/scannerc.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/scannerc.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/sorter.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/sorter.go diff --git 
a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/writerc.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/writerc.go diff --git a/openshift/tools/vendor/go.yaml.in/yaml/v2/yaml.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/yaml.go new file mode 100644 index 0000000000..5248e1263c --- /dev/null +++ b/openshift/tools/vendor/go.yaml.in/yaml/v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/yamlh.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/yamlh.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/openshift/tools/vendor/go.yaml.in/yaml/v2/yamlprivateh.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go rename to openshift/tools/vendor/go.yaml.in/yaml/v2/yamlprivateh.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE b/openshift/tools/vendor/go.yaml.in/yaml/v3/LICENSE similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE rename to openshift/tools/vendor/go.yaml.in/yaml/v3/LICENSE diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE b/openshift/tools/vendor/go.yaml.in/yaml/v3/NOTICE similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE rename to openshift/tools/vendor/go.yaml.in/yaml/v3/NOTICE diff --git a/openshift/tools/vendor/go.yaml.in/yaml/v3/README.md b/openshift/tools/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 0000000000..15a85a6350 --- /dev/null +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. 
+
+
+## Project Status
+
+This project started as a fork of the extremely popular [go-yaml](
+https://github.com/go-yaml/yaml/)
+project, and is being maintained by the official [YAML organization](
+https://github.com/yaml/).
+
+The YAML team took over ongoing maintenance and development of the project
+after discussion with go-yaml's author, @niemeyer, following his decision to
+[label the project repository as "unmaintained"](
+https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
+
+We have put together a team of dedicated maintainers including representatives
+of go-yaml's most important downstream projects.
+
+We will strive to earn the trust of the various go-yaml forks to switch back
+to this repository as their upstream.
+
+Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if
+you would like to contribute or be involved.
+
+
+## Compatibility
+
+The `yaml` package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, v3 of the `yaml` package:
+
+* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
+  decoded into a typed bool value.
+  Otherwise they behave as a string.
+  Booleans in YAML 1.2 are `true`/`false` only.
+* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
+  `0o777` as specified in YAML 1.2, because most parsers still use the old
+  format.
+  Octals in the `0o777` format are supported though, so new files work.
+* Does not support base-60 floats.
+  These are gone from YAML 1.2, and were actually never supported by this
+  package as it's clearly a poor choice.
+
+
+## Installation and Usage
+
+The import path for the package is *go.yaml.in/yaml/v3*.
+
+To install it, run:
+
+```bash
+go get go.yaml.in/yaml/v3
+```
+
+
+## API Documentation
+
+See: <https://pkg.go.dev/go.yaml.in/yaml/v3>
+
+
+## API Stability
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](
+https://gopkg.in).
+
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"go.yaml.in/yaml/v3"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+	A string
+	B struct {
+		RenamedC int   `yaml:"c"`
+		D        []int `yaml:",flow"`
+	}
+}
+
+func main() {
+	t := T{}
+
+	err := yaml.Unmarshal([]byte(data), &t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t:\n%v\n\n", t)
+
+	d, err := yaml.Marshal(&t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+	m := make(map[interface{}]interface{})
+
+	err = yaml.Unmarshal([]byte(data), &m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m:\n%v\n\n", m)
+
+	d, err = yaml.Marshal(&m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+    c: 2
+    d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+    c: 2
+    d:
+        - 3
+        - 4
+```
+
+
+## License
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
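The typed-bool carve-out in the Compatibility section above is easy to trip over, so here is a small sketch of both sides of it (field and variable names are illustrative):

```go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	doc := []byte("legacy: on\nmodern: true\n")

	// Decoding into typed bools accepts the YAML 1.1 forms.
	var typed struct {
		Legacy bool
		Modern bool
	}
	if err := yaml.Unmarshal(doc, &typed); err != nil {
		panic(err)
	}
	fmt.Println(typed.Legacy, typed.Modern) // true true

	// Decoding into an untyped map follows the YAML 1.2 core schema:
	// only true/false resolve to bools, so "on" stays a string.
	var untyped map[string]interface{}
	if err := yaml.Unmarshal(doc, &untyped); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", untyped["legacy"], untyped["modern"]) // string bool
}
```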
diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/apic.go similarity index 99% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/apic.go index ae7d049f18..05fd305da1 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/apic.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/openshift/tools/vendor/go.yaml.in/yaml/v3/decode.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 0000000000..02e2b17bfe --- /dev/null +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. 
+func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" 
{ + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/emitterc.go similarity index 98% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/emitterc.go index 6ea0ae8c10..ab4e03ba72 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -162,10 +162,9 @@ func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { // Check if we need to accumulate more events before emitting. 
// // We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { if emitter.events_head == len(emitter.events) { return true @@ -485,6 +484,18 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") } +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + // Expect the root node. func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/encode.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/encode.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/parserc.go similarity index 93% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/parserc.go index 268558a0d6..25fe823637 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/parserc.go @@ -227,7 +227,8 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool // Parse the production: // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ +// +// ************ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -249,9 +250,12 @@ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// * +// +// * +// // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* +// +// ************************* func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { token := peek_token(parser) @@ -356,8 +360,8 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t // Parse the productions: // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** // +// *********** func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -379,9 +383,10 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -428,30 +433,41 @@ func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// // block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// // flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// // properties ::= TAG ANCHOR? | ANCHOR TAG? -// ************************* +// +// ************************* +// // block_content ::= block_collection | flow_collection | SCALAR -// ****** +// +// ****** +// // flow_content ::= flow_collection | SCALAR -// ****** +// +// ****** func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() @@ -682,8 +698,8 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i // Parse the productions: // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* // +// ******************** *********** * ********* func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -740,7 +756,8 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e // Parse the productions: // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * +// +// *********** * func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -805,14 +822,14 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* // -// BLOCK-END -// ********* +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* // +// BLOCK-END +// ********* func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -881,13 +898,11 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// +// ((KEY block_node_or_indentless_sequence?)? // +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -915,16 +930,18 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev // Parse the productions: // flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -987,11 +1004,10 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev return true } -// // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * // +// *** * func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1011,8 +1027,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, ev // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * // +// ***** * func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1035,8 +1051,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1053,16 +1069,17 @@ func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, ev // Parse the productions: // flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * // +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** * func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -1128,8 +1145,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event // Parse the productions: // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// +// - ***** * func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { token := peek_token(parser) if token == nil { diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/readerc.go similarity index 99% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/readerc.go index b7de0a89c4..56af245366 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/readerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/resolve.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/resolve.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/scannerc.go similarity index 99% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/scannerc.go index ca0070108f..30b1f08920 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/scannerc.go @@ -1614,11 +1614,11 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { // Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { // Eat '%'. start_mark := parser.mark @@ -1719,11 +1719,11 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool // Scan the directive name. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ // +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { // Consume the directive name. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1758,8 +1758,9 @@ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark // Scan the value of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ +// +// %YAML 1.1 # a comment \n +// ^^^^^^ func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { // Eat whitespaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1797,10 +1798,11 @@ const max_number_length = 2 // Scan the version number of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { // Repeat while the next character is digit. @@ -1834,9 +1836,9 @@ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark // Scan the value of a TAG-DIRECTIVE token. // // Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { var handle_value, prefix_value []byte @@ -2847,7 +2849,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t continue } if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false @@ -2876,7 +2878,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t parser.comments = append(parser.comments, yaml_comment_t{ token_mark: token_mark, start_mark: start_mark, - line: text, + line: text, }) } return true @@ -2910,7 +2912,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo // the foot is the line below it. var foot_line = -1 if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 + foot_line = parser.mark.line - parser.newlines + 1 if parser.newlines == 0 && parser.mark.column > 1 { foot_line++ } @@ -2996,7 +2998,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo recent_empty = false // Consume until after the consumed comment line. 
- seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/sorter.go similarity index 100% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/sorter.go diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/writerc.go similarity index 99% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/writerc.go index b8a116bf9a..266d0b092c 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/writerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/openshift/tools/vendor/go.yaml.in/yaml/v3/yaml.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 0000000000..0b101cd20d --- /dev/null +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. 
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty    Only include the field if it's not set to the zero
+//	             value for the type or to empty slices or maps.
+//	             Zero valued structs will be omitted if all their public
+//	             fields are zero, unless they implement an IsZero
+//	             method (see the IsZeroer interface type), in which
+//	             case the field will be excluded if IsZero returns true.
+//
+//	flow         Marshal using a flow style (useful for structs,
+//	             sequences and maps).
+//
+//	inline       Inline the field, which must be a struct or a map,
+//	             causing all of its fields or keys to be processed as if
+//	             they were part of the outer struct. For maps, keys must
+//	             not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//	var person struct {
+//		Name    string
+//		Address yaml.Node
+//	}
+//	err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//	var person yaml.Node
+//	err := yaml.Unmarshal(data, &person)
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
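+	//
+	// A typical way to traverse a decoded tree is to switch on this field.
+	// A minimal illustrative sketch (the walk helper is hypothetical, not
+	// part of this package):
+	//
+	//	func walk(n *yaml.Node) {
+	//		switch n.Kind {
+	//		case yaml.ScalarNode:
+	//			fmt.Println(n.Value) // the scalar text
+	//		default:
+	//			for _, c := range n.Content {
+	//				walk(c) // documents, mappings, and sequences nest here
+	//			}
+	//		}
+	//	}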
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is in.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type
+// for the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+			if n.IsZero() {
+				return nullTag
+			}
+		}
+		return ""
+	}
+	return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+	return n.Kind == ScalarNode &&
+		(shortTag(n.Tag) == strTag ||
+			(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
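+//
+// For example (illustrative only):
+//
+//	var n yaml.Node
+//	n.SetString("multi\nline")
+//	// n.Kind is now yaml.ScalarNode and, because the value contains a
+//	// newline, n.Style is yaml.LiteralStyle, so the value will be
+//	// emitted as a literal block scalar.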
+func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) 
+ } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/yamlh.go similarity index 99% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/yamlh.go index 40c74de497..f59aa40f64 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -438,7 +438,9 @@ type yaml_document_t struct { // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). +// +// yaml_parser_set_input(). +// // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. @@ -639,7 +641,6 @@ type yaml_parser_t struct { } type yaml_comment_t struct { - scan_mark yaml_mark_t // Position where scanning for comments started token_mark yaml_mark_t // Position after which tokens will be associated with this comment start_mark yaml_mark_t // Position of '#' comment mark @@ -659,13 +660,14 @@ type yaml_comment_t struct { // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). +// +// yaml_emitter_set_output(). 
+// // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. -// type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error type yaml_emitter_state_t int diff --git a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go b/openshift/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go similarity index 97% rename from openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go rename to openshift/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go index e88f9c54ae..dea1ba9610 100644 --- a/openshift/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go +++ b/openshift/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -137,8 +137,8 @@ func is_crlf(b []byte, i int) bool { func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) return ( - // is_break: - b[i] == '\r' || // CR (#xD) + // is_break: + b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) @@ -151,8 +151,8 @@ func is_breakz(b []byte, i int) bool { func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) return ( - // is_space: - b[i] == ' ' || + // is_space: + b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) @@ -166,8 +166,8 @@ func is_spacez(b []byte, i int) bool { func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || + // is_blank: + b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) diff --git a/openshift/tools/vendor/golang.org/x/crypto/LICENSE b/openshift/tools/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 2a7cf70da6..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openshift/tools/vendor/golang.org/x/crypto/PATENTS b/openshift/tools/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 733099041f..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/openshift/tools/vendor/golang.org/x/crypto/cast5/cast5.go b/openshift/tools/vendor/golang.org/x/crypto/cast5/cast5.go deleted file mode 100644 index 016e90215c..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/cast5/cast5.go +++ /dev/null @@ -1,536 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. -// -// CAST5 is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). 
-package cast5 - -import ( - "errors" - "math/bits" -) - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. - -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := bits.RotateLeft32(t, int(r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := bits.RotateLeft32(t, int(r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := bits.RotateLeft32(t, int(r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 
0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 
0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 
0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 
0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 
0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 
0x4eeb8476, 0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 
0xb48a2465, 0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/asn1.go deleted file mode 100644 index d25979d9f5..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - encoding_asn1 "encoding/asn1" - "fmt" - "math/big" - "reflect" - "time" - - "golang.org/x/crypto/cryptobyte/asn1" -) - -// This file contains ASN.1-related methods for String and Builder. - -// Builder - -// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1Int64(v int64) { - b.addASN1Signed(asn1.INTEGER, v) -} - -// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the -// given tag. -func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { - b.addASN1Signed(tag, v) -} - -// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. -func (b *Builder) AddASN1Enum(v int64) { - b.addASN1Signed(asn1.ENUM, v) -} - -func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { - b.AddASN1(tag, func(c *Builder) { - length := 1 - for i := v; i >= 0x80 || i < -0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1Uint64(v uint64) { - b.AddASN1(asn1.INTEGER, func(c *Builder) { - length := 1 - for i := v; i >= 0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1BigInt(n *big.Int) { - if b.err != nil { - return - } - - b.AddASN1(asn1.INTEGER, func(c *Builder) { - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement form. So we - // invert and subtract 1. If the most-significant-bit isn't set then - // we'll need to pad the beginning with 0xff in order to keep the number - // negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - c.add(0xff) - } - c.add(bytes...) - } else if n.Sign() == 0 { - c.add(0) - } else { - bytes := n.Bytes() - if bytes[0]&0x80 != 0 { - c.add(0) - } - c.add(bytes...) - } - }) -} - -// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. 
-func (b *Builder) AddASN1OctetString(bytes []byte) { - b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { - c.AddBytes(bytes) - }) -} - -const generalizedTimeFormatStr = "20060102150405Z0700" - -// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. -func (b *Builder) AddASN1GeneralizedTime(t time.Time) { - if t.Year() < 0 || t.Year() > 9999 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) - return - } - b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { - c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) - }) -} - -// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. -func (b *Builder) AddASN1UTCTime(t time.Time) { - b.AddASN1(asn1.UTCTime, func(c *Builder) { - // As utilized by the X.509 profile, UTCTime can only - // represent the years 1950 through 2049. - if t.Year() < 1950 || t.Year() >= 2050 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) - return - } - c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) - }) -} - -// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not -// support BIT STRINGs that are not a whole number of bytes. -func (b *Builder) AddASN1BitString(data []byte) { - b.AddASN1(asn1.BIT_STRING, func(b *Builder) { - b.AddUint8(0) - b.AddBytes(data) - }) -} - -func (b *Builder) addBase128Int(n int64) { - var length int - if n == 0 { - length = 1 - } else { - for i := n; i > 0; i >>= 7 { - length++ - } - } - - for i := length - 1; i >= 0; i-- { - o := byte(n >> uint(i*7)) - o &= 0x7f - if i != 0 { - o |= 0x80 - } - - b.add(o) - } -} - -func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { - if len(oid) < 2 { - return false - } - - if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { - return false - } - - for _, v := range oid { - if v < 0 { - return false - } - } - - return true -} - -func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { - b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { - if !isValidOID(oid) { - b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) - return - } - - b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) - for _, v := range oid[2:] { - b.addBase128Int(int64(v)) - } - }) -} - -func (b *Builder) AddASN1Boolean(v bool) { - b.AddASN1(asn1.BOOLEAN, func(b *Builder) { - if v { - b.AddUint8(0xff) - } else { - b.AddUint8(0) - } - }) -} - -func (b *Builder) AddASN1NULL() { - b.add(uint8(asn1.NULL), 0) -} - -// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if -// successful or records an error if one occurred. -func (b *Builder) MarshalASN1(v interface{}) { - // NOTE(martinkr): This is somewhat of a hack to allow propagation of - // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a - // value embedded into a struct, its tag information is lost. - if b.err != nil { - return - } - bytes, err := encoding_asn1.Marshal(v) - if err != nil { - b.err = err - return - } - b.AddBytes(bytes) -} - -// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. -// Tags greater than 30 are not supported and result in an error (i.e. -// low-tag-number form only). The child builder passed to the -// BuilderContinuation can be used to build the content of the ASN.1 object. -func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { - if b.err != nil { - return - } - // Identifiers with the low five bits set indicate high-tag-number format - // (two or more octets), which we don't support. 
- if tag&0x1f == 0x1f { - b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) - return - } - b.AddUint8(uint8(tag)) - b.addLengthPrefixed(1, true, f) -} - -// String - -// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean -// representation into out and advances. It reports whether the read -// was successful. -func (s *String) ReadASN1Boolean(out *bool) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { - return false - } - - switch bytes[0] { - case 0: - *out = false - case 0xff: - *out = true - default: - return false - } - - return true -} - -// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does -// not point to an integer, to a big.Int, or to a []byte it panics. Only -// positive and zero values can be decoded into []byte, and they are returned as -// big-endian binary values that share memory with s. Positive values will have -// no leading zeroes, and zero will be returned as a single zero byte. -// ReadASN1Integer reports whether the read was successful. -func (s *String) ReadASN1Integer(out interface{}) bool { - switch out := out.(type) { - case *int, *int8, *int16, *int32, *int64: - var i int64 - if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { - return false - } - reflect.ValueOf(out).Elem().SetInt(i) - return true - case *uint, *uint8, *uint16, *uint32, *uint64: - var u uint64 - if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { - return false - } - reflect.ValueOf(out).Elem().SetUint(u) - return true - case *big.Int: - return s.readASN1BigInt(out) - case *[]byte: - return s.readASN1Bytes(out) - default: - panic("out does not point to an integer type") - } -} - -func checkASN1Integer(bytes []byte) bool { - if len(bytes) == 0 { - // An INTEGER is encoded with at least one octet. - return false - } - if len(bytes) == 1 { - return true - } - if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { - // Value is not minimally encoded. - return false - } - return true -} - -var bigOne = big.NewInt(1) - -func (s *String) readASN1BigInt(out *big.Int) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { - return false - } - if bytes[0]&0x80 == 0x80 { - // Negative number. - neg := make([]byte, len(bytes)) - for i, b := range bytes { - neg[i] = ^b - } - out.SetBytes(neg) - out.Add(out, bigOne) - out.Neg(out) - } else { - out.SetBytes(bytes) - } - return true -} - -func (s *String) readASN1Bytes(out *[]byte) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { - return false - } - if bytes[0]&0x80 == 0x80 { - return false - } - for len(bytes) > 1 && bytes[0] == 0 { - bytes = bytes[1:] - } - *out = bytes - return true -} - -func (s *String) readASN1Int64(out *int64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { - return false - } - return true -} - -func asn1Signed(out *int64, n []byte) bool { - length := len(n) - if length > 8 { - return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= int64(n[i]) - } - // Shift up and down in order to sign extend the result. 
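// Editor's annotation (not vendored code): concretely, for n = []byte{0xff}
// the loop leaves *out = 0xff; shifting left by 64-8 = 56 bits puts that
// byte's top bit into the sign position, and the arithmetic right shift
// back by 56 smears it down, yielding -1 as intended.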
- *out <<= 64 - uint8(length)*8 - *out >>= 64 - uint8(length)*8 - return true -} - -func (s *String) readASN1Uint64(out *uint64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { - return false - } - return true -} - -func asn1Unsigned(out *uint64, n []byte) bool { - length := len(n) - if length > 9 || length == 9 && n[0] != 0 { - // Too large for uint64. - return false - } - if n[0]&0x80 != 0 { - // Negative number. - return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= uint64(n[i]) - } - return true -} - -// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out -// and advances. It reports whether the read was successful and resulted in a -// value that can be represented in an int64. -func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { - var bytes String - return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) -} - -// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports -// whether the read was successful. -func (s *String) ReadASN1Enum(out *int) bool { - var bytes String - var i int64 - if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { - return false - } - if int64(int(i)) != i { - return false - } - *out = int(i) - return true -} - -func (s *String) readBase128Int(out *int) bool { - ret := 0 - for i := 0; len(*s) > 0; i++ { - if i == 5 { - return false - } - // Avoid overflowing int on a 32-bit platform. - // We don't want different behavior based on the architecture. - if ret >= 1<<(31-7) { - return false - } - ret <<= 7 - b := s.read(1)[0] - - // ITU-T X.690, section 8.19.2: - // The subidentifier shall be encoded in the fewest possible octets, - // that is, the leading octet of the subidentifier shall not have the value 0x80. - if i == 0 && b == 0x80 { - return false - } - - ret |= int(b & 0x7f) - if b&0x80 == 0 { - *out = ret - return true - } - } - return false // truncated -} - -// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and -// advances. It reports whether the read was successful. -func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { - return false - } - - // In the worst case, we get two elements from the first byte (which is - // encoded differently) and then every varint is a single byte long. - components := make([]int, len(bytes)+1) - - // The first varint is 40*value1 + value2: - // According to this packing, value1 can take the values 0, 1 and 2 only. - // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, - // then there are no restrictions on value2. - var v int - if !bytes.readBase128Int(&v) { - return false - } - if v < 80 { - components[0] = v / 40 - components[1] = v % 40 - } else { - components[0] = 2 - components[1] = v - 80 - } - - i := 2 - for ; len(bytes) > 0; i++ { - if !bytes.readBase128Int(&v) { - return false - } - components[i] = v - } - *out = components[:i] - return true -} - -// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and -// advances. It reports whether the read was successful. 
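
Editor's aside, not part of the vendored file: a minimal sketch of the arc packing described in ReadASN1ObjectIdentifier above. The first content octet 0x2a = 40*1 + 2 packs the first two arcs of 1.2.840.113549; the remaining arcs are base-128 varints whose high bit is a continuation flag (840 = 0x86 0x48, 113549 = 0x86 0xf7 0x0d).

package main

import (
	"encoding/asn1"
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	der := cryptobyte.String([]byte{0x06, 0x06, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d})
	var oid asn1.ObjectIdentifier
	if der.ReadASN1ObjectIdentifier(&oid) {
		fmt.Println(oid) // 1.2.840.113549
	}
}
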
-func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
-	var bytes String
-	if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
-		return false
-	}
-	t := string(bytes)
-	res, err := time.Parse(generalizedTimeFormatStr, t)
-	if err != nil {
-		return false
-	}
-	if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
-		return false
-	}
-	*out = res
-	return true
-}
-
-const defaultUTCTimeFormatStr = "060102150405Z0700"
-
-// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
-// It reports whether the read was successful.
-func (s *String) ReadASN1UTCTime(out *time.Time) bool {
-	var bytes String
-	if !s.ReadASN1(&bytes, asn1.UTCTime) {
-		return false
-	}
-	t := string(bytes)
-
-	formatStr := defaultUTCTimeFormatStr
-	var err error
-	res, err := time.Parse(formatStr, t)
-	if err != nil {
-		// Fallback to minute precision if we can't parse second
-		// precision. If we are following X.509 or X.690 we shouldn't
-		// support this, but we do.
-		formatStr = "0601021504Z0700"
-		res, err = time.Parse(formatStr, t)
-	}
-	if err != nil {
-		return false
-	}
-
-	if serialized := res.Format(formatStr); serialized != t {
-		return false
-	}
-
-	if res.Year() >= 2050 {
-		// UTCTime interprets the low order digits 50-99 as 1950-99.
-		// This only applies to its use in the X.509 profile.
-		// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
-		res = res.AddDate(-100, 0, 0)
-	}
-	*out = res
-	return true
-}
-
-// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
-// It reports whether the read was successful.
-func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
-	var bytes String
-	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
-		len(bytes)*8/8 != len(bytes) {
-		return false
-	}
-
-	paddingBits := bytes[0]
-	bytes = bytes[1:]
-	if paddingBits > 7 ||
-		len(bytes) == 0 && paddingBits != 0 ||
-		len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
-		return false
-	}
-
-	out.BitLength = len(bytes)*8 - int(paddingBits)
-	out.Bytes = bytes
-	return true
-}
-
-// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
-// an error if the BIT STRING is not a whole number of bytes. It reports
-// whether the read was successful.
-func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
-	var bytes String
-	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
-		return false
-	}
-
-	paddingBits := bytes[0]
-	if paddingBits != 0 {
-		return false
-	}
-	*out = bytes[1:]
-	return true
-}
-
-// ReadASN1Bytes reads the contents of a DER-encoded ASN.1 element (not including
-// tag and length bytes) into out, and advances. The element must match the
-// given tag. It reports whether the read was successful.
-func (s *String) ReadASN1Bytes(out *[]byte, tag asn1.Tag) bool {
-	return s.ReadASN1((*String)(out), tag)
-}
-
-// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
-// tag and length bytes) into out, and advances. The element must match the
-// given tag. It reports whether the read was successful.
-//
-// Tags greater than 30 are not supported (i.e. low-tag-number format only).
-func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
-	var t asn1.Tag
-	if !s.ReadAnyASN1(out, &t) || t != tag {
-		return false
-	}
-	return true
-}
-
-// ReadASN1Element reads the contents of a DER-encoded ASN.1 element (including
-// tag and length bytes) into out, and advances. The element must match the
-// given tag. It reports whether the read was successful.
-//
-// Tags greater than 30 are not supported (i.e. low-tag-number format only).
-func (s *String) ReadASN1Element(out *String, tag asn1.Tag) bool {
-	var t asn1.Tag
-	if !s.ReadAnyASN1Element(out, &t) || t != tag {
-		return false
-	}
-	return true
-}
-
-// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
-// tag and length bytes) into out, sets outTag to its tag, and advances.
-// It reports whether the read was successful.
-//
-// Tags greater than 30 are not supported (i.e. low-tag-number format only).
-func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
-	return s.readASN1(out, outTag, true /* skip header */)
-}
-
-// ReadAnyASN1Element reads the contents of a DER-encoded ASN.1 element
-// (including tag and length bytes) into out, sets outTag to its tag, and
-// advances. It reports whether the read was successful.
-//
-// Tags greater than 30 are not supported (i.e. low-tag-number format only).
-func (s *String) ReadAnyASN1Element(out *String, outTag *asn1.Tag) bool {
-	return s.readASN1(out, outTag, false /* include header */)
-}
-
-// PeekASN1Tag reports whether the next ASN.1 value on the string starts with
-// the given tag.
-func (s String) PeekASN1Tag(tag asn1.Tag) bool {
-	if len(s) == 0 {
-		return false
-	}
-	return asn1.Tag(s[0]) == tag
-}
-
-// SkipASN1 reads and discards an ASN.1 element with the given tag. It
-// reports whether the operation was successful.
-func (s *String) SkipASN1(tag asn1.Tag) bool {
-	var unused String
-	return s.ReadASN1(&unused, tag)
-}
-
-// ReadOptionalASN1 attempts to read the contents of a DER-encoded ASN.1
-// element (not including tag and length bytes) tagged with the given tag into
-// out. It stores whether an element with the tag was found in outPresent,
-// unless outPresent is nil. It reports whether the read was successful.
-func (s *String) ReadOptionalASN1(out *String, outPresent *bool, tag asn1.Tag) bool {
-	present := s.PeekASN1Tag(tag)
-	if outPresent != nil {
-		*outPresent = present
-	}
-	if present && !s.ReadASN1(out, tag) {
-		return false
-	}
-	return true
-}
-
-// SkipOptionalASN1 advances s over an ASN.1 element with the given tag, or
-// else leaves s unchanged. It reports whether the operation was successful.
-func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
-	if !s.PeekASN1Tag(tag) {
-		return true
-	}
-	var unused String
-	return s.ReadASN1(&unused, tag)
-}
-
-// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER
-// explicitly tagged with tag into out and advances. If no element with a
-// matching tag is present, it writes defaultValue into out instead. Otherwise,
-// it behaves like ReadASN1Integer. It reports whether the read was successful.
-func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool {
-	var present bool
-	var i String
-	if !s.ReadOptionalASN1(&i, &present, tag) {
-		return false
-	}
-	if !present {
-		switch out.(type) {
-		case *int, *int8, *int16, *int32, *int64,
-			*uint, *uint8, *uint16, *uint32, *uint64, *[]byte:
-			reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue))
-		case *big.Int:
-			if defaultValue, ok := defaultValue.(*big.Int); ok {
-				out.(*big.Int).Set(defaultValue)
-			} else {
-				panic("out points to big.Int, but defaultValue does not")
-			}
-		default:
-			panic("invalid integer type")
-		}
-		return true
-	}
-	if !i.ReadASN1Integer(out) || !i.Empty() {
-		return false
-	}
-	return true
-}
-
-// ReadOptionalASN1OctetString attempts to read an optional ASN.1 OCTET STRING
-// explicitly tagged with tag into out and advances. If no element with a
-// matching tag is present, it sets "out" to nil instead. It reports
-// whether the read was successful.
-func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag asn1.Tag) bool {
-	var present bool
-	var child String
-	if !s.ReadOptionalASN1(&child, &present, tag) {
-		return false
-	}
-	if outPresent != nil {
-		*outPresent = present
-	}
-	if present {
-		var oct String
-		if !child.ReadASN1(&oct, asn1.OCTET_STRING) || !child.Empty() {
-			return false
-		}
-		*out = oct
-	} else {
-		*out = nil
-	}
-	return true
-}
-
-// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN
-// explicitly tagged with tag into out and advances. If no element with a
-// matching tag is present, it sets "out" to defaultValue instead. It reports
-// whether the read was successful.
-func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool {
-	var present bool
-	var child String
-	if !s.ReadOptionalASN1(&child, &present, tag) {
-		return false
-	}
-
-	if !present {
-		*out = defaultValue
-		return true
-	}
-
-	return child.ReadASN1Boolean(out)
-}
-
-func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
-	if len(*s) < 2 {
-		return false
-	}
-	tag, lenByte := (*s)[0], (*s)[1]
-
-	if tag&0x1f == 0x1f {
-		// ITU-T X.690 section 8.1.2
-		//
-		// An identifier octet with a tag part of 0x1f indicates a high-tag-number
-		// form identifier with two or more octets. We only support tags less than
-		// 31 (and thus single-octet identifier octets).
-		return false
-	}
-
-	if outTag != nil {
-		*outTag = asn1.Tag(tag)
-	}
-
-	// ITU-T X.690 section 8.1.3
-	//
-	// Bit 8 of the first length byte indicates whether the length is short- or
-	// long-form.
-	var length, headerLen uint32 // length includes headerLen
-	if lenByte&0x80 == 0 {
-		// Short-form length (section 8.1.3.4), encoded in bits 1-7.
-		length = uint32(lenByte) + 2
-		headerLen = 2
-	} else {
-		// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
-		// used to encode the length.
-		lenLen := lenByte & 0x7f
-		var len32 uint32
-
-		if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
-			return false
-		}
-
-		lenBytes := String((*s)[2 : 2+lenLen])
-		if !lenBytes.readUnsigned(&len32, int(lenLen)) {
-			return false
-		}
-
-		// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
-		// with the minimum number of octets.
-		if len32 < 128 {
-			// Length should have used short-form encoding.
-			return false
-		}
-		if len32>>((lenLen-1)*8) == 0 {
-			// Leading octet is 0. Length should have been at least one byte shorter.
-			return false
-		}
-
-		headerLen = 2 + uint32(lenLen)
-		if headerLen+len32 < len32 {
-			// Overflow.
-			return false
-		}
-		length = headerLen + len32
-	}
-
-	if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
-		return false
-	}
-	if skipHeader && !out.Skip(int(headerLen)) {
-		panic("cryptobyte: internal error")
-	}
-
-	return true
-}
diff --git a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
deleted file mode 100644
index 90ef6a241d..0000000000
--- a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package asn1 contains supporting types for parsing and building ASN.1
-// messages with the cryptobyte package.
-package asn1
-
-// Tag represents an ASN.1 identifier octet, consisting of a tag number
-// (indicating a type) and class (such as context-specific or constructed).
-// -// Methods in the cryptobyte package only support the low-tag-number form, i.e. -// a single identifier octet with bits 7-8 encoding the class and bits 1-6 -// encoding the tag number. -type Tag uint8 - -const ( - classConstructed = 0x20 - classContextSpecific = 0x80 -) - -// Constructed returns t with the constructed class bit set. -func (t Tag) Constructed() Tag { return t | classConstructed } - -// ContextSpecific returns t with the context-specific class bit set. -func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } - -// The following is a list of standard tag and class combinations. -const ( - BOOLEAN = Tag(1) - INTEGER = Tag(2) - BIT_STRING = Tag(3) - OCTET_STRING = Tag(4) - NULL = Tag(5) - OBJECT_IDENTIFIER = Tag(6) - ENUM = Tag(10) - UTF8String = Tag(12) - SEQUENCE = Tag(16 | classConstructed) - SET = Tag(17 | classConstructed) - PrintableString = Tag(19) - T61String = Tag(20) - IA5String = Tag(22) - UTCTime = Tag(23) - GeneralizedTime = Tag(24) - GeneralString = Tag(27) -) diff --git a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/builder.go b/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/builder.go deleted file mode 100644 index cf254f5f1e..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/builder.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - "errors" - "fmt" -) - -// A Builder builds byte strings from fixed-length and length-prefixed values. -// Builders either allocate space as needed, or are ‘fixed’, which means that -// they write into a given buffer and produce an error if it's exhausted. -// -// The zero value is a usable Builder that allocates space as needed. -// -// Simple values are marshaled and appended to a Builder using methods on the -// Builder. Length-prefixed values are marshaled by providing a -// BuilderContinuation, which is a function that writes the inner contents of -// the value to a given Builder. See the documentation for BuilderContinuation -// for details. -type Builder struct { - err error - result []byte - fixedSize bool - child *Builder - offset int - pendingLenLen int - pendingIsASN1 bool - inContinuation *bool -} - -// NewBuilder creates a Builder that appends its output to the given buffer. -// Like append(), the slice will be reallocated if its capacity is exceeded. -// Use Bytes to get the final buffer. -func NewBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - } -} - -// NewFixedBuilder creates a Builder that appends its output into the given -// buffer. This builder does not reallocate the output buffer. Writes that -// would exceed the buffer's capacity are treated as an error. -func NewFixedBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - fixedSize: true, - } -} - -// SetError sets the value to be returned as the error from Bytes. Writes -// performed after calling SetError are ignored. -func (b *Builder) SetError(err error) { - b.err = err -} - -// Bytes returns the bytes written by the builder or an error if one has -// occurred during building. -func (b *Builder) Bytes() ([]byte, error) { - if b.err != nil { - return nil, b.err - } - return b.result[b.offset:], nil -} - -// BytesOrPanic returns the bytes written by the builder or panics if an error -// has occurred during building. 
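
Editor's aside, not part of the vendored file: a minimal sketch of the Tag class-bit helpers from the asn1 subpackage deleted above. An EXPLICIT [0] wrapper is context-specific, constructed, tag number 0, which composes to 0x80 | 0x20 | 0 = 0xa0.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	t := asn1.Tag(0).Constructed().ContextSpecific()
	fmt.Printf("0x%02x\n", uint8(t)) // 0xa0
}
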
-func (b *Builder) BytesOrPanic() []byte { - if b.err != nil { - panic(b.err) - } - return b.result[b.offset:] -} - -// AddUint8 appends an 8-bit value to the byte string. -func (b *Builder) AddUint8(v uint8) { - b.add(byte(v)) -} - -// AddUint16 appends a big-endian, 16-bit value to the byte string. -func (b *Builder) AddUint16(v uint16) { - b.add(byte(v>>8), byte(v)) -} - -// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest -// byte of the 32-bit input value is silently truncated. -func (b *Builder) AddUint24(v uint32) { - b.add(byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint32 appends a big-endian, 32-bit value to the byte string. -func (b *Builder) AddUint32(v uint32) { - b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint48 appends a big-endian, 48-bit value to the byte string. -func (b *Builder) AddUint48(v uint64) { - b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint64 appends a big-endian, 64-bit value to the byte string. -func (b *Builder) AddUint64(v uint64) { - b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddBytes appends a sequence of bytes to the byte string. -func (b *Builder) AddBytes(v []byte) { - b.add(v...) -} - -// BuilderContinuation is a continuation-passing interface for building -// length-prefixed byte sequences. Builder methods for length-prefixed -// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation -// supplied to them. The child builder passed to the continuation can be used -// to build the content of the length-prefixed sequence. For example: -// -// parent := cryptobyte.NewBuilder() -// parent.AddUint8LengthPrefixed(func (child *Builder) { -// child.AddUint8(42) -// child.AddUint8LengthPrefixed(func (grandchild *Builder) { -// grandchild.AddUint8(5) -// }) -// }) -// -// It is an error to write more bytes to the child than allowed by the reserved -// length prefix. After the continuation returns, the child must be considered -// invalid, i.e. users must not store any copies or references of the child -// that outlive the continuation. -// -// If the continuation panics with a value of type BuildError then the inner -// error will be returned as the error from Bytes. If the child panics -// otherwise then Bytes will repanic with the same value. -type BuilderContinuation func(child *Builder) - -// BuildError wraps an error. If a BuilderContinuation panics with this value, -// the panic will be recovered and the inner error will be returned from -// Builder.Bytes. -type BuildError struct { - Err error -} - -// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. -func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(1, false, f) -} - -// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. -func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(2, false, f) -} - -// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. -func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(3, false, f) -} - -// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. 
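
Editor's aside, not part of the vendored file: a minimal sketch of the BuilderContinuation pattern documented above. The builder reserves the length bytes before running the continuation and backfills them afterwards, so the caller never computes a length prefix by hand.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddUint16LengthPrefixed(func(c *cryptobyte.Builder) {
		c.AddBytes([]byte("hi"))
	})
	out, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%% x\n", out) // 00 02 68 69
}
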
-func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(4, false, f) -} - -func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { - if !*b.inContinuation { - *b.inContinuation = true - - defer func() { - *b.inContinuation = false - - r := recover() - if r == nil { - return - } - - if buildError, ok := r.(BuildError); ok { - b.err = buildError.Err - } else { - panic(r) - } - }() - } - - f(arg) -} - -func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { - // Subsequent writes can be ignored if the builder has encountered an error. - if b.err != nil { - return - } - - offset := len(b.result) - b.add(make([]byte, lenLen)...) - - if b.inContinuation == nil { - b.inContinuation = new(bool) - } - - b.child = &Builder{ - result: b.result, - fixedSize: b.fixedSize, - offset: offset, - pendingLenLen: lenLen, - pendingIsASN1: isASN1, - inContinuation: b.inContinuation, - } - - b.callContinuation(f, b.child) - b.flushChild() - if b.child != nil { - panic("cryptobyte: internal error") - } -} - -func (b *Builder) flushChild() { - if b.child == nil { - return - } - b.child.flushChild() - child := b.child - b.child = nil - - if child.err != nil { - b.err = child.err - return - } - - length := len(child.result) - child.pendingLenLen - child.offset - - if length < 0 { - panic("cryptobyte: internal error") // result unexpectedly shrunk - } - - if child.pendingIsASN1 { - // For ASN.1, we reserved a single byte for the length. If that turned out - // to be incorrect, we have to move the contents along in order to make - // space. - if child.pendingLenLen != 1 { - panic("cryptobyte: internal error") - } - var lenLen, lenByte uint8 - if int64(length) > 0xfffffffe { - b.err = errors.New("pending ASN.1 child too long") - return - } else if length > 0xffffff { - lenLen = 5 - lenByte = 0x80 | 4 - } else if length > 0xffff { - lenLen = 4 - lenByte = 0x80 | 3 - } else if length > 0xff { - lenLen = 3 - lenByte = 0x80 | 2 - } else if length > 0x7f { - lenLen = 2 - lenByte = 0x80 | 1 - } else { - lenLen = 1 - lenByte = uint8(length) - length = 0 - } - - // Insert the initial length byte, make space for successive length bytes, - // and adjust the offset. - child.result[child.offset] = lenByte - extraBytes := int(lenLen - 1) - if extraBytes != 0 { - child.add(make([]byte, extraBytes)...) - childStart := child.offset + child.pendingLenLen - copy(child.result[childStart+extraBytes:], child.result[childStart:]) - } - child.offset++ - child.pendingLenLen = extraBytes - } - - l := length - for i := child.pendingLenLen - 1; i >= 0; i-- { - child.result[child.offset+i] = uint8(l) - l >>= 8 - } - if l != 0 { - b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) - return - } - - if b.fixedSize && &b.result[0] != &child.result[0] { - panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") - } - - b.result = child.result -} - -func (b *Builder) add(bytes ...byte) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted write while child is pending") - } - if len(b.result)+len(bytes) < len(bytes) { - b.err = errors.New("cryptobyte: length overflow") - } - if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { - b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") - return - } - b.result = append(b.result, bytes...) -} - -// Unwrite rolls back non-negative n bytes written directly to the Builder. 
-// An attempt by a child builder passed to a continuation to unwrite bytes -// from its parent will panic. -func (b *Builder) Unwrite(n int) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted unwrite while child is pending") - } - length := len(b.result) - b.pendingLenLen - b.offset - if length < 0 { - panic("cryptobyte: internal error") - } - if n < 0 { - panic("cryptobyte: attempted to unwrite negative number of bytes") - } - if n > length { - panic("cryptobyte: attempted to unwrite more than was written") - } - b.result = b.result[:len(b.result)-n] -} - -// A MarshalingValue marshals itself into a Builder. -type MarshalingValue interface { - // Marshal is called by Builder.AddValue. It receives a pointer to a builder - // to marshal itself into. It may return an error that occurred during - // marshaling, such as unset or invalid values. - Marshal(b *Builder) error -} - -// AddValue calls Marshal on v, passing a pointer to the builder to append to. -// If Marshal returns an error, it is set on the Builder so that subsequent -// appends don't have an effect. -func (b *Builder) AddValue(v MarshalingValue) { - err := v.Marshal(b) - if err != nil { - b.err = err - } -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/string.go b/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/string.go deleted file mode 100644 index 4b0f8097f9..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/cryptobyte/string.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cryptobyte contains types that help with parsing and constructing -// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage -// contains useful ASN.1 constants.) -// -// The String type is for parsing. It wraps a []byte slice and provides helper -// functions for consuming structures, value by value. -// -// The Builder type is for constructing messages. It providers helper functions -// for appending values and also for appending length-prefixed submessages – -// without having to worry about calculating the length prefix ahead of time. -// -// See the documentation and examples for the Builder and String types to get -// started. -package cryptobyte - -// String represents a string of bytes. It provides methods for parsing -// fixed-length and length-prefixed values from it. -type String []byte - -// read advances a String by n bytes and returns them. If less than n bytes -// remain, it returns nil. -func (s *String) read(n int) []byte { - if len(*s) < n || n < 0 { - return nil - } - v := (*s)[:n] - *s = (*s)[n:] - return v -} - -// Skip advances the String by n byte and reports whether it was successful. -func (s *String) Skip(n int) bool { - return s.read(n) != nil -} - -// ReadUint8 decodes an 8-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint8(out *uint8) bool { - v := s.read(1) - if v == nil { - return false - } - *out = uint8(v[0]) - return true -} - -// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint16(out *uint16) bool { - v := s.read(2) - if v == nil { - return false - } - *out = uint16(v[0])<<8 | uint16(v[1]) - return true -} - -// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. 
-// It reports whether the read was successful. -func (s *String) ReadUint24(out *uint32) bool { - v := s.read(3) - if v == nil { - return false - } - *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) - return true -} - -// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint32(out *uint32) bool { - v := s.read(4) - if v == nil { - return false - } - *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) - return true -} - -// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint48(out *uint64) bool { - v := s.read(6) - if v == nil { - return false - } - *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) - return true -} - -// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint64(out *uint64) bool { - v := s.read(8) - if v == nil { - return false - } - *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) - return true -} - -func (s *String) readUnsigned(out *uint32, length int) bool { - v := s.read(length) - if v == nil { - return false - } - var result uint32 - for i := 0; i < length; i++ { - result <<= 8 - result |= uint32(v[i]) - } - *out = result - return true -} - -func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { - lenBytes := s.read(lenLen) - if lenBytes == nil { - return false - } - var length uint32 - for _, b := range lenBytes { - length = length << 8 - length = length | uint32(b) - } - v := s.read(int(length)) - if v == nil { - return false - } - *outChild = v - return true -} - -// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value -// into out and advances over it. It reports whether the read was successful. -func (s *String) ReadUint8LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(1, out) -} - -// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit -// length-prefixed value into out and advances over it. It reports whether the -// read was successful. -func (s *String) ReadUint16LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(2, out) -} - -// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit -// length-prefixed value into out and advances over it. It reports whether -// the read was successful. -func (s *String) ReadUint24LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(3, out) -} - -// ReadBytes reads n bytes into out and advances over them. It reports -// whether the read was successful. -func (s *String) ReadBytes(out *[]byte, n int) bool { - v := s.read(n) - if v == nil { - return false - } - *out = v - return true -} - -// CopyBytes copies len(out) bytes into out and advances over them. It reports -// whether the copy operation was successful -func (s *String) CopyBytes(out []byte) bool { - n := len(out) - v := s.read(n) - if v == nil { - return false - } - return copy(out, v) == n -} - -// Empty reports whether the string does not contain any bytes. 
-func (s String) Empty() bool { - return len(s) == 0 -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/hkdf/hkdf.go b/openshift/tools/vendor/golang.org/x/crypto/hkdf/hkdf.go deleted file mode 100644 index 3bee66294e..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation -// Function (HKDF) as defined in RFC 5869. -// -// HKDF is a cryptographic key derivation function (KDF) with the goal of -// expanding limited input keying material into one or more cryptographically -// strong secret keys. -package hkdf - -import ( - "crypto/hmac" - "errors" - "hash" - "io" -) - -// Extract generates a pseudorandom key for use with Expand from an input secret -// and an optional independent salt. -// -// Only use this function if you need to reuse the extracted key with multiple -// Expand invocations and different context values. Most common scenarios, -// including the generation of multiple keys, should use New instead. -func Extract(hash func() hash.Hash, secret, salt []byte) []byte { - if salt == nil { - salt = make([]byte, hash().Size()) - } - extractor := hmac.New(hash, salt) - extractor.Write(secret) - return extractor.Sum(nil) -} - -type hkdf struct { - expander hash.Hash - size int - - info []byte - counter byte - - prev []byte - buf []byte -} - -func (f *hkdf) Read(p []byte) (int, error) { - // Check whether enough data can be generated - need := len(p) - remains := len(f.buf) + int(255-f.counter+1)*f.size - if remains < need { - return 0, errors.New("hkdf: entropy limit reached") - } - // Read any leftover from the buffer - n := copy(p, f.buf) - p = p[n:] - - // Fill the rest of the buffer - for len(p) > 0 { - if f.counter > 1 { - f.expander.Reset() - } - f.expander.Write(f.prev) - f.expander.Write(f.info) - f.expander.Write([]byte{f.counter}) - f.prev = f.expander.Sum(f.prev[:0]) - f.counter++ - - // Copy the new batch into p - f.buf = f.prev - n = copy(p, f.buf) - p = p[n:] - } - // Save leftovers for next run - f.buf = f.buf[n:] - - return need, nil -} - -// Expand returns a Reader, from which keys can be read, using the given -// pseudorandom key and optional context info, skipping the extraction step. -// -// The pseudorandomKey should have been generated by Extract, or be a uniformly -// random or pseudorandom cryptographically strong key. See RFC 5869, Section -// 3.3. Most common scenarios will want to use New instead. -func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { - expander := hmac.New(hash, pseudorandomKey) - return &hkdf{expander, expander.Size(), info, 1, nil, nil} -} - -// New returns a Reader, from which keys can be read, using the given hash, -// secret, salt and context info. Salt and info can be nil. -func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { - prk := Extract(hash, secret, salt) - return Expand(hash, prk, info) -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/doc.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/doc.go deleted file mode 100644 index bbf391fe6e..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha3 implements the SHA-3 fixed-output-length hash functions and -// the SHAKE variable-output-length hash functions defined by FIPS-202. -// -// All types in this package also implement [encoding.BinaryMarshaler], -// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and -// unmarshal the internal state of the hash. -// -// Both types of hash function use the "sponge" construction and the Keccak -// permutation. For a detailed specification see http://keccak.noekeon.org/ -// -// # Guidance -// -// If you aren't sure what function you need, use SHAKE256 with at least 64 -// bytes of output. The SHAKE instances are faster than the SHA3 instances; -// the latter have to allocate memory to conform to the hash.Hash interface. -// -// If you need a secret-key MAC (message authentication code), prepend the -// secret key to the input, hash with SHAKE256 and read at least 32 bytes of -// output. -// -// # Security strengths -// -// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security -// strength against preimage attacks of x bits. Since they only produce "x" -// bits of output, their collision-resistance is only "x/2" bits. -// -// The SHAKE-256 and -128 functions have a generic security strength of 256 and -// 128 bits against all attacks, provided that at least 2x bits of their output -// is used. Requesting more than 64 or 32 bytes of output, respectively, does -// not increase the collision-resistance of the SHAKE functions. -// -// # The sponge construction -// -// A sponge builds a pseudo-random function from a public pseudo-random -// permutation, by applying the permutation to a state of "rate + capacity" -// bytes, but hiding "capacity" of the bytes. -// -// A sponge starts out with a zero state. To hash an input using a sponge, up -// to "rate" bytes of the input are XORed into the sponge's state. The sponge -// is then "full" and the permutation is applied to "empty" it. This process is -// repeated until all the input has been "absorbed". The input is then padded. -// The digest is "squeezed" from the sponge in the same way, except that output -// is copied out instead of input being XORed in. -// -// A sponge is parameterized by its generic security strength, which is equal -// to half its capacity; capacity + rate is equal to the permutation's width. -// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means -// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. -// -// # Recommendations -// -// The SHAKE functions are recommended for most new uses. They can produce -// output of arbitrary length. SHAKE256, with an output length of at least -// 64 bytes, provides 256-bit security against all attacks. The Keccak team -// recommends it for most applications upgrading from SHA2-512. (NIST chose a -// much stronger, but much slower, sponge instance for SHA3-512.) -// -// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. -// They produce output of the same length, with the same security strengths -// against all attacks. This means, in particular, that SHA3-256 only has -// 128-bit collision resistance, because its output length is 32 bytes. 
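
Editor's aside, not part of the vendored file: a minimal sketch of the guidance above ("use SHAKE256 with at least 64 bytes of output"). It assumes the package's ShakeSum256 helper, which lives in this package but is not shown in this hunk.

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	out := make([]byte, 64) // 64 bytes gives the full 256-bit security level
	sha3.ShakeSum256(out, []byte("input data"))
	fmt.Printf("%x\n", out[:8])
}
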
-package sha3 diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/hashes.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/hashes.go deleted file mode 100644 index 31fffbe044..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/hashes.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// This file provides functions for creating instances of the SHA-3 -// and SHAKE hash functions, as well as utility functions for hashing -// bytes. - -import ( - "crypto" - "hash" -) - -// New224 creates a new SHA3-224 hash. -// Its generic security strength is 224 bits against preimage attacks, -// and 112 bits against collision attacks. -func New224() hash.Hash { - return new224() -} - -// New256 creates a new SHA3-256 hash. -// Its generic security strength is 256 bits against preimage attacks, -// and 128 bits against collision attacks. -func New256() hash.Hash { - return new256() -} - -// New384 creates a new SHA3-384 hash. -// Its generic security strength is 384 bits against preimage attacks, -// and 192 bits against collision attacks. -func New384() hash.Hash { - return new384() -} - -// New512 creates a new SHA3-512 hash. -// Its generic security strength is 512 bits against preimage attacks, -// and 256 bits against collision attacks. -func New512() hash.Hash { - return new512() -} - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} - -const ( - dsbyteSHA3 = 0b00000110 - dsbyteKeccak = 0b00000001 - dsbyteShake = 0b00011111 - dsbyteCShake = 0b00000100 - - // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in - // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. - rateK256 = (1600 - 256) / 8 - rateK448 = (1600 - 448) / 8 - rateK512 = (1600 - 512) / 8 - rateK768 = (1600 - 768) / 8 - rateK1024 = (1600 - 1024) / 8 -) - -func new224Generic() *state { - return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3} -} - -func new256Generic() *state { - return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3} -} - -func new384Generic() *state { - return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3} -} - -func new512Generic() *state { - return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} -} - -// NewLegacyKeccak256 creates a new Keccak-256 hash. -// -// Only use this function if you require compatibility with an existing cryptosystem -// that uses non-standard padding. All other users should use New256 instead. -func NewLegacyKeccak256() hash.Hash { - return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} -} - -// NewLegacyKeccak512 creates a new Keccak-512 hash. -// -// Only use this function if you require compatibility with an existing cryptosystem -// that uses non-standard padding. All other users should use New512 instead. -func NewLegacyKeccak512() hash.Hash { - return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} -} - -// Sum224 returns the SHA3-224 digest of the data. -func Sum224(data []byte) (digest [28]byte) { - h := New224() - h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum256 returns the SHA3-256 digest of the data. 
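
Editor's aside, not part of the vendored file: a minimal sketch of the one-shot Sum224/Sum256/Sum384/Sum512 helpers defined in this file. The expected digest in the comment is the widely published SHA3-256 test vector for "abc".

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	digest := sha3.Sum256([]byte("abc"))
	fmt.Printf("%x\n", digest)
	// expected: 3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532
}
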
-func Sum256(data []byte) (digest [32]byte) { - h := New256() - h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum384 returns the SHA3-384 digest of the data. -func Sum384(data []byte) (digest [48]byte) { - h := New384() - h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum512 returns the SHA3-512 digest of the data. -func Sum512(data []byte) (digest [64]byte) { - h := New512() - h.Write(data) - h.Sum(digest[:0]) - return -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/hashes_noasm.go deleted file mode 100644 index 9d85fb6214..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/hashes_noasm.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -func new224() *state { - return new224Generic() -} - -func new256() *state { - return new256Generic() -} - -func new384() *state { - return new384Generic() -} - -func new512() *state { - return new512Generic() -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf.go deleted file mode 100644 index ce48b1dd3e..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc - -package sha3 - -import "math/bits" - -// rc stores the round constants for use in the ι step. -var rc = [24]uint64{ - 0x0000000000000001, - 0x0000000000008082, - 0x800000000000808A, - 0x8000000080008000, - 0x000000000000808B, - 0x0000000080000001, - 0x8000000080008081, - 0x8000000000008009, - 0x000000000000008A, - 0x0000000000000088, - 0x0000000080008009, - 0x000000008000000A, - 0x000000008000808B, - 0x800000000000008B, - 0x8000000000008089, - 0x8000000000008003, - 0x8000000000008002, - 0x8000000000000080, - 0x000000000000800A, - 0x800000008000000A, - 0x8000000080008081, - 0x8000000000008080, - 0x0000000080000001, - 0x8000000080008008, -} - -// keccakF1600 applies the Keccak permutation to a 1600b-wide -// state represented as a slice of 25 uint64s. -func keccakF1600(a *[25]uint64) { - // Implementation translated from Keccak-inplace.c - // in the keccak reference code. - var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 - - for i := 0; i < 24; i += 4 { - // Combines the 5 steps in each round into 2 steps. - // Unrolls 4 rounds per loop and spreads some steps across rounds. 
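// Editor's aside — not part of the vendored file. The unrolled "Round 1"
// code below starts with the theta step: for each column x it computes the
// parity bc[x] of the five lanes in that column, then XORs a mix of the
// neighbouring parities into every lane. In loop form, what the bc0..bc4
// and d0..d4 assignments compute is:
//
//	var bc [5]uint64
//	for x := 0; x < 5; x++ {
//		bc[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
//	}
//	for x := 0; x < 5; x++ {
//		d := bc[(x+4)%5] ^ bits.RotateLeft64(bc[(x+1)%5], 1)
//		for y := 0; y < 25; y += 5 {
//			a[x+y] ^= d
//		}
//	}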
- - // Round 1 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[6] ^ d1 - bc1 = bits.RotateLeft64(t, 44) - t = a[12] ^ d2 - bc2 = bits.RotateLeft64(t, 43) - t = a[18] ^ d3 - bc3 = bits.RotateLeft64(t, 21) - t = a[24] ^ d4 - bc4 = bits.RotateLeft64(t, 14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] - a[6] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc2 = bits.RotateLeft64(t, 3) - t = a[16] ^ d1 - bc3 = bits.RotateLeft64(t, 45) - t = a[22] ^ d2 - bc4 = bits.RotateLeft64(t, 61) - t = a[3] ^ d3 - bc0 = bits.RotateLeft64(t, 28) - t = a[9] ^ d4 - bc1 = bits.RotateLeft64(t, 20) - a[10] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc4 = bits.RotateLeft64(t, 18) - t = a[1] ^ d1 - bc0 = bits.RotateLeft64(t, 1) - t = a[7] ^ d2 - bc1 = bits.RotateLeft64(t, 6) - t = a[13] ^ d3 - bc2 = bits.RotateLeft64(t, 25) - t = a[19] ^ d4 - bc3 = bits.RotateLeft64(t, 8) - a[20] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc1 = bits.RotateLeft64(t, 36) - t = a[11] ^ d1 - bc2 = bits.RotateLeft64(t, 10) - t = a[17] ^ d2 - bc3 = bits.RotateLeft64(t, 15) - t = a[23] ^ d3 - bc4 = bits.RotateLeft64(t, 56) - t = a[4] ^ d4 - bc0 = bits.RotateLeft64(t, 27) - a[5] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc3 = bits.RotateLeft64(t, 41) - t = a[21] ^ d1 - bc4 = bits.RotateLeft64(t, 2) - t = a[2] ^ d2 - bc0 = bits.RotateLeft64(t, 62) - t = a[8] ^ d3 - bc1 = bits.RotateLeft64(t, 55) - t = a[14] ^ d4 - bc2 = bits.RotateLeft64(t, 39) - a[15] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - // Round 2 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[16] ^ d1 - bc1 = bits.RotateLeft64(t, 44) - t = a[7] ^ d2 - bc2 = bits.RotateLeft64(t, 43) - t = a[23] ^ d3 - bc3 = bits.RotateLeft64(t, 21) - t = a[14] ^ d4 - bc4 = bits.RotateLeft64(t, 14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] - a[16] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc2 = bits.RotateLeft64(t, 3) - t = a[11] ^ d1 - bc3 = bits.RotateLeft64(t, 45) - t = a[2] ^ d2 - bc4 = bits.RotateLeft64(t, 61) - t = a[18] ^ d3 - bc0 = bits.RotateLeft64(t, 28) - t = a[9] ^ d4 - bc1 = bits.RotateLeft64(t, 20) - a[20] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc4 = bits.RotateLeft64(t, 18) - t = a[6] 
^ d1 - bc0 = bits.RotateLeft64(t, 1) - t = a[22] ^ d2 - bc1 = bits.RotateLeft64(t, 6) - t = a[13] ^ d3 - bc2 = bits.RotateLeft64(t, 25) - t = a[4] ^ d4 - bc3 = bits.RotateLeft64(t, 8) - a[15] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc1 = bits.RotateLeft64(t, 36) - t = a[1] ^ d1 - bc2 = bits.RotateLeft64(t, 10) - t = a[17] ^ d2 - bc3 = bits.RotateLeft64(t, 15) - t = a[8] ^ d3 - bc4 = bits.RotateLeft64(t, 56) - t = a[24] ^ d4 - bc0 = bits.RotateLeft64(t, 27) - a[10] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc3 = bits.RotateLeft64(t, 41) - t = a[21] ^ d1 - bc4 = bits.RotateLeft64(t, 2) - t = a[12] ^ d2 - bc0 = bits.RotateLeft64(t, 62) - t = a[3] ^ d3 - bc1 = bits.RotateLeft64(t, 55) - t = a[19] ^ d4 - bc2 = bits.RotateLeft64(t, 39) - a[5] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - // Round 3 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[11] ^ d1 - bc1 = bits.RotateLeft64(t, 44) - t = a[22] ^ d2 - bc2 = bits.RotateLeft64(t, 43) - t = a[8] ^ d3 - bc3 = bits.RotateLeft64(t, 21) - t = a[19] ^ d4 - bc4 = bits.RotateLeft64(t, 14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] - a[11] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc2 = bits.RotateLeft64(t, 3) - t = a[1] ^ d1 - bc3 = bits.RotateLeft64(t, 45) - t = a[12] ^ d2 - bc4 = bits.RotateLeft64(t, 61) - t = a[23] ^ d3 - bc0 = bits.RotateLeft64(t, 28) - t = a[9] ^ d4 - bc1 = bits.RotateLeft64(t, 20) - a[15] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc4 = bits.RotateLeft64(t, 18) - t = a[16] ^ d1 - bc0 = bits.RotateLeft64(t, 1) - t = a[2] ^ d2 - bc1 = bits.RotateLeft64(t, 6) - t = a[13] ^ d3 - bc2 = bits.RotateLeft64(t, 25) - t = a[24] ^ d4 - bc3 = bits.RotateLeft64(t, 8) - a[5] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc1 = bits.RotateLeft64(t, 36) - t = a[6] ^ d1 - bc2 = bits.RotateLeft64(t, 10) - t = a[17] ^ d2 - bc3 = bits.RotateLeft64(t, 15) - t = a[3] ^ d3 - bc4 = bits.RotateLeft64(t, 56) - t = a[14] ^ d4 - bc0 = bits.RotateLeft64(t, 27) - a[20] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc3 = bits.RotateLeft64(t, 41) - t = a[21] ^ d1 - bc4 = bits.RotateLeft64(t, 2) - t = a[7] ^ d2 - bc0 = bits.RotateLeft64(t, 62) - t = a[18] ^ d3 - bc1 = bits.RotateLeft64(t, 55) - t = a[4] ^ d4 - bc2 = bits.RotateLeft64(t, 39) - a[10] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - // Round 4 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 
= a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[1] ^ d1 - bc1 = bits.RotateLeft64(t, 44) - t = a[2] ^ d2 - bc2 = bits.RotateLeft64(t, 43) - t = a[3] ^ d3 - bc3 = bits.RotateLeft64(t, 21) - t = a[4] ^ d4 - bc4 = bits.RotateLeft64(t, 14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] - a[1] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc2 = bits.RotateLeft64(t, 3) - t = a[6] ^ d1 - bc3 = bits.RotateLeft64(t, 45) - t = a[7] ^ d2 - bc4 = bits.RotateLeft64(t, 61) - t = a[8] ^ d3 - bc0 = bits.RotateLeft64(t, 28) - t = a[9] ^ d4 - bc1 = bits.RotateLeft64(t, 20) - a[5] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc4 = bits.RotateLeft64(t, 18) - t = a[11] ^ d1 - bc0 = bits.RotateLeft64(t, 1) - t = a[12] ^ d2 - bc1 = bits.RotateLeft64(t, 6) - t = a[13] ^ d3 - bc2 = bits.RotateLeft64(t, 25) - t = a[14] ^ d4 - bc3 = bits.RotateLeft64(t, 8) - a[10] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc1 = bits.RotateLeft64(t, 36) - t = a[16] ^ d1 - bc2 = bits.RotateLeft64(t, 10) - t = a[17] ^ d2 - bc3 = bits.RotateLeft64(t, 15) - t = a[18] ^ d3 - bc4 = bits.RotateLeft64(t, 56) - t = a[19] ^ d4 - bc0 = bits.RotateLeft64(t, 27) - a[15] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc3 = bits.RotateLeft64(t, 41) - t = a[21] ^ d1 - bc4 = bits.RotateLeft64(t, 2) - t = a[22] ^ d2 - bc0 = bits.RotateLeft64(t, 62) - t = a[23] ^ d3 - bc1 = bits.RotateLeft64(t, 55) - t = a[24] ^ d4 - bc2 = bits.RotateLeft64(t, 39) - a[20] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - } -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go deleted file mode 100644 index b908696be5..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && !purego && gc - -package sha3 - -// This function is implemented in keccakf_amd64.s. - -//go:noescape - -func keccakF1600(a *[25]uint64) diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s deleted file mode 100644 index 99e2f16e97..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ /dev/null @@ -1,5419 +0,0 @@ -// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT. 
- -//go:build amd64 && !purego && gc - -// func keccakF1600(a *[25]uint64) -TEXT ·keccakF1600(SB), $200-8 - MOVQ a+0(FP), DI - - // Convert the user state into an internal state - NOTQ 8(DI) - NOTQ 16(DI) - NOTQ 64(DI) - NOTQ 96(DI) - NOTQ 136(DI) - NOTQ 160(DI) - - // Execute the KeccakF permutation - MOVQ (DI), SI - MOVQ 8(DI), BP - MOVQ 32(DI), R15 - XORQ 40(DI), SI - XORQ 48(DI), BP - XORQ 72(DI), R15 - XORQ 80(DI), SI - XORQ 88(DI), BP - XORQ 112(DI), R15 - XORQ 120(DI), SI - XORQ 128(DI), BP - XORQ 152(DI), R15 - XORQ 160(DI), SI - XORQ 168(DI), BP - MOVQ 176(DI), DX - MOVQ 184(DI), R8 - XORQ 192(DI), R15 - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(DI), R12 - XORQ 56(DI), DX - XORQ R15, BX - XORQ 96(DI), R12 - XORQ 136(DI), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(DI), R13 - XORQ 64(DI), R8 - XORQ SI, CX - XORQ 104(DI), R13 - XORQ 144(DI), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (DI), R10 - MOVQ 48(DI), R11 - XORQ R13, R9 - MOVQ 96(DI), R12 - MOVQ 144(DI), R13 - MOVQ 192(DI), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x0000000000000001, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (SP) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(SP) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(SP) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(SP) - MOVQ R12, 8(SP) - MOVQ R12, BP - - // Result g - MOVQ 72(DI), R11 - XORQ R9, R11 - MOVQ 80(DI), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(DI), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(DI), R13 - MOVQ 176(DI), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(SP) - XORQ AX, SI - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(SP) - XORQ AX, BP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(SP) - NOTQ R14 - XORQ R10, R15 - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(SP) - - // Result k - MOVQ 8(DI), R10 - MOVQ 56(DI), R11 - MOVQ 104(DI), R12 - MOVQ 152(DI), R13 - MOVQ 160(DI), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(SP) - XORQ AX, SI - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(SP) - XORQ AX, BP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(SP) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 104(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 112(SP) - XORQ R10, R15 - - // Result m - MOVQ 40(DI), R11 - XORQ BX, R11 - MOVQ 88(DI), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(DI), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(DI), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(DI), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(SP) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(SP) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(SP) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(SP) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ R14, 144(SP) - XORQ R11, R15 - 
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000000008082, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x800000000000808a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000080008000, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000000000808b, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000080000001, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000080008081, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008009, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000000000008a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000000000088, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000080008009, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000008000000a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000008000808b, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x800000000000008b, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008089, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008003, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008002, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000000080, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000000000800a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x800000008000000a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000080008081, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ
R10, R15 - - // Result m - MOVQ 40(DI), R11 - XORQ BX, R11 - MOVQ 88(DI), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(DI), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(DI), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(DI), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(SP) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(SP) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(SP) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(SP) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ R14, 144(SP) - XORQ R11, R15 - - // Result s - MOVQ 16(DI), R10 - MOVQ 64(DI), R11 - MOVQ 112(DI), R12 - XORQ DX, R10 - MOVQ 120(DI), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(DI), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(SP) - ROLQ $0x27, R12 - XORQ R9, R15 - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(SP) - XORQ BX, SI - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(SP) - XORQ CX, BP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(SP) - MOVQ R8, 184(SP) - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(SP), R12 - XORQ 56(SP), DX - XORQ R15, BX - XORQ 96(SP), R12 - XORQ 136(SP), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(SP), R13 - XORQ 64(SP), R8 - XORQ SI, CX - XORQ 104(SP), R13 - XORQ 144(SP), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (SP), R10 - MOVQ 48(SP), R11 - XORQ R13, R9 - MOVQ 96(SP), R12 - MOVQ 144(SP), R13 - MOVQ 192(SP), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x8000000000008080, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (DI) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(DI) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(DI) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(DI) - MOVQ R12, 8(DI) - MOVQ R12, BP - - // Result g - MOVQ 72(SP), R11 - XORQ R9, R11 - MOVQ 80(SP), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(SP), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(SP), R13 - MOVQ 176(SP), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(DI) - XORQ AX, SI - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(DI) - XORQ AX, BP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(DI) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(DI) - NOTQ R14 - XORQ R10, R15 - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(DI) - - // Result k - MOVQ 8(SP), R10 - MOVQ 56(SP), R11 - MOVQ 104(SP), R12 - MOVQ 152(SP), R13 - MOVQ 160(SP), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(DI) - XORQ AX, SI - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(DI) - XORQ AX, BP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(DI) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 104(DI) - ANDQ R11, R10 - XORQ R14, 
R10 - MOVQ R10, 112(DI) - XORQ R10, R15 - - // Result m - MOVQ 40(SP), R11 - XORQ BX, R11 - MOVQ 88(SP), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(SP), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(SP), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(SP), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(DI) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(DI) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(DI) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(DI) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ R14, 144(DI) - XORQ R11, R15 - - // Result s - MOVQ 16(SP), R10 - MOVQ 64(SP), R11 - MOVQ 112(SP), R12 - XORQ DX, R10 - MOVQ 120(SP), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(SP), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(DI) - ROLQ $0x27, R12 - XORQ R9, R15 - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(DI) - XORQ BX, SI - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(DI) - XORQ CX, BP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(DI) - MOVQ R8, 184(DI) - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(DI), R12 - XORQ 56(DI), DX - XORQ R15, BX - XORQ 96(DI), R12 - XORQ 136(DI), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(DI), R13 - XORQ 64(DI), R8 - XORQ SI, CX - XORQ 104(DI), R13 - XORQ 144(DI), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (DI), R10 - MOVQ 48(DI), R11 - XORQ R13, R9 - MOVQ 96(DI), R12 - MOVQ 144(DI), R13 - MOVQ 192(DI), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x0000000080000001, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (SP) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(SP) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(SP) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(SP) - MOVQ R12, 8(SP) - MOVQ R12, BP - - // Result g - MOVQ 72(DI), R11 - XORQ R9, R11 - MOVQ 80(DI), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(DI), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(DI), R13 - MOVQ 176(DI), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(SP) - XORQ AX, SI - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(SP) - XORQ AX, BP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(SP) - NOTQ R14 - XORQ R10, R15 - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(SP) - - // Result k - MOVQ 8(DI), R10 - MOVQ 56(DI), R11 - MOVQ 104(DI), R12 - MOVQ 152(DI), R13 - MOVQ 160(DI), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(SP) - XORQ AX, SI - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(SP) - XORQ AX, BP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(SP) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 
104(SP) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 112(SP) - XORQ R10, R15 - - // Result m - MOVQ 40(DI), R11 - XORQ BX, R11 - MOVQ 88(DI), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(DI), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(DI), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(DI), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(SP) - XORQ AX, SI - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(SP) - XORQ AX, BP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(SP) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(SP) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ R14, 144(SP) - XORQ R11, R15 - - // Result s - MOVQ 16(DI), R10 - MOVQ 64(DI), R11 - MOVQ 112(DI), R12 - XORQ DX, R10 - MOVQ 120(DI), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(DI), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(SP) - ROLQ $0x27, R12 - XORQ R9, R15 - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(SP) - XORQ BX, SI - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(SP) - XORQ CX, BP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(SP) - MOVQ R8, 184(SP) - - // Prepare round - MOVQ BP, BX - ROLQ $0x01, BX - MOVQ 16(SP), R12 - XORQ 56(SP), DX - XORQ R15, BX - XORQ 96(SP), R12 - XORQ 136(SP), DX - XORQ DX, R12 - MOVQ R12, CX - ROLQ $0x01, CX - MOVQ 24(SP), R13 - XORQ 64(SP), R8 - XORQ SI, CX - XORQ 104(SP), R13 - XORQ 144(SP), R8 - XORQ R8, R13 - MOVQ R13, DX - ROLQ $0x01, DX - MOVQ R15, R8 - XORQ BP, DX - ROLQ $0x01, R8 - MOVQ SI, R9 - XORQ R12, R8 - ROLQ $0x01, R9 - - // Result b - MOVQ (SP), R10 - MOVQ 48(SP), R11 - XORQ R13, R9 - MOVQ 96(SP), R12 - MOVQ 144(SP), R13 - MOVQ 192(SP), R14 - XORQ CX, R11 - ROLQ $0x2c, R11 - XORQ DX, R12 - XORQ BX, R10 - ROLQ $0x2b, R12 - MOVQ R11, SI - MOVQ $0x8000000080008008, AX - ORQ R12, SI - XORQ R10, AX - XORQ AX, SI - MOVQ SI, (DI) - XORQ R9, R14 - ROLQ $0x0e, R14 - MOVQ R10, R15 - ANDQ R11, R15 - XORQ R14, R15 - MOVQ R15, 32(DI) - XORQ R8, R13 - ROLQ $0x15, R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 16(DI) - NOTQ R12 - ORQ R10, R14 - ORQ R13, R12 - XORQ R13, R14 - XORQ R11, R12 - MOVQ R14, 24(DI) - MOVQ R12, 8(DI) - NOP - - // Result g - MOVQ 72(SP), R11 - XORQ R9, R11 - MOVQ 80(SP), R12 - ROLQ $0x14, R11 - XORQ BX, R12 - ROLQ $0x03, R12 - MOVQ 24(SP), R10 - MOVQ R11, AX - ORQ R12, AX - XORQ R8, R10 - MOVQ 128(SP), R13 - MOVQ 176(SP), R14 - ROLQ $0x1c, R10 - XORQ R10, AX - MOVQ AX, 40(DI) - NOP - XORQ CX, R13 - ROLQ $0x2d, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 48(DI) - NOP - XORQ DX, R14 - ROLQ $0x3d, R14 - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 64(DI) - ANDQ R11, R10 - XORQ R14, R10 - MOVQ R10, 72(DI) - NOTQ R14 - NOP - ORQ R14, R13 - XORQ R12, R13 - MOVQ R13, 56(DI) - - // Result k - MOVQ 8(SP), R10 - MOVQ 56(SP), R11 - MOVQ 104(SP), R12 - MOVQ 152(SP), R13 - MOVQ 160(SP), R14 - XORQ DX, R11 - ROLQ $0x06, R11 - XORQ R8, R12 - ROLQ $0x19, R12 - MOVQ R11, AX - ORQ R12, AX - XORQ CX, R10 - ROLQ $0x01, R10 - XORQ R10, AX - MOVQ AX, 80(DI) - NOP - XORQ R9, R13 - ROLQ $0x08, R13 - MOVQ R12, AX - ANDQ R13, AX - XORQ R11, AX - MOVQ AX, 88(DI) - NOP - XORQ BX, R14 - ROLQ $0x12, R14 - NOTQ R13 - MOVQ R13, AX - ANDQ R14, AX - XORQ R12, AX - MOVQ AX, 96(DI) - MOVQ R14, AX - ORQ R10, AX - XORQ R13, AX - MOVQ AX, 104(DI) - ANDQ R11, 
R10 - XORQ R14, R10 - MOVQ R10, 112(DI) - NOP - - // Result m - MOVQ 40(SP), R11 - XORQ BX, R11 - MOVQ 88(SP), R12 - ROLQ $0x24, R11 - XORQ CX, R12 - MOVQ 32(SP), R10 - ROLQ $0x0a, R12 - MOVQ R11, AX - MOVQ 136(SP), R13 - ANDQ R12, AX - XORQ R9, R10 - MOVQ 184(SP), R14 - ROLQ $0x1b, R10 - XORQ R10, AX - MOVQ AX, 120(DI) - NOP - XORQ DX, R13 - ROLQ $0x0f, R13 - MOVQ R12, AX - ORQ R13, AX - XORQ R11, AX - MOVQ AX, 128(DI) - NOP - XORQ R8, R14 - ROLQ $0x38, R14 - NOTQ R13 - MOVQ R13, AX - ORQ R14, AX - XORQ R12, AX - MOVQ AX, 136(DI) - ORQ R10, R11 - XORQ R14, R11 - MOVQ R11, 152(DI) - ANDQ R10, R14 - XORQ R13, R14 - MOVQ R14, 144(DI) - NOP - - // Result s - MOVQ 16(SP), R10 - MOVQ 64(SP), R11 - MOVQ 112(SP), R12 - XORQ DX, R10 - MOVQ 120(SP), R13 - ROLQ $0x3e, R10 - XORQ R8, R11 - MOVQ 168(SP), R14 - ROLQ $0x37, R11 - XORQ R9, R12 - MOVQ R10, R9 - XORQ CX, R14 - ROLQ $0x02, R14 - ANDQ R11, R9 - XORQ R14, R9 - MOVQ R9, 192(DI) - ROLQ $0x27, R12 - NOP - NOTQ R11 - XORQ BX, R13 - MOVQ R11, BX - ANDQ R12, BX - XORQ R10, BX - MOVQ BX, 160(DI) - NOP - ROLQ $0x29, R13 - MOVQ R12, CX - ORQ R13, CX - XORQ R11, CX - MOVQ CX, 168(DI) - NOP - MOVQ R13, DX - MOVQ R14, R8 - ANDQ R14, DX - ORQ R10, R8 - XORQ R12, DX - XORQ R13, R8 - MOVQ DX, 176(DI) - MOVQ R8, 184(DI) - - // Revert the internal state to the user state - NOTQ 8(DI) - NOTQ 16(DI) - NOTQ 64(DI) - NOTQ 96(DI) - NOTQ 136(DI) - NOTQ 160(DI) - RET diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3.go deleted file mode 100644 index 6658c44479..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import ( - "crypto/subtle" - "encoding/binary" - "errors" - "unsafe" - - "golang.org/x/sys/cpu" -) - -// spongeDirection indicates the direction bytes are flowing through the sponge. -type spongeDirection int - -const ( - // spongeAbsorbing indicates that the sponge is absorbing input. - spongeAbsorbing spongeDirection = iota - // spongeSqueezing indicates that the sponge is being squeezed. - spongeSqueezing -) - -type state struct { - a [1600 / 8]byte // main state of the hash - - // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR - // into before running the permutation. If squeezing, it's the remaining - // output to produce before running the permutation. - n, rate int - - // dsbyte contains the "domain separation" bits and the first bit of - // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the - // SHA-3 and SHAKE functions by appending bitstrings to the message. - // Using a little-endian bit-ordering convention, these are "01" for SHA-3 - // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the - // padding rule from section 5.1 is applied to pad the message to a multiple - // of the rate, which involves adding a "1" bit, zero or more "0" bits, and - // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, - // giving 00000110b (0x06) and 00011111b (0x1f). 
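The two dsbyte constants in the comment above are easy to rederive. A standalone sketch (my own, not part of the vendored code), following the little-endian bit convention the comment describes:

```go
package main

import "fmt"

func main() {
	// With little-endian bit ordering the first appended bit is the LSB:
	// SHA-3 appends the domain bits "01" -> 0b10, SHAKE appends "1111".
	const sha3Bits, sha3Len = 0b10, 2
	const shakeBits, shakeLen = 0b1111, 4

	// Merging the first "1" bit of the pad10*1 rule sets the next bit up.
	fmt.Printf("%#04x\n", sha3Bits|1<<sha3Len)   // 0x06
	fmt.Printf("%#04x\n", shakeBits|1<<shakeLen) // 0x1f
}
```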
- // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf - // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and - // Extendable-Output Functions (May 2014)" - dsbyte byte - - outputLen int // the default output size in bytes - state spongeDirection // whether the sponge is absorbing or squeezing -} - -// BlockSize returns the rate of sponge underlying this hash function. -func (d *state) BlockSize() int { return d.rate } - -// Size returns the output size of the hash function in bytes. -func (d *state) Size() int { return d.outputLen } - -// Reset clears the internal state by zeroing the sponge state and -// the buffer indexes, and setting Sponge.state to absorbing. -func (d *state) Reset() { - // Zero the permutation's state. - for i := range d.a { - d.a[i] = 0 - } - d.state = spongeAbsorbing - d.n = 0 -} - -func (d *state) clone() *state { - ret := *d - return &ret -} - -// permute applies the KeccakF-1600 permutation. -func (d *state) permute() { - var a *[25]uint64 - if cpu.IsBigEndian { - a = new([25]uint64) - for i := range a { - a[i] = binary.LittleEndian.Uint64(d.a[i*8:]) - } - } else { - a = (*[25]uint64)(unsafe.Pointer(&d.a)) - } - - keccakF1600(a) - d.n = 0 - - if cpu.IsBigEndian { - for i := range a { - binary.LittleEndian.PutUint64(d.a[i*8:], a[i]) - } - } -} - -// pads appends the domain separation bits in dsbyte, applies -// the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *state) padAndPermute() { - // Pad with this instance's domain-separator bits. We know that there's - // at least one byte of space in the sponge because, if it were full, - // permute would have been called to empty it. dsbyte also contains the - // first one bit for the padding. See the comment in the state struct. - d.a[d.n] ^= d.dsbyte - // This adds the final one bit for the padding. Because of the way that - // bits are numbered from the LSB upwards, the final bit is the MSB of - // the last byte. - d.a[d.rate-1] ^= 0x80 - // Apply the permutation - d.permute() - d.state = spongeSqueezing -} - -// Write absorbs more data into the hash's state. It panics if any -// output has already been read. -func (d *state) Write(p []byte) (n int, err error) { - if d.state != spongeAbsorbing { - panic("sha3: Write after Read") - } - - n = len(p) - - for len(p) > 0 { - x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p) - d.n += x - p = p[x:] - - // If the sponge is full, apply the permutation. - if d.n == d.rate { - d.permute() - } - } - - return -} - -// Read squeezes an arbitrary number of bytes from the sponge. -func (d *state) Read(out []byte) (n int, err error) { - // If we're still absorbing, pad and apply the permutation. - if d.state == spongeAbsorbing { - d.padAndPermute() - } - - n = len(out) - - // Now, do the squeezing. - for len(out) > 0 { - // Apply the permutation if we've squeezed the sponge dry. - if d.n == d.rate { - d.permute() - } - - x := copy(out, d.a[d.n:d.rate]) - d.n += x - out = out[x:] - } - - return -} - -// Sum applies padding to the hash state and then squeezes out the desired -// number of output bytes. It panics if any output has already been read. -func (d *state) Sum(in []byte) []byte { - if d.state != spongeAbsorbing { - panic("sha3: Sum after Read") - } - - // Make a copy of the original hash so that caller can keep writing - // and summing. - dup := d.clone() - hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation - dup.Read(hash) - return append(in, hash...) 
-} - -const ( - magicSHA3 = "sha\x08" - magicShake = "sha\x09" - magicCShake = "sha\x0a" - magicKeccak = "sha\x0b" - // magic || rate || main state || n || sponge direction - marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1 -) - -func (d *state) MarshalBinary() ([]byte, error) { - return d.AppendBinary(make([]byte, 0, marshaledSize)) -} - -func (d *state) AppendBinary(b []byte) ([]byte, error) { - switch d.dsbyte { - case dsbyteSHA3: - b = append(b, magicSHA3...) - case dsbyteShake: - b = append(b, magicShake...) - case dsbyteCShake: - b = append(b, magicCShake...) - case dsbyteKeccak: - b = append(b, magicKeccak...) - default: - panic("unknown dsbyte") - } - // rate is at most 168, and n is at most rate. - b = append(b, byte(d.rate)) - b = append(b, d.a[:]...) - b = append(b, byte(d.n), byte(d.state)) - return b, nil -} - -func (d *state) UnmarshalBinary(b []byte) error { - if len(b) != marshaledSize { - return errors.New("sha3: invalid hash state") - } - - magic := string(b[:len(magicSHA3)]) - b = b[len(magicSHA3):] - switch { - case magic == magicSHA3 && d.dsbyte == dsbyteSHA3: - case magic == magicShake && d.dsbyte == dsbyteShake: - case magic == magicCShake && d.dsbyte == dsbyteCShake: - case magic == magicKeccak && d.dsbyte == dsbyteKeccak: - default: - return errors.New("sha3: invalid hash state identifier") - } - - rate := int(b[0]) - b = b[1:] - if rate != d.rate { - return errors.New("sha3: invalid hash state function") - } - - copy(d.a[:], b) - b = b[len(d.a):] - - n, state := int(b[0]), spongeDirection(b[1]) - if n > d.rate { - return errors.New("sha3: invalid hash state") - } - d.n = n - if state != spongeAbsorbing && state != spongeSqueezing { - return errors.New("sha3: invalid hash state") - } - d.state = state - - return nil -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.go deleted file mode 100644 index 00d8034ae6..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego - -package sha3 - -// This file contains code for using the 'compute intermediate -// message digest' (KIMD) and 'compute last message digest' (KLMD) -// instructions to compute SHA-3 and SHAKE hashes on IBM Z. - -import ( - "hash" - - "golang.org/x/sys/cpu" -) - -// codes represent 7-bit KIMD/KLMD function codes as defined in -// the Principles of Operation. -type code uint64 - -const ( - // function codes for KIMD/KLMD - sha3_224 code = 32 - sha3_256 = 33 - sha3_384 = 34 - sha3_512 = 35 - shake_128 = 36 - shake_256 = 37 - nopad = 0x100 -) - -// kimd is a wrapper for the 'compute intermediate message digest' instruction. -// src must be a multiple of the rate for the given function code. -// -//go:noescape -func kimd(function code, chain *[200]byte, src []byte) - -// klmd is a wrapper for the 'compute last message digest' instruction. -// src padding is handled by the instruction. 
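Stepping back to the sha3.go (un)marshaling code removed above: it serializes the whole sponge as magic || rate || state || n || direction, which is what makes mid-stream suspend/resume possible. A usage sketch, assuming the vendored module is importable and that the concrete state is reachable through the standard encoding interfaces, as the methods above suggest:

```go
package main

import (
	"encoding"
	"fmt"
	"log"

	"golang.org/x/crypto/sha3"
)

func main() {
	h1 := sha3.New256()
	h1.Write([]byte("part one"))

	// Serialize the sponge mid-stream (magic || rate || state || n || direction).
	blob, err := h1.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}

	h2 := sha3.New256()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(blob); err != nil {
		log.Fatal(err)
	}
	h2.Write([]byte(", part two"))
	fmt.Printf("%x\n", h2.Sum(nil)) // same as hashing "part one, part two" in one shot
}
```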
-// -//go:noescape -func klmd(function code, chain *[200]byte, dst, src []byte) - -type asmState struct { - a [200]byte // 1600 bit state - buf []byte // care must be taken to ensure cap(buf) is a multiple of rate - rate int // equivalent to block size - storage [3072]byte // underlying storage for buf - outputLen int // output length for full security - function code // KIMD/KLMD function code - state spongeDirection // whether the sponge is absorbing or squeezing -} - -func newAsmState(function code) *asmState { - var s asmState - s.function = function - switch function { - case sha3_224: - s.rate = 144 - s.outputLen = 28 - case sha3_256: - s.rate = 136 - s.outputLen = 32 - case sha3_384: - s.rate = 104 - s.outputLen = 48 - case sha3_512: - s.rate = 72 - s.outputLen = 64 - case shake_128: - s.rate = 168 - s.outputLen = 32 - case shake_256: - s.rate = 136 - s.outputLen = 64 - default: - panic("sha3: unrecognized function code") - } - - // limit s.buf size to a multiple of s.rate - s.resetBuf() - return &s -} - -func (s *asmState) clone() *asmState { - c := *s - c.buf = c.storage[:len(s.buf):cap(s.buf)] - return &c -} - -// copyIntoBuf copies b into buf. It will panic if there is not enough space to -// store all of b. -func (s *asmState) copyIntoBuf(b []byte) { - bufLen := len(s.buf) - s.buf = s.buf[:len(s.buf)+len(b)] - copy(s.buf[bufLen:], b) -} - -// resetBuf points buf at storage, sets the length to 0 and sets cap to be a -// multiple of the rate. -func (s *asmState) resetBuf() { - max := (cap(s.storage) / s.rate) * s.rate - s.buf = s.storage[:0:max] -} - -// Write (via the embedded io.Writer interface) adds more data to the running hash. -// It never returns an error. -func (s *asmState) Write(b []byte) (int, error) { - if s.state != spongeAbsorbing { - panic("sha3: Write after Read") - } - length := len(b) - for len(b) > 0 { - if len(s.buf) == 0 && len(b) >= cap(s.buf) { - // Hash the data directly and push any remaining bytes - // into the buffer. - remainder := len(b) % s.rate - kimd(s.function, &s.a, b[:len(b)-remainder]) - if remainder != 0 { - s.copyIntoBuf(b[len(b)-remainder:]) - } - return length, nil - } - - if len(s.buf) == cap(s.buf) { - // flush the buffer - kimd(s.function, &s.a, s.buf) - s.buf = s.buf[:0] - } - - // copy as much as we can into the buffer - n := len(b) - if len(b) > cap(s.buf)-len(s.buf) { - n = cap(s.buf) - len(s.buf) - } - s.copyIntoBuf(b[:n]) - b = b[n:] - } - return length, nil -} - -// Read squeezes an arbitrary number of bytes from the sponge. -func (s *asmState) Read(out []byte) (n int, err error) { - // The 'compute last message digest' instruction only stores the digest - // at the first operand (dst) for SHAKE functions. 
- if s.function != shake_128 && s.function != shake_256 { - panic("sha3: can only call Read for SHAKE functions") - } - - n = len(out) - - // need to pad if we were absorbing - if s.state == spongeAbsorbing { - s.state = spongeSqueezing - - // write hash directly into out if possible - if len(out)%s.rate == 0 { - klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 - s.buf = s.buf[:0] - return - } - - // write hash into buffer - max := cap(s.buf) - if max > len(out) { - max = (len(out)/s.rate)*s.rate + s.rate - } - klmd(s.function, &s.a, s.buf[:max], s.buf) - s.buf = s.buf[:max] - } - - for len(out) > 0 { - // flush the buffer - if len(s.buf) != 0 { - c := copy(out, s.buf) - out = out[c:] - s.buf = s.buf[c:] - continue - } - - // write hash directly into out if possible - if len(out)%s.rate == 0 { - klmd(s.function|nopad, &s.a, out, nil) - return - } - - // write hash into buffer - s.resetBuf() - if cap(s.buf) > len(out) { - s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] - } - klmd(s.function|nopad, &s.a, s.buf, nil) - } - return -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (s *asmState) Sum(b []byte) []byte { - if s.state != spongeAbsorbing { - panic("sha3: Sum after Read") - } - - // Copy the state to preserve the original. - a := s.a - - // Hash the buffer. Note that we don't clear it because we - // aren't updating the state. - switch s.function { - case sha3_224, sha3_256, sha3_384, sha3_512: - klmd(s.function, &a, nil, s.buf) - return append(b, a[:s.outputLen]...) - case shake_128, shake_256: - d := make([]byte, s.outputLen, 64) - klmd(s.function, &a, d, s.buf) - return append(b, d[:s.outputLen]...) - default: - panic("sha3: unknown function") - } -} - -// Reset resets the Hash to its initial state. -func (s *asmState) Reset() { - for i := range s.a { - s.a[i] = 0 - } - s.resetBuf() - s.state = spongeAbsorbing -} - -// Size returns the number of bytes Sum will return. -func (s *asmState) Size() int { - return s.outputLen -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (s *asmState) BlockSize() int { - return s.rate -} - -// Clone returns a copy of the ShakeHash in its current state. -func (s *asmState) Clone() ShakeHash { - return s.clone() -} - -// new224 returns an assembly implementation of SHA3-224 if available, -// otherwise it returns a generic implementation. -func new224() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_224) - } - return new224Generic() -} - -// new256 returns an assembly implementation of SHA3-256 if available, -// otherwise it returns a generic implementation. -func new256() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_256) - } - return new256Generic() -} - -// new384 returns an assembly implementation of SHA3-384 if available, -// otherwise it returns a generic implementation. -func new384() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_384) - } - return new384Generic() -} - -// new512 returns an assembly implementation of SHA3-512 if available, -// otherwise it returns a generic implementation. -func new512() hash.Hash { - if cpu.S390X.HasSHA3 { - return newAsmState(sha3_512) - } - return new512Generic() -} - -// newShake128 returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns a generic implementation. 
-func newShake128() ShakeHash {
-	if cpu.S390X.HasSHA3 {
-		return newAsmState(shake_128)
-	}
-	return newShake128Generic()
-}
-
-// newShake256 returns an assembly implementation of SHAKE-256 if available,
-// otherwise it returns a generic implementation.
-func newShake256() ShakeHash {
-	if cpu.S390X.HasSHA3 {
-		return newAsmState(shake_256)
-	}
-	return newShake256Generic()
-}
diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
deleted file mode 100644
index 826b862c77..0000000000
--- a/openshift/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-#include "textflag.h"
-
-// func kimd(function code, chain *[200]byte, src []byte)
-TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
-	MOVD function+0(FP), R0
-	MOVD chain+8(FP), R1
-	LMG  src+16(FP), R2, R3 // R2=base, R3=len
-
-continue:
-	WORD $0xB93E0002 // KIMD --, R2
-	BVS  continue    // continue if interrupted
-	MOVD $0, R0      // reset R0 for pre-go1.8 compilers
-	RET
-
-// func klmd(function code, chain *[200]byte, dst, src []byte)
-TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
-	// TODO: SHAKE support
-	MOVD function+0(FP), R0
-	MOVD chain+8(FP), R1
-	LMG  dst+16(FP), R2, R3 // R2=base, R3=len
-	LMG  src+40(FP), R4, R5 // R4=base, R5=len
-
-continue:
-	WORD $0xB93F0024 // KLMD R2, R4
-	BVS  continue    // continue if interrupted
-	MOVD $0, R0      // reset R0 for pre-go1.8 compilers
-	RET
diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/shake.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/shake.go
deleted file mode 100644
index a6b3a4281f..0000000000
--- a/openshift/tools/vendor/golang.org/x/crypto/sha3/shake.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file defines the ShakeHash interface, and provides
-// functions for creating SHAKE and cSHAKE instances, as well as utility
-// functions for hashing bytes to arbitrary-length output.
-//
-//
-// SHAKE implementation is based on FIPS PUB 202 [1]
-// cSHAKE implementation is based on NIST SP 800-185 [2]
-//
-// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
-// [2] https://doi.org/10.6028/NIST.SP.800-185
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"hash"
-	"io"
-	"math/bits"
-)
-
-// ShakeHash defines the interface to hash functions that support
-// arbitrary-length output. When used as a plain [hash.Hash], it
-// produces minimum-length outputs that provide full-strength generic
-// security.
-type ShakeHash interface {
-	hash.Hash
-
-	// Read reads more output from the hash; reading affects the hash's
-	// state. (ShakeHash.Read is thus very different from Hash.Sum)
-	// It never returns an error, but subsequent calls to Write or Sum
-	// will panic.
-	io.Reader
-
-	// Clone returns a copy of the ShakeHash in its current state.
-	Clone() ShakeHash
-}
-
-// cSHAKE specific context
-type cshakeState struct {
-	*state // SHA-3 state context and Read/Write operations
-
-	// initBlock is the cSHAKE specific initialization set of bytes. It is initialized
-	// by the newCShake function and stores the concatenation of N followed by S, encoded
-	// by the method specified in 3.3 of [1].
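Before the cSHAKE-specific details that follow, here is the plain-SHAKE side of the ShakeHash contract above in use (a minimal sketch against the public API): Read streams output and advances the sponge, unlike Sum.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.NewShake128()
	h.Write([]byte("input"))

	// Read advances the state: these two reads return consecutive
	// 16-byte chunks of one arbitrary-length output stream.
	a := make([]byte, 16)
	b := make([]byte, 16)
	h.Read(a)
	h.Read(b)
	fmt.Printf("%x%x\n", a, b) // == first 32 bytes of SHAKE128("input")
}
```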
-	// It is stored here in order for Reset() to be able to put the context into
-	// its initial state.
-	initBlock []byte
-}
-
-func bytepad(data []byte, rate int) []byte {
-	out := make([]byte, 0, 9+len(data)+rate-1)
-	out = append(out, leftEncode(uint64(rate))...)
-	out = append(out, data...)
-	if padlen := rate - len(out)%rate; padlen < rate {
-		out = append(out, make([]byte, padlen)...)
-	}
-	return out
-}
-
-func leftEncode(x uint64) []byte {
-	// Let n be the smallest positive integer for which 2^(8n) > x.
-	n := (bits.Len64(x) + 7) / 8
-	if n == 0 {
-		n = 1
-	}
-	// Return n || x with n as a byte and x as n bytes in big-endian order.
-	b := make([]byte, 9)
-	binary.BigEndian.PutUint64(b[1:], x)
-	b = b[9-n-1:]
-	b[0] = byte(n)
-	return b
-}
-
-func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
-	c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}
-	c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes
-	c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...)
-	c.initBlock = append(c.initBlock, N...)
-	c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...)
-	c.initBlock = append(c.initBlock, S...)
-	c.Write(bytepad(c.initBlock, c.rate))
-	return &c
-}
-
-// Reset resets the hash to its initial state.
-func (c *cshakeState) Reset() {
-	c.state.Reset()
-	c.Write(bytepad(c.initBlock, c.rate))
-}
-
-// Clone returns a copy of the cSHAKE context in its current state.
-func (c *cshakeState) Clone() ShakeHash {
-	b := make([]byte, len(c.initBlock))
-	copy(b, c.initBlock)
-	return &cshakeState{state: c.clone(), initBlock: b}
-}
-
-// Clone returns a copy of the SHAKE context in its current state.
-func (c *state) Clone() ShakeHash {
-	return c.clone()
-}
-
-func (c *cshakeState) MarshalBinary() ([]byte, error) {
-	return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock)))
-}
-
-func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) {
-	b, err := c.state.AppendBinary(b)
-	if err != nil {
-		return nil, err
-	}
-	b = append(b, c.initBlock...)
-	return b, nil
-}
-
-func (c *cshakeState) UnmarshalBinary(b []byte) error {
-	if len(b) <= marshaledSize {
-		return errors.New("sha3: invalid hash state")
-	}
-	if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil {
-		return err
-	}
-	c.initBlock = bytes.Clone(b[marshaledSize:])
-	return nil
-}
-
-// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-func NewShake128() ShakeHash {
-	return newShake128()
-}
-
-// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-func NewShake256() ShakeHash {
-	return newShake256()
-}
-
-func newShake128Generic() *state {
-	return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake}
-}
-
-func newShake256Generic() *state {
-	return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake}
-}
-
-// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
-// a customizable variant of SHAKE128.
-// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
-// desired. S is a customization byte string used for domain separation - two cSHAKE
-// computations on the same input with different S yield unrelated outputs.
-// When N and S are both empty, this is equivalent to NewShake128.
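leftEncode is easiest to check on concrete values. A standalone mirror of the function above (same logic, my own spot checks), before the cSHAKE constructors that consume it:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// Mirror of leftEncode above: n (as one byte) followed by x in n
// big-endian bytes, where n is the minimal byte length of x.
func leftEncode(x uint64) []byte {
	n := (bits.Len64(x) + 7) / 8
	if n == 0 {
		n = 1
	}
	b := make([]byte, 9)
	binary.BigEndian.PutUint64(b[1:], x)
	b = b[9-n-1:]
	b[0] = byte(n)
	return b
}

func main() {
	fmt.Println(leftEncode(0))   // [1 0]
	fmt.Println(leftEncode(168)) // [1 168] -- the bytepad prefix for SHAKE128's rate
	fmt.Println(leftEncode(512)) // [2 2 0] -- len(N)*8 for a 64-byte function name N
}
```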
-func NewCShake128(N, S []byte) ShakeHash { - if len(N) == 0 && len(S) == 0 { - return NewShake128() - } - return newCShake(N, S, rateK256, 32, dsbyteCShake) -} - -// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, -// a customizable variant of SHAKE256. -// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is -// desired. S is a customization byte string used for domain separation - two cSHAKE -// computations on same input with different S yield unrelated outputs. -// When N and S are both empty, this is equivalent to NewShake256. -func NewCShake256(N, S []byte) ShakeHash { - if len(N) == 0 && len(S) == 0 { - return NewShake256() - } - return newCShake(N, S, rateK512, 64, dsbyteCShake) -} - -// ShakeSum128 writes an arbitrary-length digest of data into hash. -func ShakeSum128(hash, data []byte) { - h := NewShake128() - h.Write(data) - h.Read(hash) -} - -// ShakeSum256 writes an arbitrary-length digest of data into hash. -func ShakeSum256(hash, data []byte) { - h := NewShake256() - h.Write(data) - h.Read(hash) -} diff --git a/openshift/tools/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/openshift/tools/vendor/golang.org/x/crypto/sha3/shake_noasm.go deleted file mode 100644 index 4276ba4ab2..0000000000 --- a/openshift/tools/vendor/golang.org/x/crypto/sha3/shake_noasm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -func newShake128() *state { - return newShake128Generic() -} - -func newShake256() *state { - return newShake256Generic() -} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/config.go b/openshift/tools/vendor/golang.org/x/net/http2/config.go index ca645d9a1a..8a7a89d016 100644 --- a/openshift/tools/vendor/golang.org/x/net/http2/config.go +++ b/openshift/tools/vendor/golang.org/x/net/http2/config.go @@ -27,6 +27,7 @@ import ( // - If the resulting value is zero or out of range, use a default. type http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, CountError: h2.CountError, } - fillNetHTTPServerConfig(&conf, h1) + fillNetHTTPConfig(&conf, h1.HTTP2) setConfigDefaults(&conf, true) return conf } @@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { // (the net/http Transport). 
func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config { } if h2.t1 != nil { - fillNetHTTPTransportConfig(&conf, h2.t1) + fillNetHTTPConfig(&conf, h2.t1.HTTP2) } setConfigDefaults(&conf, false) return conf @@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 { const typicalHeaders = 10 // conservative return n + typicalHeaders*perFieldOverhead } + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/config_go124.go b/openshift/tools/vendor/golang.org/x/net/http2/config_go124.go deleted file mode 100644 index 5b516c55ff..0000000000 --- a/openshift/tools/vendor/golang.org/x/net/http2/config_go124.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.24 - -package http2 - -import "net/http" - -// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { - fillNetHTTPConfig(conf, srv.HTTP2) -} - -// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { - fillNetHTTPConfig(conf, tr.HTTP2) -} - -func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { - if h2 == nil { - return - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxEncoderHeaderTableSize != 0 { - conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) - } - if h2.MaxDecoderHeaderTableSize != 0 { - conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxReadFrameSize != 0 { - conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) - } - if h2.MaxReceiveBufferPerConnection != 0 { - conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) - } - if h2.MaxReceiveBufferPerStream != 0 { - conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) - } - if h2.SendPingTimeout != 0 { - conf.SendPingTimeout = h2.SendPingTimeout - } - if h2.PingTimeout != 0 { - conf.PingTimeout = h2.PingTimeout - } - if h2.WriteByteTimeout != 0 { - conf.WriteByteTimeout = h2.WriteByteTimeout - } - if h2.PermitProhibitedCipherSuites { - conf.PermitProhibitedCipherSuites = true - } - if h2.CountError != nil { - conf.CountError = h2.CountError - } -} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/config_go125.go b/openshift/tools/vendor/golang.org/x/net/http2/config_go125.go new file mode 100644 index 0000000000..b4373fe33c --- /dev/null +++ b/openshift/tools/vendor/golang.org/x/net/http2/config_go125.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return false +} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/config_go126.go b/openshift/tools/vendor/golang.org/x/net/http2/config_go126.go new file mode 100644 index 0000000000..6b071c149d --- /dev/null +++ b/openshift/tools/vendor/golang.org/x/net/http2/config_go126.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/config_pre_go124.go b/openshift/tools/vendor/golang.org/x/net/http2/config_pre_go124.go deleted file mode 100644 index 060fd6c64c..0000000000 --- a/openshift/tools/vendor/golang.org/x/net/http2/config_pre_go124.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.24 - -package http2 - -import "net/http" - -// Pre-Go 1.24 fallback. -// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. 
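The config_go125.go/config_go126.go pair added above is the standard build-tag idiom for reading a standard-library field that only exists from a given Go release: two files with the same accessor signature, where the pre-release variant degrades to the zero value (the field could not even be referenced on older toolchains without breaking the build). The same shape reduced to a skeleton; the package name is hypothetical, and the field is the one the vendored code above assumes exists in Go 1.26:

```go
// featuregate_go126.go
//go:build go1.26

package featuregate

import "net/http"

// Compiled only on Go 1.26+, where the field exists.
func strictRequests(h2 *http.HTTP2Config) bool {
	return h2.StrictMaxConcurrentRequests
}
```

```go
// featuregate_go125.go
//go:build !go1.26

package featuregate

import "net/http"

// Compiled on older toolchains, where referencing the field would not
// build; callers simply see the feature as "off".
func strictRequests(h2 *http.HTTP2Config) bool {
	return false
}
```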
- -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} - -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/frame.go b/openshift/tools/vendor/golang.org/x/net/http2/frame.go index db3264da8c..9a4bd123c9 100644 --- a/openshift/tools/vendor/golang.org/x/net/http2/frame.go +++ b/openshift/tools/vendor/golang.org/x/net/http2/frame.go @@ -280,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. + lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. +func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } - return nil, ErrFrameTooLarge + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. 
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) {
+	if fr.lastFrame != nil {
+		fr.lastFrame.invalidate()
+	}
 	payload := fr.getReadBuf(fh.Length)
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
@@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 		}
 		return nil, err
 	}
-	if err := fr.checkFrameOrder(f); err != nil {
-		return nil, err
-	}
+	fr.lastFrame = f
 	if fr.logReads {
 		fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
 	}
@@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 	return f, nil
 }
 
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame or ReadFrameForHeader.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
+func (fr *Framer) ReadFrame() (Frame, error) {
+	fh, err := fr.ReadFrameHeader()
+	if err != nil {
+		return nil, err
+	}
+	return fr.ReadFrameForHeader(fh)
+}
+
 // connError returns ConnectionError(code) but first
 // stashes away a public reason so the caller can optionally relay it
 // to the peer before hanging up on them. This might help others debug
@@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error {
 // checkFrameOrder reports an error if f is an invalid frame to return
 // next from ReadFrame. Mostly it checks whether HEADERS and
 // CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error {
-	last := fr.lastFrame
-	fr.lastFrame = f
+func (fr *Framer) checkFrameOrder(fh FrameHeader) error {
+	lastType := fr.lastFrameType
+	fr.lastFrameType = fh.Type
 	if fr.AllowIllegalReads {
 		return nil
 	}
-	fh := f.Header()
 	if fr.lastHeaderStream != 0 {
 		if fh.Type != FrameContinuation {
 			return fr.connError(ErrCodeProtocol,
 				fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
 					fh.Type, fh.StreamID,
-					last.Header().Type, fr.lastHeaderStream))
+					lastType, fr.lastHeaderStream))
 		}
 		if fh.StreamID != fr.lastHeaderStream {
 			return fr.connError(ErrCodeProtocol,
@@ -1152,7 +1180,16 @@ type PriorityFrame struct {
 	PriorityParam
 }
 
-// PriorityParam are the stream prioritzation parameters.
+var defaultRFC9218Priority = PriorityParam{
+	incremental: 0,
+	urgency:     3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and the
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
+// PriorityParam holds the stream prioritization parameters.
 type PriorityParam struct {
 	// StreamDep is a 31-bit stream identifier for the
 	// stream that this stream depends on. Zero means no
@@ -1167,6 +1204,20 @@ type PriorityParam struct {
 	// the spec, "Add one to the value to obtain a weight between
 	// 1 and 256."
 	Weight uint8
+
+	// "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+	// [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+	// priority. The default is 3."
+	urgency uint8
+
+	// "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+	// [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+	// incrementally, i.e., provide some meaningful output as chunks of the
+	// response arrive."
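The ReadFrameHeader/ReadFrameForHeader split above turns ReadFrame into a simple composition and lets callers act on the 9-byte header before the payload is buffered. A sketch of such a caller, valid only against the updated vendored x/net; handle and the 1 MiB skip policy are hypothetical, and the flow-control consequences of discarding DATA are ignored here:

```go
// Sketch: fr is an *http2.Framer whose underlying reader is conn.
func readLoop(fr *http2.Framer, conn net.Conn, handle func(http2.Frame)) error {
	for {
		fh, err := fr.ReadFrameHeader()
		if err != nil {
			return err
		}
		if fh.Type == http2.FrameData && fh.Length > 1<<20 {
			// Per the docs above, the caller may consume the payload
			// directly from the Framer's io.Reader instead of buffering it.
			if _, err := io.CopyN(io.Discard, conn, int64(fh.Length)); err != nil {
				return err
			}
			continue
		}
		f, err := fr.ReadFrameForHeader(fh)
		if err != nil {
			return err
		}
		handle(f)
	}
}
```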
+	//
+	// We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+	// avoid unnecessary type conversions and because either type takes 1 byte.
+	incremental uint8
 }
 
 func (p PriorityParam) IsZero() bool {
diff --git a/openshift/tools/vendor/golang.org/x/net/http2/gotrack.go b/openshift/tools/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f8c7..9921ca096d 100644
--- a/openshift/tools/vendor/golang.org/x/net/http2/gotrack.go
+++ b/openshift/tools/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@ import (
 	"runtime"
 	"strconv"
 	"sync"
+	"sync/atomic"
 )
 
 var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
 
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
 type goroutineLock uint64
 
 func newGoroutineLock() goroutineLock {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return 0
 	}
 	return goroutineLock(curGoroutineID())
 }
 
 func (g goroutineLock) check() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@ func (g goroutineLock) check() {
 }
 
 func (g goroutineLock) checkNotOn() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() == uint64(g) {
diff --git a/openshift/tools/vendor/golang.org/x/net/http2/http2.go b/openshift/tools/vendor/golang.org/x/net/http2/http2.go
index 6c18ea230b..105fe12fef 100644
--- a/openshift/tools/vendor/golang.org/x/net/http2/http2.go
+++ b/openshift/tools/vendor/golang.org/x/net/http2/http2.go
@@ -11,13 +11,10 @@
 // requires Go 1.6 or later)
 //
 // See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
 package http2 // import "golang.org/x/net/http2"
 
 import (
 	"bufio"
-	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
@@ -37,7 +34,6 @@ var (
 	VerboseLogs    bool
 	logFrameWrites bool
 	logFrameReads  bool
-	inTests        bool
 
 	// Enabling extended CONNECT causes browsers to attempt to use
 	// WebSockets-over-HTTP/2. This results in problems when the server's websocket
@@ -257,15 +253,13 @@ func (cw closeWaiter) Wait() {
 // idle memory usage with many connections.
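The gotrack checks above hinge on curGoroutineID, which this hunk does not show. Go deliberately exposes no goroutine-ID API, so the conventional trick (which the real helper implements with more caching and care) is to parse the first line of runtime.Stack. A standalone sketch of that technique:

```go
package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// curGoroutineID parses "goroutine <id> [...]:" from the current stack dump.
func curGoroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	i := bytes.IndexByte(buf, ' ')
	if i < 0 {
		panic("unexpected runtime.Stack format")
	}
	id, err := strconv.ParseUint(string(buf[:i]), 10, 64)
	if err != nil {
		panic(fmt.Sprintf("bad stack header: %v", err))
	}
	return id
}

func main() {
	owner := curGoroutineID()
	done := make(chan struct{})
	go func() {
		defer close(done)
		// A different goroutine must see a different ID.
		fmt.Println(curGoroutineID() != owner) // true
	}()
	<-done
}
```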
type bufferedWriter struct { _ incomparable - group synctestGroupInterface // immutable - conn net.Conn // immutable - bw *bufio.Writer // non-nil when data is buffered - byteTimeout time.Duration // immutable, WriteByteTimeout + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { +func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter { return &bufferedWriter{ - group: group, conn: conn, byteTimeout: timeout, } @@ -316,24 +310,18 @@ func (w *bufferedWriter) Flush() error { type bufferedWriterTimeoutWriter bufferedWriter func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { - return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) + return writeWithByteTimeout(w.conn, w.byteTimeout, p) } // writeWithByteTimeout writes to conn. // If more than timeout passes without any bytes being written to the connection, // the write fails. -func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { +func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { if timeout <= 0 { return conn.Write(p) } for { - var now time.Time - if group == nil { - now = time.Now() - } else { - now = group.Now() - } - conn.SetWriteDeadline(now.Add(timeout)) + conn.SetWriteDeadline(time.Now().Add(timeout)) nn, err := conn.Write(p[n:]) n += nn if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { @@ -419,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() - -// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. -// It's defined as an interface here to let us keep synctestGroup entirely test-only -// and not a part of non-test builds. -type synctestGroupInterface interface { - Join() - Now() time.Time - NewTimer(d time.Duration) timer - AfterFunc(d time.Duration, f func()) timer - ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) -} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/server.go b/openshift/tools/vendor/golang.org/x/net/http2/server.go index 51fca38f61..bdc5520ebd 100644 --- a/openshift/tools/vendor/golang.org/x/net/http2/server.go +++ b/openshift/tools/vendor/golang.org/x/net/http2/server.go @@ -176,44 +176,15 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState - - // Synchronization group used for testing. - // Outside of tests, this is nil. - group synctestGroupInterface -} - -func (s *Server) markNewGoroutine() { - if s.group != nil { - s.group.Join() - } -} - -func (s *Server) now() time.Time { - if s.group != nil { - return s.group.Now() - } - return time.Now() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (s *Server) newTimer(d time.Duration) timer { - if s.group != nil { - return s.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
-func (s *Server) afterFunc(d time.Duration, f func()) timer { - if s.group != nil { - return s.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. + errChanPool sync.Pool } func (s *serverInternalState) registerConn(sc *serverConn) { @@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *serverInternalState) getErrChan() chan error { + if s == nil { + return errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *serverInternalState) putErrChan(ch chan error) { + if s == nil { + errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. @@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + conf.state = &serverInternalState{ + activeConns: make(map[*serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. 
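getErrChan and putErrChan above rely on the fact that a Go method with a pointer receiver can run on a nil receiver: a Server that never went through ConfigureServer has state == nil and silently falls back to the package-level pool. A minimal sketch of that shape, with hypothetical names standing in for serverInternalState:

package main

import (
	"fmt"
	"sync"
)

var globalPool = sync.Pool{New: func() any { return make(chan error, 1) }}

// state is a stand-in for serverInternalState: per-instance pool when
// configured, global pool when the receiver is nil.
type state struct{ pool sync.Pool }

func newState() *state {
	return &state{pool: sync.Pool{New: func() any { return make(chan error, 1) }}}
}

func (s *state) getErrChan() chan error {
	if s == nil {
		return globalPool.Get().(chan error) // server never configured
	}
	return s.pool.Get().(chan error)
}

func (s *state) putErrChan(ch chan error) {
	if s == nil {
		globalPool.Put(ch)
		return
	}
	s.pool.Put(ch)
}

func main() {
	var unconfigured *state // nil receiver is fine
	unconfigured.putErrChan(unconfigured.getErrChan())

	configured := newState()
	configured.putErrChan(configured.getErrChan())
	fmt.Println("both paths worked")
}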
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + if opts == nil { + opts = &ServeConnOpts{} + } s.serveConn(c, opts, nil) } @@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), + bw: newBufferedWriter(c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -638,11 +636,11 @@ type serverConn struct { pingSent bool sentPingData [8]byte goAwayCode ErrCode - shutdownTimer timer // nil until used - idleTimer timer // nil if unused + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused readIdleTimeout time.Duration pingTimeout time.Duration - readIdleTimer timer // nil if unused + readIdleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -687,12 +685,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline timer // nil if unused - writeDeadline timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused + writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -848,7 +846,6 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - sc.srv.markNewGoroutine() gate := make(chan struct{}) gateDone := func() { gate <- struct{}{} } for { @@ -881,7 +878,6 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. 
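The readFrames loop above (gate / gateDone) hands exactly one frame at a time to the serve loop and then parks until the serve loop signals it is finished, which is what makes it safe for the Framer to reuse its read buffer between frames. A stripped-down sketch of that handoff, with a toy producer in place of the real frame reader:

package main

import "fmt"

func main() {
	frames := make(chan []byte)
	gate := make(chan struct{})

	go func() {
		defer close(frames)
		buf := make([]byte, 16) // reused for every "frame"
		for i := 0; i < 3; i++ {
			n := copy(buf, fmt.Sprintf("frame-%d", i))
			frames <- buf[:n]
			<-gate // block until the consumer is done with buf
		}
	}()

	for f := range frames {
		fmt.Println(string(f)) // safe: the producer is parked on the gate
		gate <- struct{}{}     // let the producer reuse its buffer
	}
}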
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { - sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } if conf.SendPingTimeout > 0 { sc.readIdleTimeout = conf.SendPingTimeout - sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) defer sc.readIdleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() - lastFrameTime := sc.srv.now() + lastFrameTime := time.Now() loopNum := 0 for { loopNum++ @@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: - lastFrameTime = sc.srv.now() + lastFrameTime = time.Now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { } pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) - now := sc.srv.now() + now := time.Now() if pingAt.After(now) { // We received frames since arming the ping timer. // Reset it for the next possible timeout. @@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C(): + case <-timer.C: return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error { } } -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } @@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) + ch := sc.srv.state.getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - errChanPool.Put(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. 
if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = errChanPool.Get().(chan error) + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - errChanPool.Put(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) { if err == io.EOF { b.sawEOF = true } - if b.conn == nil && inTests { + if b.conn == nil { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) @@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = rws.conn.srv.now().UTC().Format(http.TimeFormat) + date = time.Now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. 
st.onReadTimeout() @@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(sc.srv.now())) + st.readDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) + st.writeDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), + done: sc.srv.state.getErrChan(), } select { @@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - errChanPool.Put(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } diff --git a/openshift/tools/vendor/golang.org/x/net/http2/timer.go b/openshift/tools/vendor/golang.org/x/net/http2/timer.go deleted file mode 100644 index 0b1c17b812..0000000000 --- a/openshift/tools/vendor/golang.org/x/net/http2/timer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import "time" - -// A timer is a time.Timer, as an interface which can be replaced in tests. -type timer = interface { - C() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -// timeTimer adapts a time.Timer to the timer interface. 
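The SetReadDeadline/SetWriteDeadline changes above all follow one pattern: lazily create a time.AfterFunc timer, Reset it when the deadline moves, and drop it when the deadline is cleared. A compact sketch of just that pattern (time.Until(d) is equivalent to the d.Sub(time.Now()) spelling used in the diff; the names here are illustrative):

package main

import (
	"fmt"
	"time"
)

type stream struct {
	deadline *time.Timer
}

// setDeadline mirrors the lazy-create/Reset/clear logic used for
// st.readDeadline and st.writeDeadline above.
func (s *stream) setDeadline(d time.Time, onTimeout func()) {
	switch {
	case d.IsZero():
		if s.deadline != nil {
			s.deadline.Stop()
			s.deadline = nil
		}
	case s.deadline == nil:
		s.deadline = time.AfterFunc(time.Until(d), onTimeout)
	default:
		s.deadline.Reset(time.Until(d))
	}
}

func main() {
	var st stream
	timedOut := make(chan struct{})
	st.setDeadline(time.Now().Add(10*time.Millisecond), func() { close(timedOut) })
	<-timedOut
	fmt.Println("deadline fired")
}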
-type timeTimer struct { - *time.Timer -} - -func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/openshift/tools/vendor/golang.org/x/net/http2/transport.go b/openshift/tools/vendor/golang.org/x/net/http2/transport.go index f26356b9cd..1965913e54 100644 --- a/openshift/tools/vendor/golang.org/x/net/http2/transport.go +++ b/openshift/tools/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -193,50 +194,6 @@ type Transport struct { type transportTestHooks struct { newclientconn func(*ClientConn) - group synctestGroupInterface -} - -func (t *Transport) markNewGoroutine() { - if t != nil && t.transportTestHooks != nil { - t.transportTestHooks.group.Join() - } -} - -func (t *Transport) now() time.Time { - if t != nil && t.transportTestHooks != nil { - return t.transportTestHooks.group.Now() - } - return time.Now() -} - -func (t *Transport) timeSince(when time.Time) time.Duration { - if t != nil && t.transportTestHooks != nil { - return t.now().Sub(when) - } - return time.Since(when) -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (t *Transport) newTimer(d time.Duration) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (t *Transport) afterFunc(d time.Duration, f func()) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} -} - -func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.ContextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -366,7 +323,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -399,6 +356,7 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. 
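For the retry path in RoundTripOpt above: the backoff doubles per retry starting at one second, with up to 10% random jitter added. A standalone sketch of that arithmetic; note the real code builds the duration as time.Second * time.Duration(backoff), which truncates the fractional jitter to whole seconds, while this sketch keeps it:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryBackoff mirrors the RoundTripOpt backoff: 2^(retry-1) seconds with
// up to 10% random jitter added on top.
func retryBackoff(retry uint) time.Duration {
	backoff := float64(uint(1) << (retry - 1)) // 1, 2, 4, ...
	backoff += backoff * (0.1 * rand.Float64())
	return time.Duration(backoff * float64(time.Second))
}

func main() {
	for retry := uint(1); retry <= 4; retry++ {
		fmt.Printf("retry %d: wait %v\n", retry, retryBackoff(retry))
	}
}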
@@ -534,14 +492,12 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { - cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() } type stickyErrWriter struct { - group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -551,7 +507,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + n, err = writeWithByteTimeout(sew.conn, sew.timeout, p) *sew.err = err return n, err } @@ -650,9 +606,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - tm := t.newTimer(d) + tm := time.NewTimer(d) select { - case <-tm.C(): + case <-tm.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): @@ -699,6 +655,7 @@ var ( errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnNotEstablished = errors.New("http2: client conn could not be established") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -829,7 +786,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -838,14 +796,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - lastActive: t.now(), + lastActive: time.Now(), } - var group synctestGroupInterface if t.transportTestHooks != nil { - t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn - group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -857,7 +812,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ - group: group, conn: c, timeout: conf.WriteByteTimeout, err: &cc.werr, @@ -906,7 +860,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // Start the idle timer after the connection is fully initialized. 
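stickyErrWriter above makes a connection's first write error "sticky": every later write short-circuits with the same error instead of touching the broken connection again. A self-contained sketch of the idea, where failAfter is a hypothetical stand-in for a dead net.Conn:

package main

import (
	"errors"
	"fmt"
	"io"
)

// stickyErrWriter remembers the first write error and returns it for all
// later writes, mirroring the type of the same name in transport.go.
type stickyErrWriter struct {
	w   io.Writer
	err *error
}

func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
	if *sew.err != nil {
		return 0, *sew.err
	}
	n, err = sew.w.Write(p)
	*sew.err = err
	return n, err
}

// failAfter succeeds for n writes, then fails, standing in for a dead conn.
type failAfter struct{ n int }

func (f *failAfter) Write(p []byte) (int, error) {
	f.n--
	if f.n < 0 {
		return 0, errors.New("connection broken")
	}
	return len(p), nil
}

func main() {
	var werr error
	w := stickyErrWriter{w: &failAfter{n: 1}, err: &werr}
	for i := 0; i < 3; i++ {
		_, err := w.Write([]byte("x"))
		fmt.Printf("write %d: err=%v\n", i, err) // nil, broken, broken
	}
}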
if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } go cc.readLoop() @@ -917,7 +871,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1067,7 +1021,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -1120,7 +1074,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1186,7 +1140,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { - cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1257,8 +1210,7 @@ func (cc *ClientConn) closeForError(err error) { // // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { - err := errors.New("http2: client connection force closed via ClientConn.Close") - cc.closeForError(err) + cc.closeForError(errClientConnForceClosed) return nil } @@ -1427,7 +1379,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { - cs.cc.t.markNewGoroutine() err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1558,9 +1509,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.t.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1753,7 +1704,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { // Return a fatal error which aborts the retry loop. 
return errClientConnNotEstablished } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } @@ -2092,10 +2043,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = cc.t.now() + cc.lastIdle = time.Now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2121,7 +2072,6 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { - cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2188,9 +2138,9 @@ func (rl *clientConnReadLoop) cleanup() { if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { unusedWaitTime = cc.idleTimeout } - idleTime := cc.t.now().Sub(cc.lastActive) + idleTime := time.Now().Sub(cc.lastActive) if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { - cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) } else { @@ -2250,9 +2200,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.readIdleTimeout - var t timer + var t *time.Timer if readIdleTimeout != 0 { - t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2998,7 +2948,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error { var pingError error errc := make(chan struct{}) go func() { - cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3128,35 +3077,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. +// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close } -func (gz *gzipReader) Read(p []byte) (n int, err error) { +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. 
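The gzipReader above draws from a sync.Pool of gzip.Readers, and the gzipPoolGet/gzipPoolPut helpers that follow apply the standard Reset-on-reuse pooling pattern (gzip's Reset can fail, hence the error path, and the put side parks the reader on a cheap stub source). For contrast, here is the same pattern with bufio.Reader, whose Reset cannot fail; firstLine is an illustrative name only:

package main

import (
	"bufio"
	"fmt"
	"strings"
	"sync"
)

var readerPool = sync.Pool{New: func() any { return bufio.NewReader(nil) }}

// firstLine borrows a pooled bufio.Reader, points it at src, and returns
// it to the pool with its reference dropped so src can be collected.
func firstLine(src string) string {
	br := readerPool.Get().(*bufio.Reader)
	defer func() {
		br.Reset(nil)
		readerPool.Put(br)
	}()
	br.Reset(strings.NewReader(src))
	line, _ := br.ReadString('\n')
	return line
}

func main() {
	fmt.Print(firstLine("hello\nworld\n")) // "hello\n"
}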
+func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() if gz.zerr != nil { - return 0, gz.zerr + return nil, gz.zerr } if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr } } - return gz.zr.Read(p) + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil } -func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err +// release returns the gzip.Reader to the pool if Close was called during Read. +func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. +func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil } gz.zerr = fs.ErrClosed - return nil +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + zr, err := gz.acquire() + if err != nil { + return 0, err + } + defer gz.release(zr) + + return zr.Read(p) +} + +func (gz *gzipReader) Close() error { + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } @@ -3228,7 +3244,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = cc.t.timeSince(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/openshift/tools/vendor/golang.org/x/net/http2/writesched.go b/openshift/tools/vendor/golang.org/x/net/http2/writesched.go index cc893adc29..7de27be525 100644 --- a/openshift/tools/vendor/golang.org/x/net/http2/writesched.go +++ b/openshift/tools/vendor/golang.org/x/net/http2/writesched.go @@ -42,6 +42,8 @@ type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. 
If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. +// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} + } + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority.go b/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority.go deleted file mode 100644 index f6783339d1..0000000000 --- a/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "fmt" - "math" - "sort" -) - -// RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 - -// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. 
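The two-stage currQueue/nextQueue layout described above gives amortized O(1) push and shift without the copy(q.s, q.s[1:]) the old shift paid on every call. Reduced to a standalone sketch over ints (twoStageQueue is an illustrative name):

package main

import "fmt"

// twoStageQueue mirrors writeQueue's storage: dequeue from curr[pos:],
// enqueue onto next, and swap the slices once curr runs dry.
type twoStageQueue struct {
	curr, next []int
	pos        int
}

func (q *twoStageQueue) empty() bool {
	return len(q.curr)-q.pos+len(q.next) == 0
}

func (q *twoStageQueue) push(v int) { q.next = append(q.next, v) }

func (q *twoStageQueue) shift() int {
	if q.empty() {
		panic("invalid use of queue")
	}
	if q.pos >= len(q.curr) {
		q.curr, q.pos, q.next = q.next, 0, q.curr[:0]
	}
	v := q.curr[q.pos]
	q.curr[q.pos] = 0 // zero the slot, like writeQueue does
	q.pos++
	return v
}

func main() {
	var q twoStageQueue
	for i := 1; i <= 3; i++ {
		q.push(i)
	}
	fmt.Print(q.shift(), " ") // 1
	q.push(4)                 // lands in next while curr drains
	for !q.empty() {
		fmt.Print(q.shift(), " ") // 2 3 4
	}
	fmt.Println()
}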
-type PriorityWriteSchedulerConfig struct { - // MaxClosedNodesInTree controls the maximum number of closed streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // "It is possible for a stream to become closed while prioritization - // information ... is in transit. ... This potentially creates suboptimal - // prioritization, since the stream could be given a priority that is - // different from what is intended. To avoid these problems, an endpoint - // SHOULD retain stream prioritization state for a period after streams - // become closed. The longer state is retained, the lower the chance that - // streams are assigned incorrect or default priority values." - MaxClosedNodesInTree int - - // MaxIdleNodesInTree controls the maximum number of idle streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // Similarly, streams that are in the "idle" state can be assigned - // priority or become a parent of other streams. This allows for the - // creation of a grouping node in the dependency tree, which enables - // more flexible expressions of priority. Idle streams begin with a - // default priority (Section 5.3.5). - MaxIdleNodesInTree int - - // ThrottleOutOfOrderWrites enables write throttling to help ensure that - // data is delivered in priority order. This works around a race where - // stream B depends on stream A and both streams are about to call Write - // to queue DATA frames. If B wins the race, a naive scheduler would eagerly - // write as much data from B as possible, but this is suboptimal because A - // is a higher-priority stream. With throttling enabled, we write a small - // amount of data from B to minimize the amount of bandwidth that B can - // steal from A. - ThrottleOutOfOrderWrites bool -} - -// NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. -// If cfg is nil, default options are used. -func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { - if cfg == nil { - // For justification of these defaults, see: - // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY - cfg = &PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 10, - MaxIdleNodesInTree: 10, - ThrottleOutOfOrderWrites: false, - } - } - - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), - maxClosedNodesInTree: cfg.MaxClosedNodesInTree, - maxIdleNodesInTree: cfg.MaxIdleNodesInTree, - enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, - } - ws.nodes[0] = &ws.root - if cfg.ThrottleOutOfOrderWrites { - ws.writeThrottleLimit = 1024 - } else { - ws.writeThrottleLimit = math.MaxInt32 - } - return ws -} - -type priorityNodeState int - -const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle -) - -// priorityNode is a node in an HTTP/2 priority tree. -// Each node is associated with a single stream ID. -// See RFC 7540, Section 5.3. 
-type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree - - // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings -} - -func (n *priorityNode) setParent(parent *priorityNode) { - if n == parent { - panic("setParent to self") - } - if n.parent == parent { - return - } - // Unlink from current parent. - if parent := n.parent; parent != nil { - if n.prev == nil { - parent.kids = n.next - } else { - n.prev.next = n.next - } - if n.next != nil { - n.next.prev = n.prev - } - } - // Link to new parent. - // If parent=nil, remove n from the tree. - // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). - n.parent = parent - if parent == nil { - n.next = nil - n.prev = nil - } else { - n.next = parent.kids - n.prev = nil - if n.next != nil { - n.next.prev = n - } - parent.kids = n - } -} - -func (n *priorityNode) addBytes(b int64) { - n.bytes += b - for ; n != nil; n = n.parent { - n.subtreeBytes += b - } -} - -// walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this function returns true and the -// walk halts. tmp is used as scratch space for sorting. -// -// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true -// if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { - if !n.q.empty() && f(n, openParent) { - return true - } - if n.kids == nil { - return false - } - - // Don't consider the root "open" when updating openParent since - // we can't send data frames on the root stream (only control frames). - if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) - } - - // Common case: only one kid or all kids have the same weight. - // Some clients don't use weights; other clients (like web browsers) - // use mostly-linear priority trees. - w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. 
- *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." 
- parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for "grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if wr.isControl() { - n = &ws.root - } else { - id := wr.StreamID() - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. In other case, we push wr onto the root, rather - // than creating a new priorityNode. 
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for n.kids != nil { - n.kids.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go new file mode 100644 index 0000000000..4e33c29a24 --- /dev/null +++ b/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -0,0 +1,450 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). 
+ MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. +func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*priorityNodeRFC7540), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeStateRFC7540 int + +const ( + priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota + priorityNodeClosedRFC7540 + priorityNodeIdleRFC7540 +) + +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNodeRFC7540 struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNodeRFC7540 + kids *priorityNodeRFC7540 // start of the kids list + prev, next *priorityNodeRFC7540 // doubly-linked list of siblings +} + +func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNodeRFC7540) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this function returns true and the +// walk halts. 
tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpenRFC7540) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. + w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540 + +func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } +func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteSchedulerRFC7540 struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNodeRFC7540 + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNodeRFC7540 + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNodeRFC7540 + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNodeRFC7540 + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. 
+ if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdleRFC7540 { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpenRFC7540 + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." + parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNodeRFC7540{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeOpenRFC7540, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpenRFC7540 { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosedRFC7540 + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNodeRFC7540{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeIdleRFC7540, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeightRFC7540 + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. 
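+	// For example, with kids(parent) = {a, b, n}, an exclusive dependency
+	// of n on parent reparents the other kids under n, yielding
+	// parent -> n -> {a, b}: the loop below moves every sibling except n.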
+	if priority.Exclusive {
+		k := parent.kids
+		for k != nil {
+			next := k.next
+			if k != n {
+				k.setParent(n)
+			}
+			k = next
+		}
+	}
+
+	n.setParent(parent)
+	n.weight = priority.Weight
+}
+
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+	var n *priorityNodeRFC7540
+	if wr.isControl() {
+		n = &ws.root
+	} else {
+		id := wr.StreamID()
+		n = ws.nodes[id]
+		if n == nil {
+			// id is an idle or closed stream. wr should not be a HEADERS or
+			// DATA frame. In either case, we push wr onto the root, rather
+			// than creating a new priorityNode.
+			if wr.DataSize() > 0 {
+				panic("add DATA on non-open stream")
+			}
+			n = &ws.root
+		}
+	}
+	n.q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
+		limit := int32(math.MaxInt32)
+		if openParent {
+			limit = ws.writeThrottleLimit
+		}
+		wr, ok = n.q.consume(limit)
+		if !ok {
+			return false
+		}
+		n.addBytes(int64(wr.DataSize()))
+		// If B depends on A and B continuously has data available but A
+		// does not, gradually increase the throttling limit to allow B to
+		// steal more and more bandwidth from A.
+		if openParent {
+			ws.writeThrottleLimit += 1024
+			if ws.writeThrottleLimit < 0 {
+				ws.writeThrottleLimit = math.MaxInt32
+			}
+		} else if ws.enableWriteThrottle {
+			ws.writeThrottleLimit = 1024
+		}
+		return true
+	})
+	return wr, ok
+}
+
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
+	if maxSize == 0 {
+		return
+	}
+	if len(*list) == maxSize {
+		// Remove the oldest node, then shift left.
+		ws.removeNode((*list)[0])
+		x := (*list)[1:]
+		copy(*list, x)
+		*list = (*list)[:len(x)]
+	}
+	*list = append(*list, n)
+}
+
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
+	for n.kids != nil {
+		n.kids.setParent(n.parent)
+	}
+	n.setParent(nil)
+	delete(ws.nodes, n.id)
+}
diff --git a/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
new file mode 100644
index 0000000000..cb4cadc32d
--- /dev/null
+++ b/openshift/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+)
+
+type streamMetadata struct {
+	location *writeQueue
+	priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+	// control contains control frames (SETTINGS, PING, etc.).
+	control writeQueue
+
+	// heads contains the heads of circular lists of streams.
+	// We put these heads within a nested array that represents urgency and
+	// incremental, as defined in
+	// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+	// 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
+	heads [8][2]*writeQueue
+
+	// streams contains a mapping between each stream ID and its metadata, so
+	// we can quickly locate a stream when we need to, for example, adjust its
+	// priority.
+	streams map[uint32]streamMetadata
+
+	// queuePool holds empty queues for reuse.
+	queuePool writeQueuePool
+
+	// prioritizeIncremental is used to determine whether we should prioritize
+	// incremental streams or not, when urgency is the same in a given Pop()
+	// call.
+ prioritizeIncremental bool +} + +func newPriorityWriteSchedulerRFC9218() WriteScheduler { + ws := &priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]streamMetadata), + } + return ws +} + +func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). 
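+	// For example, if u=2 holds one non-incremental stream A and two
+	// incremental streams B and C, alternating Pop calls serve A and the
+	// round-robin pair {B, C} in turn, splitting bandwidth at that urgency
+	// level roughly in half between the two groups.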
+ for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. + ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return FrameWriteRequest{}, false +} diff --git a/openshift/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/openshift/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go index 54fe86322d..737cff9ecb 100644 --- a/openshift/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go +++ b/openshift/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/openshift/tools/vendor/golang.org/x/net/internal/httpcommon/request.go b/openshift/tools/vendor/golang.org/x/net/internal/httpcommon/request.go index 4b70553179..1e10f89ebf 100644 --- a/openshift/tools/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/openshift/tools/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -51,7 +51,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -399,7 +399,7 @@ type ServerRequestResult struct { // If the request should be rejected, this is a short string suitable for passing // to the http2 package's CountError function. - // It might be a bit odd to return errors this way rather than returing an error, + // It might be a bit odd to return errors this way rather than returning an error, // but this ensures we don't forget to include a CountError reason. 
InvalidReason string } diff --git a/openshift/tools/vendor/golang.org/x/oauth2/deviceauth.go b/openshift/tools/vendor/golang.org/x/oauth2/deviceauth.go index e99c92f39c..e783a94374 100644 --- a/openshift/tools/vendor/golang.org/x/oauth2/deviceauth.go +++ b/openshift/tools/vendor/golang.org/x/oauth2/deviceauth.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "mime" "net/http" "net/url" "strings" @@ -116,10 +117,38 @@ func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAu return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) } if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ + retrieveError := &RetrieveError{ Response: r, Body: body, } + + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, retrieveError + } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") + default: + var tj struct { + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` + } + if json.Unmarshal(body, &tj) != nil { + return nil, retrieveError + } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI + } + + return nil, retrieveError } da := &DeviceAuthResponse{} diff --git a/openshift/tools/vendor/golang.org/x/oauth2/oauth2.go b/openshift/tools/vendor/golang.org/x/oauth2/oauth2.go index de34feb844..5c527d31fd 100644 --- a/openshift/tools/vendor/golang.org/x/oauth2/oauth2.go +++ b/openshift/tools/vendor/golang.org/x/oauth2/oauth2.go @@ -9,7 +9,6 @@ package oauth2 // import "golang.org/x/oauth2" import ( - "bytes" "context" "errors" "net/http" @@ -99,7 +98,7 @@ const ( // in the POST body as application/x-www-form-urlencoded parameters. AuthStyleInParams AuthStyle = 1 - // AuthStyleInHeader sends the client_id and client_password + // AuthStyleInHeader sends the client_id and client_secret // using HTTP Basic Authorization. This is an optional style // described in the OAuth2 RFC 6749 section 2.3.1. AuthStyleInHeader AuthStyle = 2 @@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // PKCE), https://www.oauth.com/oauth2-servers/pkce/ and // https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer + var buf strings.Builder buf.WriteString(c.Endpoint.AuthURL) v := url.Values{ "response_type": {"code"}, diff --git a/openshift/tools/vendor/golang.org/x/oauth2/pkce.go b/openshift/tools/vendor/golang.org/x/oauth2/pkce.go index cea8374d51..f99384f0f5 100644 --- a/openshift/tools/vendor/golang.org/x/oauth2/pkce.go +++ b/openshift/tools/vendor/golang.org/x/oauth2/pkce.go @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { return base64.RawURLEncoding.EncodeToString(sha[:]) } -// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// S256ChallengeOption derives a PKCE code challenge from the verifier with // method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. 
func S256ChallengeOption(verifier string) AuthCodeOption { diff --git a/openshift/tools/vendor/golang.org/x/oauth2/token.go b/openshift/tools/vendor/golang.org/x/oauth2/token.go index 239ec32962..e995eebb5e 100644 --- a/openshift/tools/vendor/golang.org/x/oauth2/token.go +++ b/openshift/tools/vendor/golang.org/x/oauth2/token.go @@ -103,7 +103,7 @@ func (t *Token) WithExtra(extra any) *Token { } // Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a +// Extra fields are key-value pairs returned by the server as // part of the token retrieval response. func (t *Token) Extra(key string) any { if raw, ok := t.raw.(map[string]any); ok { diff --git a/openshift/tools/vendor/golang.org/x/oauth2/transport.go b/openshift/tools/vendor/golang.org/x/oauth2/transport.go index 8bbebbac9e..9922ec3316 100644 --- a/openshift/tools/vendor/golang.org/x/oauth2/transport.go +++ b/openshift/tools/vendor/golang.org/x/oauth2/transport.go @@ -58,7 +58,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { var cancelOnce sync.Once // CancelRequest does nothing. It used to be a legacy cancellation mechanism -// but now only it only logs on first use to warn that it's deprecated. +// but now only logs on first use to warn that it's deprecated. // // Deprecated: use contexts for cancellation instead. func (t *Transport) CancelRequest(req *http.Request) { diff --git a/openshift/tools/vendor/golang.org/x/sync/errgroup/errgroup.go b/openshift/tools/vendor/golang.org/x/sync/errgroup/errgroup.go index cb6bb9ad3b..2f45dbc86e 100644 --- a/openshift/tools/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/openshift/tools/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. +// cancellation for groups of goroutines working on subtasks of a common task. // // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // returning errors. @@ -12,8 +12,6 @@ package errgroup import ( "context" "fmt" - "runtime" - "runtime/debug" "sync" ) @@ -33,10 +31,6 @@ type Group struct { errOnce sync.Once err error - - mu sync.Mutex - panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked. - abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit). } func (g *Group) done() { @@ -56,22 +50,13 @@ func WithContext(ctx context.Context) (*Group, context.Context) { return &Group{cancel: cancel}, ctx } -// Wait blocks until all function calls from the Go method have returned -// normally, then returns the first non-nil error (if any) from them. -// -// If any of the calls panics, Wait panics with a [PanicValue]; -// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit. +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { g.cancel(g.err) } - if g.panicValue != nil { - panic(g.panicValue) - } - if g.abnormal { - runtime.Goexit() - } return g.err } @@ -81,53 +66,31 @@ func (g *Group) Wait() error { // It blocks until the new goroutine can be added without the number of // goroutines in the group exceeding the configured limit. 
// -// The first goroutine in the group that returns a non-nil error, panics, or -// invokes [runtime.Goexit] will cancel the associated Context, if any. +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} } - g.add(f) -} - -func (g *Group) add(f func() error) { g.wg.Add(1) go func() { defer g.done() - normalReturn := false - defer func() { - if normalReturn { - return - } - v := recover() - g.mu.Lock() - defer g.mu.Unlock() - if !g.abnormal { - if g.cancel != nil { - g.cancel(g.err) - } - g.abnormal = true - } - if v != nil && g.panicValue == nil { - switch v := v.(type) { - case error: - g.panicValue = PanicError{ - Recovered: v, - Stack: debug.Stack(), - } - default: - g.panicValue = PanicValue{ - Recovered: v, - Stack: debug.Stack(), - } - } - } - }() - err := f() - normalReturn = true - if err != nil { + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. + // See #53757, #74275, #74304, #74306. + + if err := f(); err != nil { g.errOnce.Do(func() { g.err = err if g.cancel != nil { @@ -152,7 +115,19 @@ func (g *Group) TryGo(f func() error) bool { } } - g.add(f) + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() return true } @@ -174,34 +149,3 @@ func (g *Group) SetLimit(n int) { } g.sem = make(chan token, n) } - -// PanicError wraps an error recovered from an unhandled panic -// when calling a function passed to Go or TryGo. -type PanicError struct { - Recovered error - Stack []byte // result of call to [debug.Stack] -} - -func (p PanicError) Error() string { - if len(p.Stack) > 0 { - return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack) - } - return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) -} - -func (p PanicError) Unwrap() error { return p.Recovered } - -// PanicValue wraps a value that does not implement the error interface, -// recovered from an unhandled panic when calling a function passed to Go or -// TryGo. -type PanicValue struct { - Recovered any - Stack []byte // result of call to [debug.Stack] -} - -func (p PanicValue) String() string { - if len(p.Stack) > 0 { - return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack) - } - return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/openshift/tools/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s deleted file mode 100644 index 269e173ca4..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc - -#include "textflag.h" - -// -// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go -// - -TEXT ·syscall6(SB),NOSPLIT,$0-88 - JMP syscall·syscall6(SB) - -TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 - JMP syscall·rawSyscall6(SB) diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/openshift/tools/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s deleted file mode 100644 index ec2acfe540..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && amd64 && gc - -#include "textflag.h" - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) - -TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctlbyname(SB) -GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/byteorder.go b/openshift/tools/vendor/golang.org/x/sys/cpu/byteorder.go deleted file mode 100644 index 271055be0b..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/byteorder.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "runtime" -) - -// byteOrder is a subset of encoding/binary.ByteOrder. -type byteOrder interface { - Uint32([]byte) uint32 - Uint64([]byte) uint64 -} - -type littleEndian struct{} -type bigEndian struct{} - -func (littleEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func (littleEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func (bigEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -} - -func (bigEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 -} - -// hostByteOrder returns littleEndian on little-endian machines and -// bigEndian on big-endian machines. 
-func hostByteOrder() byteOrder { - switch runtime.GOARCH { - case "386", "amd64", "amd64p32", - "alpha", - "arm", "arm64", - "loong64", - "mipsle", "mips64le", "mips64p32le", - "nios2", - "ppc64le", - "riscv", "riscv64", - "sh": - return littleEndian{} - case "armbe", "arm64be", - "m68k", - "mips", "mips64", "mips64p32", - "ppc", "ppc64", - "s390", "s390x", - "shbe", - "sparc", "sparc64": - return bigEndian{} - } - panic("unknown architecture") -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu.go deleted file mode 100644 index 63541994ef..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpu implements processor feature detection for -// various CPU architectures. -package cpu - -import ( - "os" - "strings" -) - -// Initialized reports whether the CPU features were initialized. -// -// For some GOOS/GOARCH combinations initialization of the CPU features depends -// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm -// Initialized will report false if reading the file fails. -var Initialized bool - -// CacheLinePad is used to pad structs to avoid false sharing. -type CacheLinePad struct{ _ [cacheLineSize]byte } - -// X86 contains the supported CPU features of the -// current X86/AMD64 platform. If the current platform -// is not X86/AMD64 then all feature flags are false. -// -// X86 is padded to avoid false sharing. Further the HasAVX -// and HasAVX2 are only set if the OS supports XMM and YMM -// registers in addition to the CPUID feature bit being set. 
-var X86 struct { - _ CacheLinePad - HasAES bool // AES hardware implementation (AES NI) - HasADX bool // Multi-precision add-carry instruction extensions - HasAVX bool // Advanced vector extension - HasAVX2 bool // Advanced vector extension 2 - HasAVX512 bool // Advanced vector extension 512 - HasAVX512F bool // Advanced vector extension 512 Foundation Instructions - HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions - HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions - HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions - HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions - HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions - HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add - HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions - HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision - HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision - HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions - HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations - HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions - HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions - HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions - HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 - HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms - HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions - HasAMXTile bool // Advanced Matrix Extension Tile instructions - HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions - HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions - HasBMI1 bool // Bit manipulation instruction set 1 - HasBMI2 bool // Bit manipulation instruction set 2 - HasCX16 bool // Compare and exchange 16 Bytes - HasERMS bool // Enhanced REP for MOVSB and STOSB - HasFMA bool // Fused-multiply-add instructions - HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. - HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM - HasPOPCNT bool // Hamming weight instruction POPCNT. - HasRDRAND bool // RDRAND instruction (on-chip random number generator) - HasRDSEED bool // RDSEED instruction (on-chip random number generator) - HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) - HasSSE3 bool // Streaming SIMD extension 3 - HasSSSE3 bool // Supplemental streaming SIMD extension 3 - HasSSE41 bool // Streaming SIMD extension 4 and 4.1 - HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add - HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions - HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions - _ CacheLinePad -} - -// ARM64 contains the supported CPU features of the -// current ARMv8(aarch64) platform. If the current platform -// is not arm64 then all feature flags are false. 
-var ARM64 struct { - _ CacheLinePad - HasFP bool // Floating-point instruction set (always available) - HasASIMD bool // Advanced SIMD (always available) - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - HasATOMICS bool // Atomic memory operation instruction set - HasFPHP bool // Half precision floating-point instruction set - HasASIMDHP bool // Advanced SIMD half precision instruction set - HasCPUID bool // CPUID identification scheme registers - HasASIMDRDM bool // Rounding double multiply add/subtract instruction set - HasJSCVT bool // Javascript conversion from floating-point to integer - HasFCMA bool // Floating-point multiplication and addition of complex numbers - HasLRCPC bool // Release Consistent processor consistent support - HasDCPOP bool // Persistent memory support - HasSHA3 bool // SHA3 hardware implementation - HasSM3 bool // SM3 hardware implementation - HasSM4 bool // SM4 hardware implementation - HasASIMDDP bool // Advanced SIMD double precision instruction set - HasSHA512 bool // SHA512 hardware implementation - HasSVE bool // Scalable Vector Extensions - HasSVE2 bool // Scalable Vector Extensions 2 - HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 - HasDIT bool // Data Independent Timing support - HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions - _ CacheLinePad -} - -// ARM contains the supported CPU features of the current ARM (32-bit) platform. -// All feature flags are false if: -// 1. the current platform is not arm, or -// 2. the current operating system is not Linux. -var ARM struct { - _ CacheLinePad - HasSWP bool // SWP instruction support - HasHALF bool // Half-word load and store support - HasTHUMB bool // ARM Thumb instruction set - Has26BIT bool // Address space limited to 26-bits - HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support - HasFPA bool // Floating point arithmetic support - HasVFP bool // Vector floating point support - HasEDSP bool // DSP Extensions support - HasJAVA bool // Java instruction set - HasIWMMXT bool // Intel Wireless MMX technology support - HasCRUNCH bool // MaverickCrunch context switching and handling - HasTHUMBEE bool // Thumb EE instruction set - HasNEON bool // NEON instruction set - HasVFPv3 bool // Vector floating point version 3 support - HasVFPv3D16 bool // Vector floating point version 3 D8-D15 - HasTLS bool // Thread local storage support - HasVFPv4 bool // Vector floating point version 4 support - HasIDIVA bool // Integer divide instruction support in ARM mode - HasIDIVT bool // Integer divide instruction support in Thumb mode - HasVFPD32 bool // Vector floating point version 3 D15-D31 - HasLPAE bool // Large Physical Address Extensions - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - _ CacheLinePad -} - -// The booleans in Loong64 contain the correspondingly named cpu feature bit. -// The struct is padded to avoid false sharing. 
-var Loong64 struct { - _ CacheLinePad - HasLSX bool // support 128-bit vector extension - HasLASX bool // support 256-bit vector extension - HasCRC32 bool // support CRC instruction - HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction - HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction - _ CacheLinePad -} - -// MIPS64X contains the supported CPU features of the current mips64/mips64le -// platforms. If the current platform is not mips64/mips64le or the current -// operating system is not Linux then all feature flags are false. -var MIPS64X struct { - _ CacheLinePad - HasMSA bool // MIPS SIMD architecture - _ CacheLinePad -} - -// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. -// If the current platform is not ppc64/ppc64le then all feature flags are false. -// -// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (DARN, SCV), so there are feature bits for -// those as well. The struct is padded to avoid false sharing. -var PPC64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 - _ CacheLinePad -} - -// S390X contains the supported CPU features of the current IBM Z -// (s390x) platform. If the current platform is not IBM Z then all -// feature flags are false. -// -// S390X is padded to avoid false sharing. Further HasVX is only set -// if the OS supports vector registers in addition to the STFLE -// feature bit being set. -var S390X struct { - _ CacheLinePad - HasZARCH bool // z/Architecture mode is active [mandatory] - HasSTFLE bool // store facility list extended - HasLDISP bool // long (20-bit) displacements - HasEIMM bool // 32-bit immediates - HasDFP bool // decimal floating point - HasETF3EH bool // ETF-3 enhanced - HasMSA bool // message security assist (CPACF) - HasAES bool // KM-AES{128,192,256} functions - HasAESCBC bool // KMC-AES{128,192,256} functions - HasAESCTR bool // KMCTR-AES{128,192,256} functions - HasAESGCM bool // KMA-GCM-AES{128,192,256} functions - HasGHASH bool // KIMD-GHASH function - HasSHA1 bool // K{I,L}MD-SHA-1 functions - HasSHA256 bool // K{I,L}MD-SHA-256 functions - HasSHA512 bool // K{I,L}MD-SHA-512 functions - HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions - HasVX bool // vector facility - HasVXE bool // vector-enhancements facility 1 - _ CacheLinePad -} - -// RISCV64 contains the supported CPU features and performance characteristics for riscv64 -// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate -// the presence of RISC-V extensions. -// -// It is safe to assume that all the RV64G extensions are supported and so they are omitted from -// this structure. As riscv64 Go programs require at least RV64G, the code that populates -// this structure cannot run successfully if some of the RV64G extensions are missing. -// The struct is padded to avoid false sharing. 
-var RISCV64 struct { - _ CacheLinePad - HasFastMisaligned bool // Fast misaligned accesses - HasC bool // Compressed instruction-set extension - HasV bool // Vector extension compatible with RVV 1.0 - HasZba bool // Address generation instructions extension - HasZbb bool // Basic bit-manipulation extension - HasZbs bool // Single-bit instructions extension - HasZvbb bool // Vector Basic Bit-manipulation - HasZvbc bool // Vector Carryless Multiplication - HasZvkb bool // Vector Cryptography Bit-manipulation - HasZvkt bool // Vector Data-Independent Execution Latency - HasZvkg bool // Vector GCM/GMAC - HasZvkn bool // NIST Algorithm Suite (AES/SHA256/SHA512) - HasZvknc bool // NIST Algorithm Suite with carryless multiply - HasZvkng bool // NIST Algorithm Suite with GCM - HasZvks bool // ShangMi Algorithm Suite - HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication - HasZvksg bool // ShangMi Algorithm Suite with GCM - _ CacheLinePad -} - -func init() { - archInit() - initOptions() - processOptions() -} - -// options contains the cpu debug options that can be used in GODEBUG. -// Options are arch dependent and are added by the arch specific initOptions functions. -// Features that are mandatory for the specific GOARCH should have the Required field set -// (e.g. SSE2 on amd64). -var options []option - -// Option names should be lower case. e.g. avx instead of AVX. -type option struct { - Name string - Feature *bool - Specified bool // whether feature value was specified in GODEBUG - Enable bool // whether feature should be enabled - Required bool // whether feature is mandatory and can not be disabled -} - -func processOptions() { - env := os.Getenv("GODEBUG") -field: - for env != "" { - field := "" - i := strings.IndexByte(env, ',') - if i < 0 { - field, env = env, "" - } else { - field, env = env[:i], env[i+1:] - } - if len(field) < 4 || field[:4] != "cpu." { - continue - } - i = strings.IndexByte(field, '=') - if i < 0 { - print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") - continue - } - key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" - - var enable bool - switch value { - case "on": - enable = true - case "off": - enable = false - default: - print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") - continue field - } - - if key == "all" { - for i := range options { - options[i].Specified = true - options[i].Enable = enable || options[i].Required - } - continue field - } - - for i := range options { - if options[i].Name == key { - options[i].Specified = true - options[i].Enable = enable - continue field - } - } - - print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") - } - - for _, o := range options { - if !o.Specified { - continue - } - - if o.Enable && !*o.Feature { - print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") - continue - } - - if !o.Enable && o.Required { - print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") - continue - } - - *o.Feature = o.Enable - } -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_aix.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_aix.go deleted file mode 100644 index 9bf0c32eb6..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build aix - -package cpu - -const ( - // getsystemcfg constants - _SC_IMPL = 2 - _IMPL_POWER8 = 0x10000 - _IMPL_POWER9 = 0x20000 -) - -func archInit() { - impl := getsystemcfg(_SC_IMPL) - if impl&_IMPL_POWER8 != 0 { - PPC64.IsPOWER8 = true - } - if impl&_IMPL_POWER9 != 0 { - PPC64.IsPOWER8 = true - PPC64.IsPOWER9 = true - } - - Initialized = true -} - -func getsystemcfg(label int) (n uint64) { - r0, _ := callgetsystemcfg(label) - n = uint64(r0) - return -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm.go deleted file mode 100644 index 301b752e9c..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 32 - -// HWCAP/HWCAP2 bits. -// These are specific to Linux. -const ( - hwcap_SWP = 1 << 0 - hwcap_HALF = 1 << 1 - hwcap_THUMB = 1 << 2 - hwcap_26BIT = 1 << 3 - hwcap_FAST_MULT = 1 << 4 - hwcap_FPA = 1 << 5 - hwcap_VFP = 1 << 6 - hwcap_EDSP = 1 << 7 - hwcap_JAVA = 1 << 8 - hwcap_IWMMXT = 1 << 9 - hwcap_CRUNCH = 1 << 10 - hwcap_THUMBEE = 1 << 11 - hwcap_NEON = 1 << 12 - hwcap_VFPv3 = 1 << 13 - hwcap_VFPv3D16 = 1 << 14 - hwcap_TLS = 1 << 15 - hwcap_VFPv4 = 1 << 16 - hwcap_IDIVA = 1 << 17 - hwcap_IDIVT = 1 << 18 - hwcap_VFPD32 = 1 << 19 - hwcap_LPAE = 1 << 20 - hwcap_EVTSTRM = 1 << 21 - - hwcap2_AES = 1 << 0 - hwcap2_PMULL = 1 << 1 - hwcap2_SHA1 = 1 << 2 - hwcap2_SHA2 = 1 << 3 - hwcap2_CRC32 = 1 << 4 -) - -func initOptions() { - options = []option{ - {Name: "pmull", Feature: &ARM.HasPMULL}, - {Name: "sha1", Feature: &ARM.HasSHA1}, - {Name: "sha2", Feature: &ARM.HasSHA2}, - {Name: "swp", Feature: &ARM.HasSWP}, - {Name: "thumb", Feature: &ARM.HasTHUMB}, - {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, - {Name: "tls", Feature: &ARM.HasTLS}, - {Name: "vfp", Feature: &ARM.HasVFP}, - {Name: "vfpd32", Feature: &ARM.HasVFPD32}, - {Name: "vfpv3", Feature: &ARM.HasVFPv3}, - {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, - {Name: "vfpv4", Feature: &ARM.HasVFPv4}, - {Name: "half", Feature: &ARM.HasHALF}, - {Name: "26bit", Feature: &ARM.Has26BIT}, - {Name: "fastmul", Feature: &ARM.HasFASTMUL}, - {Name: "fpa", Feature: &ARM.HasFPA}, - {Name: "edsp", Feature: &ARM.HasEDSP}, - {Name: "java", Feature: &ARM.HasJAVA}, - {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, - {Name: "crunch", Feature: &ARM.HasCRUNCH}, - {Name: "neon", Feature: &ARM.HasNEON}, - {Name: "idivt", Feature: &ARM.HasIDIVT}, - {Name: "idiva", Feature: &ARM.HasIDIVA}, - {Name: "lpae", Feature: &ARM.HasLPAE}, - {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, - {Name: "aes", Feature: &ARM.HasAES}, - {Name: "crc32", Feature: &ARM.HasCRC32}, - } - -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.go deleted file mode 100644 index af2aa99f9f..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import "runtime" - -// cacheLineSize is used to prevent false sharing of cache lines. -// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. -// It doesn't cost much and is much more future-proof. 
-const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "fp", Feature: &ARM64.HasFP}, - {Name: "asimd", Feature: &ARM64.HasASIMD}, - {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, - {Name: "aes", Feature: &ARM64.HasAES}, - {Name: "fphp", Feature: &ARM64.HasFPHP}, - {Name: "jscvt", Feature: &ARM64.HasJSCVT}, - {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, - {Name: "pmull", Feature: &ARM64.HasPMULL}, - {Name: "sha1", Feature: &ARM64.HasSHA1}, - {Name: "sha2", Feature: &ARM64.HasSHA2}, - {Name: "sha3", Feature: &ARM64.HasSHA3}, - {Name: "sha512", Feature: &ARM64.HasSHA512}, - {Name: "sm3", Feature: &ARM64.HasSM3}, - {Name: "sm4", Feature: &ARM64.HasSM4}, - {Name: "sve", Feature: &ARM64.HasSVE}, - {Name: "sve2", Feature: &ARM64.HasSVE2}, - {Name: "crc32", Feature: &ARM64.HasCRC32}, - {Name: "atomics", Feature: &ARM64.HasATOMICS}, - {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, - {Name: "cpuid", Feature: &ARM64.HasCPUID}, - {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, - {Name: "fcma", Feature: &ARM64.HasFCMA}, - {Name: "dcpop", Feature: &ARM64.HasDCPOP}, - {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, - {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, - {Name: "dit", Feature: &ARM64.HasDIT}, - {Name: "i8mm", Feature: &ARM64.HasI8MM}, - } -} - -func archInit() { - switch runtime.GOOS { - case "freebsd": - readARM64Registers() - case "linux", "netbsd", "openbsd": - doinit() - default: - // Many platforms don't seem to allow reading these registers. - setMinimalFeatures() - } -} - -// setMinimalFeatures fakes the minimal ARM64 features expected by -// TestARM64minimalFeatures. -func setMinimalFeatures() { - ARM64.HasASIMD = true - ARM64.HasFP = true -} - -func readARM64Registers() { - Initialized = true - - parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) -} - -func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { - // ID_AA64ISAR0_EL1 - switch extractBits(isar0, 4, 7) { - case 1: - ARM64.HasAES = true - case 2: - ARM64.HasAES = true - ARM64.HasPMULL = true - } - - switch extractBits(isar0, 8, 11) { - case 1: - ARM64.HasSHA1 = true - } - - switch extractBits(isar0, 12, 15) { - case 1: - ARM64.HasSHA2 = true - case 2: - ARM64.HasSHA2 = true - ARM64.HasSHA512 = true - } - - switch extractBits(isar0, 16, 19) { - case 1: - ARM64.HasCRC32 = true - } - - switch extractBits(isar0, 20, 23) { - case 2: - ARM64.HasATOMICS = true - } - - switch extractBits(isar0, 28, 31) { - case 1: - ARM64.HasASIMDRDM = true - } - - switch extractBits(isar0, 32, 35) { - case 1: - ARM64.HasSHA3 = true - } - - switch extractBits(isar0, 36, 39) { - case 1: - ARM64.HasSM3 = true - } - - switch extractBits(isar0, 40, 43) { - case 1: - ARM64.HasSM4 = true - } - - switch extractBits(isar0, 44, 47) { - case 1: - ARM64.HasASIMDDP = true - } - - // ID_AA64ISAR1_EL1 - switch extractBits(isar1, 0, 3) { - case 1: - ARM64.HasDCPOP = true - } - - switch extractBits(isar1, 12, 15) { - case 1: - ARM64.HasJSCVT = true - } - - switch extractBits(isar1, 16, 19) { - case 1: - ARM64.HasFCMA = true - } - - switch extractBits(isar1, 20, 23) { - case 1: - ARM64.HasLRCPC = true - } - - switch extractBits(isar1, 52, 55) { - case 1: - ARM64.HasI8MM = true - } - - // ID_AA64PFR0_EL1 - switch extractBits(pfr0, 16, 19) { - case 0: - ARM64.HasFP = true - case 1: - ARM64.HasFP = true - ARM64.HasFPHP = true - } - - switch extractBits(pfr0, 20, 23) { - case 0: - ARM64.HasASIMD = true - case 1: - ARM64.HasASIMD = true - ARM64.HasASIMDHP = true - } - - switch extractBits(pfr0, 32, 35) { - case 1: - ARM64.HasSVE = 
true - - parseARM64SVERegister(getzfr0()) - } - - switch extractBits(pfr0, 48, 51) { - case 1: - ARM64.HasDIT = true - } -} - -func parseARM64SVERegister(zfr0 uint64) { - switch extractBits(zfr0, 0, 3) { - case 1: - ARM64.HasSVE2 = true - } -} - -func extractBits(data uint64, start, end uint) uint { - return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s deleted file mode 100644 index 22cc99844a..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -#include "textflag.h" - -// func getisar0() uint64 -TEXT ·getisar0(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 - MOVD R0, ret+0(FP) - RET - -// func getisar1() uint64 -TEXT ·getisar1(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 - MOVD R0, ret+0(FP) - RET - -// func getpfr0() uint64 -TEXT ·getpfr0(SB),NOSPLIT,$0-8 - // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 - MOVD R0, ret+0(FP) - RET - -// func getzfr0() uint64 -TEXT ·getzfr0(SB),NOSPLIT,$0-8 - // get SVE Feature Register 0 into x0 - // mrs x0, ID_AA64ZFR0_EL1 = d5380480 - WORD $0xd5380480 - MOVD R0, ret+0(FP) - RET diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go deleted file mode 100644 index b838cb9e95..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && amd64 && gc - -package cpu - -// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl -// call (see issue 43089). It also restricts AVX512 support for Darwin to -// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). -// -// Background: -// Darwin implements a special mechanism to economize on thread state when -// AVX512 specific registers are not in use. This scheme minimizes state when -// preempting threads that haven't yet used any AVX512 instructions, but adds -// special requirements to check for AVX512 hardware support at runtime (e.g. -// via sysctl call or commpage inspection). See issue 43089 and link below for -// full background: -// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 -// -// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 -// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption -// of the AVX512 mask registers (K0-K7) upon signal return. For this reason -// AVX512 is considered unsafe to use on Darwin for kernel versions prior to -// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. 
-func darwinSupportsAVX512() bool { - return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) -} - -// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies -func darwinKernelVersionCheck(major, minor, patch int) bool { - var release [256]byte - err := darwinOSRelease(&release) - if err != nil { - return false - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return false - } - case b == 0: - break Loop - default: - return false - } - } - if c != 2 { - return false - } - return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go deleted file mode 100644 index 6ac6e1efb2..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -package cpu - -func getisar0() uint64 -func getisar1() uint64 -func getpfr0() uint64 -func getzfr0() uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go deleted file mode 100644 index c8ae6ddc15..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return true } - -// The following feature detection functions are defined in cpu_s390x.s. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList -func kmQuery() queryResult -func kmcQuery() queryResult -func kmctrQuery() queryResult -func kmaQuery() queryResult -func kimdQuery() queryResult -func klmdQuery() queryResult diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go deleted file mode 100644 index 32a44514e2..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gc - -package cpu - -// cpuid is implemented in cpu_gc_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) - -// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func xgetbv() (eax, edx uint32) diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s deleted file mode 100644 index ce208ce6d6..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gc - -#include "textflag.h" - -// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid(SB), NOSPLIT, $0-24 - MOVL eaxArg+0(FP), AX - MOVL ecxArg+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv() (eax, edx uint32) -TEXT ·xgetbv(SB), NOSPLIT, $0-8 - MOVL $0, CX - XGETBV - MOVL AX, eax+0(FP) - MOVL DX, edx+4(FP) - RET diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go deleted file mode 100644 index 7f1946780b..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gccgo - -package cpu - -func getisar0() uint64 { return 0 } -func getisar1() uint64 { return 0 } -func getpfr0() uint64 { return 0 } diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go deleted file mode 100644 index 9526d2ce3a..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return false } - -// TODO(mundaym): the following feature detection functions are currently -// stubs. See https://golang.org/cl/162887 for how to fix this. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList { panic("not implemented for gccgo") } -func kmQuery() queryResult { panic("not implemented for gccgo") } -func kmcQuery() queryResult { panic("not implemented for gccgo") } -func kmctrQuery() queryResult { panic("not implemented for gccgo") } -func kmaQuery() queryResult { panic("not implemented for gccgo") } -func kimdQuery() queryResult { panic("not implemented for gccgo") } -func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c deleted file mode 100644 index 3f73a05dcf..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gccgo - -#include -#include -#include - -// Need to wrap __get_cpuid_count because it's declared as static. 
-int -gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) -{ - return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); -} - -#pragma GCC diagnostic ignored "-Wunknown-pragmas" -#pragma GCC push_options -#pragma GCC target("xsave") -#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) - -// xgetbv reads the contents of an XCR (Extended Control Register) -// specified in the ECX register into registers EDX:EAX. -// Currently, the only supported value for XCR is 0. -void -gccgoXgetbv(uint32_t *eax, uint32_t *edx) -{ - uint64_t v = _xgetbv(0); - *eax = v & 0xffffffff; - *edx = v >> 32; -} - -#pragma clang attribute pop -#pragma GCC pop_options diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go deleted file mode 100644 index 170d21ddfd..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gccgo - -package cpu - -//extern gccgoGetCpuidCount -func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) - -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { - var a, b, c, d uint32 - gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) - return a, b, c, d -} - -//extern gccgoXgetbv -func gccgoXgetbv(eax, edx *uint32) - -func xgetbv() (eax, edx uint32) { - var a, d uint32 - gccgoXgetbv(&a, &d) - return a, d -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux.go deleted file mode 100644 index 743eb54354..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !386 && !amd64 && !amd64p32 && !arm64 - -package cpu - -func archInit() { - if err := readHWCAP(); err != nil { - return - } - doinit() - Initialized = true -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go deleted file mode 100644 index 2057006dce..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
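The per-architecture cpu_linux_*.go files that follow all reduce to the same idiom: archInit fills hwCap from the aux vector, then doinit ANDs it against single-bit masks. A minimal, self-contained sketch of that test; the constants are copied from the deleted cpu_linux_arm64.go, and the hwCap value here is a hypothetical AT_HWCAP word rather than one read from the kernel:

package main

import "fmt"

// HWCAP bit positions copied from the deleted cpu_linux_arm64.go.
const (
	hwcapAES   = 1 << 3
	hwcapSHA2  = 1 << 6
	hwcapCRC32 = 1 << 7
)

// isSet mirrors the helper deleted below: true if any masked bit is set.
func isSet(hwc, mask uint) bool { return hwc&mask != 0 }

func main() {
	hwCap := uint(hwcapAES | hwcapCRC32) // hypothetical AT_HWCAP value
	fmt.Println("aes:  ", isSet(hwCap, hwcapAES))   // true
	fmt.Println("sha2: ", isSet(hwCap, hwcapSHA2))  // false
	fmt.Println("crc32:", isSet(hwCap, hwcapCRC32)) // true
}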
- -package cpu - -func doinit() { - ARM.HasSWP = isSet(hwCap, hwcap_SWP) - ARM.HasHALF = isSet(hwCap, hwcap_HALF) - ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) - ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) - ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) - ARM.HasFPA = isSet(hwCap, hwcap_FPA) - ARM.HasVFP = isSet(hwCap, hwcap_VFP) - ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) - ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) - ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) - ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) - ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) - ARM.HasNEON = isSet(hwCap, hwcap_NEON) - ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) - ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) - ARM.HasTLS = isSet(hwCap, hwcap_TLS) - ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) - ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) - ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) - ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) - ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) - ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM.HasAES = isSet(hwCap2, hwcap2_AES) - ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) - ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) - ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) - ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go deleted file mode 100644 index f1caf0f78e..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "strings" - "syscall" -) - -// HWCAP/HWCAP2 bits. These are exposed by Linux. -const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 - hwcap_DIT = 1 << 24 - - hwcap2_SVE2 = 1 << 1 - hwcap2_I8MM = 1 << 13 -) - -// linuxKernelCanEmulateCPUID reports whether we're running -// on Linux 4.11+. Ideally we'd like to ask the question about -// whether the current kernel contains -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 -// but the version number will have to do. -func linuxKernelCanEmulateCPUID() bool { - var un syscall.Utsname - syscall.Uname(&un) - var sb strings.Builder - for _, b := range un.Release[:] { - if b == 0 { - break - } - sb.WriteByte(byte(b)) - } - major, minor, _, ok := parseRelease(sb.String()) - return ok && (major > 4 || major == 4 && minor >= 11) -} - -func doinit() { - if err := readHWCAP(); err != nil { - // We failed to read /proc/self/auxv. This can happen if the binary has - // been given extra capabilities(7) with /bin/setcap. - // - // When this happens, we have two options. If the Linux kernel is new - // enough (4.11+), we can read the arm64 registers directly which'll - // trap into the kernel and then return back to userspace. 
- // - // But on older kernels, such as Linux 4.4.180 as used on many Synology - // devices, calling readARM64Registers (specifically getisar0) will - // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo - // instead. - // - // See golang/go#57336. - if linuxKernelCanEmulateCPUID() { - readARM64Registers() - } else { - readLinuxProcCPUInfo() - } - return - } - - // HWCAP feature bits - ARM64.HasFP = isSet(hwCap, hwcap_FP) - ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwCap, hwcap_AES) - ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) - ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwCap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) - ARM64.HasDIT = isSet(hwCap, hwcap_DIT) - - // HWCAP2 feature bits - ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) - ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go deleted file mode 100644 index 4f34114329..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -// HWCAP bits. These are exposed by the Linux kernel. -const ( - hwcap_LOONGARCH_LSX = 1 << 4 - hwcap_LOONGARCH_LASX = 1 << 5 -) - -func doinit() { - // TODO: Features that require kernel support like LSX and LASX can - // be detected here once needed in std library or by the compiler. - Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX) - Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX) -} - -func hwcIsSet(hwc uint, val uint) bool { - return hwc&val != 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go deleted file mode 100644 index 4686c1d541..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (mips64 || mips64le) - -package cpu - -// HWCAP bits. These are exposed by the Linux kernel 5.4. 
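The arm64 fallback above hinges on a kernel-version gate: MRS emulation is only safe on 4.11 or newer, so anything older parses /proc/cpuinfo instead. A rough standalone sketch of such a gate, assuming a uname-style release string (the sample strings are hypothetical):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// kernelAtLeast reports whether a "major.minor.patch-extra" release string
// is at least major.minor, in the spirit of linuxKernelCanEmulateCPUID above.
func kernelAtLeast(release string, major, minor int) bool {
	if i := strings.IndexAny(release, "-+"); i != -1 {
		release = release[:i] // drop "-91-generic" style suffixes
	}
	parts := strings.SplitN(release, ".", 3)
	if len(parts) < 2 {
		return false
	}
	maj, err1 := strconv.Atoi(parts[0])
	mnr, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil {
		return false
	}
	return maj > major || (maj == major && mnr >= minor)
}

func main() {
	fmt.Println(kernelAtLeast("4.4.180", 4, 11))           // false: too old, MRS would SIGILL
	fmt.Println(kernelAtLeast("5.15.0-91-generic", 4, 11)) // true: emulation available
}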
-const ( - // CPU features - hwcap_MIPS_MSA = 1 << 1 -) - -func doinit() { - // HWCAP feature bits - MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go deleted file mode 100644 index a428dec9cd..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 - -package cpu - -func doinit() {} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go deleted file mode 100644 index 197188e67f..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (ppc64 || ppc64le) - -package cpu - -// HWCAP/HWCAP2 bits. These are exposed by the kernel. -const ( - // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 - - // CPU features - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 -) - -func doinit() { - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) - PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go deleted file mode 100644 index ad741536f3..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe -// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. -// -// A note on detection of the Vector extension using HWCAP. -// -// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. -// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe -// syscall is not available then neither is the Vector extension (which needs kernel support). -// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. -// However, some RISC-V board manufacturers ship boards with an older kernel on top of which -// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe -// patches. These kernels advertise support for the Vector extension using HWCAP. 
Falling -// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not -// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. -// -// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by -// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board -// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified -// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use -// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector -// extension are binary incompatible. HWCAP can then not be used in isolation to populate the -// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. -// -// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector -// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype -// register. This check would allow us to safely detect version 1.0 of the Vector extension -// with HWCAP, if riscv_hwprobe were not available. However, the check cannot -// be added until the assembler supports the Vector instructions. -// -// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the -// extensions it advertises support for are explicitly versioned. It's also worth noting that -// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. -// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority -// of RISC-V extensions. -// -// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. - -// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must -// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall -// here. - -const ( - // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. - riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 - riscv_HWPROBE_IMA_C = 0x2 - riscv_HWPROBE_IMA_V = 0x4 - riscv_HWPROBE_EXT_ZBA = 0x8 - riscv_HWPROBE_EXT_ZBB = 0x10 - riscv_HWPROBE_EXT_ZBS = 0x20 - riscv_HWPROBE_EXT_ZVBB = 0x20000 - riscv_HWPROBE_EXT_ZVBC = 0x40000 - riscv_HWPROBE_EXT_ZVKB = 0x80000 - riscv_HWPROBE_EXT_ZVKG = 0x100000 - riscv_HWPROBE_EXT_ZVKNED = 0x200000 - riscv_HWPROBE_EXT_ZVKNHB = 0x800000 - riscv_HWPROBE_EXT_ZVKSED = 0x1000000 - riscv_HWPROBE_EXT_ZVKSH = 0x2000000 - riscv_HWPROBE_EXT_ZVKT = 0x4000000 - riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 - riscv_HWPROBE_MISALIGNED_FAST = 0x3 - riscv_HWPROBE_MISALIGNED_MASK = 0x7 -) - -const ( - // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. - sys_RISCV_HWPROBE = 258 -) - -// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. -type riscvHWProbePairs struct { - key int64 - value uint64 -} - -const ( - // CPU features - hwcap_RISCV_ISA_C = 1 << ('C' - 'A') -) - -func doinit() { - // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key - // field should be initialised with one of the key constants defined above, e.g., - // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. - // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. 
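The riscv_hwprobe syscall only exists on Linux/riscv64, so here is a portable sketch of just the decoding step that the deleted doinit (resuming below) performs on the returned pairs; the constants are copied from the diff and the kernel reply is fabricated:

package main

import "fmt"

// Keys and bit values copied from the deleted cpu_linux_riscv64.go.
const (
	keyIMAExt0 = 0x4
	imaC       = 0x2
	imaV       = 0x4
	extZba     = 0x8
)

// hwprobePair mirrors the riscvHWProbePairs struct above.
type hwprobePair struct {
	key   int64
	value uint64
}

func main() {
	// Hypothetical kernel reply: C and Zba present, V absent.
	pair := hwprobePair{key: keyIMAExt0, value: imaC | extZba}
	if pair.key != -1 { // the kernel sets key to -1 for keys it doesn't know
		fmt.Println("C:  ", pair.value&imaC != 0)   // true
		fmt.Println("V:  ", pair.value&imaV != 0)   // false
		fmt.Println("Zba:", pair.value&extZba != 0) // true
	}
}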
- - pairs := []riscvHWProbePairs{ - {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, - {riscv_HWPROBE_KEY_CPUPERF_0, 0}, - } - - // This call only indicates that extensions are supported if they are implemented on all cores. - if riscvHWProbe(pairs, 0) { - if pairs[0].key != -1 { - v := uint(pairs[0].value) - RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) - RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) - RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) - RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) - RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) - RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB) - RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC) - RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB) - RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG) - RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT) - // Cryptography shorthand extensions - RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) && - isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt - RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc - RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg - RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) && - isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt - RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc - RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg - } - if pairs[1].key != -1 { - v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK - RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST - } - } - - // Let's double check with HWCAP if the C extension does not appear to be supported. - // This may happen if we're running on a kernel older than 6.4. - - if !RISCV64.HasC { - RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) - } -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} - -// riscvHWProbe is a simplified version of the generated wrapper function found in -// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the -// cpuCount and cpus parameters which we do not need. We always want to pass 0 for -// these parameters here so the kernel only reports the extensions that are present -// on all cores. -func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { - var _zero uintptr - var p0 unsafe.Pointer - if len(pairs) > 0 { - p0 = unsafe.Pointer(&pairs[0]) - } else { - p0 = unsafe.Pointer(&_zero) - } - - _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) - return e1 == 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go deleted file mode 100644 index 1517ac61d3..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
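The hwcap_RISCV_ISA_C constant a few hunks up deserves a worked example: single-letter RISC-V extensions occupy the HWCAP bit equal to their alphabetical index, so 'C' - 'A' puts the C extension at bit 2. A quick check with a made-up RV64IMAFDC-style word:

package main

import "fmt"

func main() {
	const isaC = 1 << ('C' - 'A')
	fmt.Printf("C extension bit = %#x\n", isaC) // 0x4, i.e. bit 2

	hwCap := uint(0x112d) // hypothetical AT_HWCAP: bits for A, C, D, F, I, M
	fmt.Println("has C:", hwCap&isaC != 0) // true
}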
- -package cpu - -const ( - // bit mask values from /usr/include/bits/hwcap.h - hwcap_ZARCH = 2 - hwcap_STFLE = 4 - hwcap_MSA = 8 - hwcap_LDISP = 16 - hwcap_EIMM = 32 - hwcap_DFP = 64 - hwcap_ETF3EH = 256 - hwcap_VX = 2048 - hwcap_VXE = 8192 -) - -func initS390Xbase() { - // test HWCAP bit vector - has := func(featureMask uint) bool { - return hwCap&featureMask == featureMask - } - - // mandatory - S390X.HasZARCH = has(hwcap_ZARCH) - - // optional - S390X.HasSTFLE = has(hwcap_STFLE) - S390X.HasLDISP = has(hwcap_LDISP) - S390X.HasEIMM = has(hwcap_EIMM) - S390X.HasETF3EH = has(hwcap_ETF3EH) - S390X.HasDFP = has(hwcap_DFP) - S390X.HasMSA = has(hwcap_MSA) - S390X.HasVX = has(hwcap_VX) - if S390X.HasVX { - S390X.HasVXE = has(hwcap_VXE) - } -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.go deleted file mode 100644 index 45ecb29ae7..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build loong64 - -package cpu - -const cacheLineSize = 64 - -// Bit fields for CPUCFG registers, Related reference documents: -// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg -const ( - // CPUCFG1 bits - cpucfg1_CRC32 = 1 << 25 - - // CPUCFG2 bits - cpucfg2_LAM_BH = 1 << 27 - cpucfg2_LAMCAS = 1 << 28 -) - -func initOptions() { - options = []option{ - {Name: "lsx", Feature: &Loong64.HasLSX}, - {Name: "lasx", Feature: &Loong64.HasLASX}, - {Name: "crc32", Feature: &Loong64.HasCRC32}, - {Name: "lam_bh", Feature: &Loong64.HasLAM_BH}, - {Name: "lamcas", Feature: &Loong64.HasLAMCAS}, - } - - // The CPUCFG data on Loong64 only reflects the hardware capabilities, - // not the kernel support status, so features such as LSX and LASX that - // require kernel support cannot be obtained from the CPUCFG data. - // - // These features only require hardware capability support and do not - // require kernel specific support, so they can be obtained directly - // through CPUCFG - cfg1 := get_cpucfg(1) - cfg2 := get_cpucfg(2) - - Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32) - Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS) - Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH) -} - -func get_cpucfg(reg uint32) uint32 - -func cfgIsSet(cfg uint32, val uint32) bool { - return cfg&val != 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.s deleted file mode 100644 index 71cbaf1ce2..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.s +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// func get_cpucfg(reg uint32) uint32 -TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0 - MOVW reg+0(FP), R5 - // CPUCFG R5, R4 = 0x00006ca4 - WORD $0x00006ca4 - MOVW R4, ret+8(FP) - RET diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_mips64x.go deleted file mode 100644 index fedb00cc4c..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
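Note that initS390Xbase above tests hwCap&featureMask == featureMask rather than != 0: with a multi-bit mask that requires every bit to be present, not just any one of them. A tiny illustration with a made-up two-bit mask:

package main

import "fmt"

func main() {
	const mask = 0b0110   // two required bits
	hwCap := uint(0b0010) // only one of them present

	fmt.Println(hwCap&mask != 0)    // true:  an "any bit" test would wrongly pass
	fmt.Println(hwCap&mask == mask) // false: the "all bits" test correctly fails
}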
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips64 || mips64le - -package cpu - -const cacheLineSize = 32 - -func initOptions() { - options = []option{ - {Name: "msa", Feature: &MIPS64X.HasMSA}, - } -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_mipsx.go deleted file mode 100644 index ffb4ec7eb3..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips || mipsle - -package cpu - -const cacheLineSize = 32 - -func initOptions() {} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go deleted file mode 100644 index ebfb3fc8e7..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// Minimal copy of functionality from x/sys/unix so the cpu package can call -// sysctl without depending on x/sys/unix. - -const ( - _CTL_QUERY = -2 - - _SYSCTL_VERS_1 = 0x1000000 -) - -var _zero uintptr - -func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6( - syscall.SYS___SYSCTL, - uintptr(_p0), - uintptr(len(mib)), - uintptr(unsafe.Pointer(old)), - uintptr(unsafe.Pointer(oldlen)), - uintptr(unsafe.Pointer(new)), - uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -type sysctlNode struct { - Flags uint32 - Num int32 - Name [32]int8 - Ver uint32 - __rsvd uint32 - Un [16]byte - _sysctl_size [8]byte - _sysctl_func [8]byte - _sysctl_parent [8]byte - _sysctl_desc [8]byte -} - -func sysctlNodes(mib []int32) ([]sysctlNode, error) { - var olen uintptr - - // Get a list of all sysctl nodes below the given MIB by performing - // a sysctl for the given MIB with CTL_QUERY appended. - mib = append(mib, _CTL_QUERY) - qnode := sysctlNode{Flags: _SYSCTL_VERS_1} - qp := (*byte)(unsafe.Pointer(&qnode)) - sz := unsafe.Sizeof(qnode) - if err := sysctl(mib, nil, &olen, qp, sz); err != nil { - return nil, err - } - - // Now that we know the size, get the actual nodes. - nodes := make([]sysctlNode, olen/sz) - np := (*byte)(unsafe.Pointer(&nodes[0])) - if err := sysctl(mib, np, &olen, qp, sz); err != nil { - return nil, err - } - - return nodes, nil -} - -func nametomib(name string) ([]int32, error) { - // Split name into components. - var parts []string - last := 0 - for i := 0; i < len(name); i++ { - if name[i] == '.' { - parts = append(parts, name[last:i]) - last = i + 1 - } - } - parts = append(parts, name[last:]) - - mib := []int32{} - // Discover the nodes and construct the MIB OID. 
- for partno, part := range parts { - nodes, err := sysctlNodes(mib) - if err != nil { - return nil, err - } - for _, node := range nodes { - n := make([]byte, 0) - for i := range node.Name { - if node.Name[i] != 0 { - n = append(n, byte(node.Name[i])) - } - } - if string(n) == part { - mib = append(mib, int32(node.Num)) - break - } - } - if len(mib) != partno+1 { - return nil, err - } - } - - return mib, nil -} - -// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h> -type aarch64SysctlCPUID struct { - midr uint64 /* Main ID Register */ - revidr uint64 /* Revision ID Register */ - mpidr uint64 /* Multiprocessor Affinity Register */ - aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ - aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ - aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ - aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ - aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ - aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ - aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ - aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ - aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ - aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ - mvfr0 uint32 /* Media and VFP Feature Register 0 */ - mvfr1 uint32 /* Media and VFP Feature Register 1 */ - mvfr2 uint32 /* Media and VFP Feature Register 2 */ - pad uint32 - clidr uint64 /* Cache Level ID Register */ - ctr uint64 /* Cache Type Register */ -} - -func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { - mib, err := nametomib(name) - if err != nil { - return nil, err - } - - out := aarch64SysctlCPUID{} - n := unsafe.Sizeof(out) - _, _, errno := syscall.Syscall6( - syscall.SYS___SYSCTL, - uintptr(unsafe.Pointer(&mib[0])), - uintptr(len(mib)), - uintptr(unsafe.Pointer(&out)), - uintptr(unsafe.Pointer(&n)), - uintptr(0), - uintptr(0)) - if errno != 0 { - return nil, errno - } - return &out, nil -} - -func doinit() { - cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") - if err != nil { - setMinimalFeatures() - return - } - parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) - - Initialized = true -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go deleted file mode 100644 index 85b64d5ccb..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// Minimal copy of functionality from x/sys/unix so the cpu package can call -// sysctl without depending on x/sys/unix. - -const ( - // From OpenBSD's sys/sysctl.h. - _CTL_MACHDEP = 7 - - // From OpenBSD's machine/cpu.h.
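The nametomib walk above resolves one dotted component per kernel round trip: list the children of the MIB built so far, find the component by name, and append its node number. A self-contained sketch of the same walk, with a fake node table standing in for the kernel's CTL_QUERY replies:

package main

import (
	"fmt"
	"strings"
)

// Fake kernel namespace: child nodes per parent MIB, keyed by the MIB's
// string form. A stand-in for what sysctlNodes returns from the kernel.
var children = map[string]map[string]int32{
	"[]":    {"machdep": 7},
	"[7]":   {"cpu0": 3},
	"[7 3]": {"cpu_id": 1},
}

func nametomib(name string) ([]int32, bool) {
	mib := []int32{}
	for _, part := range strings.Split(name, ".") {
		num, ok := children[fmt.Sprint(mib)][part]
		if !ok {
			return nil, false
		}
		mib = append(mib, num) // one level resolved per query
	}
	return mib, true
}

func main() {
	mib, ok := nametomib("machdep.cpu0.cpu_id")
	fmt.Println(mib, ok) // [7 3 1] true
}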
- _CPU_ID_AA64ISAR0 = 2 - _CPU_ID_AA64ISAR1 = 3 -) - -// Implemented in the runtime package (runtime/sys_openbsd3.go) -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) - -//go:linkname syscall_syscall6 syscall.syscall6 - -func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -var libc_sysctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" - -func sysctlUint64(mib []uint32) (uint64, bool) { - var out uint64 - nout := unsafe.Sizeof(out) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { - return 0, false - } - return out, true -} - -func doinit() { - setMinimalFeatures() - - // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. - isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) - if !ok { - return - } - isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) - if !ok { - return - } - parseARM64SystemRegisters(isar0, isar1, 0) - - Initialized = true -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s deleted file mode 100644 index 054ba05d60..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm.go deleted file mode 100644 index e9ecf2a456..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && arm - -package cpu - -func archInit() {} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go deleted file mode 100644 index 5341e7f88d..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && !netbsd && !openbsd && arm64 - -package cpu - -func doinit() {} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go deleted file mode 100644 index 5f8f2419ab..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !linux && (mips64 || mips64le) - -package cpu - -func archInit() { - Initialized = true -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go deleted file mode 100644 index 89608fba27..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !aix && !linux && (ppc64 || ppc64le) - -package cpu - -func archInit() { - PPC64.IsPOWER8 = true - Initialized = true -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go deleted file mode 100644 index 5ab87808f7..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && riscv64 - -package cpu - -func archInit() { - Initialized = true -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_x86.go deleted file mode 100644 index a0fd7e2f75..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_other_x86.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) - -package cpu - -func darwinSupportsAVX512() bool { - panic("only implemented for gc && amd64 && darwin") -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go deleted file mode 100644 index c14f12b149..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ppc64 || ppc64le - -package cpu - -const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "darn", Feature: &PPC64.HasDARN}, - {Name: "scv", Feature: &PPC64.HasSCV}, - } -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_riscv64.go deleted file mode 100644 index 0f617aef54..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
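The cpu_other_*.go stubs above and below are all the same Go idiom: one file per platform combination, selected at compile time by //go:build constraints, each defining the same function so callers never branch at runtime. A minimal two-file sketch of the pattern (hypothetical file and package names):

// detect_linux_arm64.go
//go:build linux && arm64

package cpudetect

func archInit() { /* e.g. read HWCAP and set feature flags */ }

// detect_other.go
//go:build !(linux && arm64)

package cpudetect

// archInit is a deliberate no-op where no detection mechanism exists,
// mirroring the cpu_other_*.go stubs in the diff.
func archInit() {}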
- -//go:build riscv64 - -package cpu - -const cacheLineSize = 64 - -func initOptions() { - options = []option{ - {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, - {Name: "c", Feature: &RISCV64.HasC}, - {Name: "v", Feature: &RISCV64.HasV}, - {Name: "zba", Feature: &RISCV64.HasZba}, - {Name: "zbb", Feature: &RISCV64.HasZbb}, - {Name: "zbs", Feature: &RISCV64.HasZbs}, - // RISC-V Cryptography Extensions - {Name: "zvbb", Feature: &RISCV64.HasZvbb}, - {Name: "zvbc", Feature: &RISCV64.HasZvbc}, - {Name: "zvkb", Feature: &RISCV64.HasZvkb}, - {Name: "zvkg", Feature: &RISCV64.HasZvkg}, - {Name: "zvkt", Feature: &RISCV64.HasZvkt}, - {Name: "zvkn", Feature: &RISCV64.HasZvkn}, - {Name: "zvknc", Feature: &RISCV64.HasZvknc}, - {Name: "zvkng", Feature: &RISCV64.HasZvkng}, - {Name: "zvks", Feature: &RISCV64.HasZvks}, - {Name: "zvksc", Feature: &RISCV64.HasZvksc}, - {Name: "zvksg", Feature: &RISCV64.HasZvksg}, - } -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.go deleted file mode 100644 index 5881b8833f..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 256 - -func initOptions() { - options = []option{ - {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, - {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, - {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, - {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, - {Name: "dfp", Feature: &S390X.HasDFP}, - {Name: "etf3eh", Feature: &S390X.HasETF3EH}, - {Name: "msa", Feature: &S390X.HasMSA}, - {Name: "aes", Feature: &S390X.HasAES}, - {Name: "aescbc", Feature: &S390X.HasAESCBC}, - {Name: "aesctr", Feature: &S390X.HasAESCTR}, - {Name: "aesgcm", Feature: &S390X.HasAESGCM}, - {Name: "ghash", Feature: &S390X.HasGHASH}, - {Name: "sha1", Feature: &S390X.HasSHA1}, - {Name: "sha256", Feature: &S390X.HasSHA256}, - {Name: "sha3", Feature: &S390X.HasSHA3}, - {Name: "sha512", Feature: &S390X.HasSHA512}, - {Name: "vx", Feature: &S390X.HasVX}, - {Name: "vxe", Feature: &S390X.HasVXE}, - } -} - -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// facility is a bit index for the named facility. 
-type facility uint8 - -const ( - // mandatory facilities - zarch facility = 1 // z architecture mode is active - stflef facility = 7 // store-facility-list-extended - ldisp facility = 18 // long-displacement - eimm facility = 21 // extended-immediate - - // miscellaneous facilities - dfp facility = 42 // decimal-floating-point - etf3eh facility = 30 // extended-translation 3 enhancement - - // cryptography facilities - msa facility = 17 // message-security-assist - msa3 facility = 76 // message-security-assist extension 3 - msa4 facility = 77 // message-security-assist extension 4 - msa5 facility = 57 // message-security-assist extension 5 - msa8 facility = 146 // message-security-assist extension 8 - msa9 facility = 155 // message-security-assist extension 9 - - // vector facilities - vx facility = 129 // vector facility - vxe facility = 135 // vector-enhancements 1 - vxe2 facility = 148 // vector-enhancements 2 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -// function is the code for the named cryptographic function. -type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. -func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { - initS390Xbase() - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) 
- } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s deleted file mode 100644 index 1fb4b70133..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -#include "textflag.h" - -// func stfle() facilityList -TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 - MOVD $ret+0(FP), R1 - MOVD $3, R0 // last doubleword index to store - XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) - WORD $0xb2b01000 // store facility list extended (STFLE) - RET - -// func kmQuery() queryResult -TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KM-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92E0024 // cipher message (KM) - RET - -// func kmcQuery() queryResult -TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMC-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92F0024 // cipher message with chaining (KMC) - RET - -// func kmctrQuery() queryResult -TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMCTR-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92D4024 // cipher message with counter (KMCTR) - RET - -// func kmaQuery() queryResult -TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMA-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xb9296024 // cipher message with authentication (KMA) - RET - -// func kimdQuery() queryResult -TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KIMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93E0024 // compute intermediate message digest (KIMD) - RET - -// func klmdQuery() queryResult -TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KLMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93F0024 // compute last message digest (KLMD) - RET diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go deleted file mode 100644 index 384787ea30..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build wasm - -package cpu - -// We're compiling the cpu package for an unknown (software-abstracted) CPU. -// Make CacheLinePad an empty struct and hope that the usual struct alignment -// rules are good enough. 
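The s390x facility numbering above is big-endian within the STFLE result: bit 0 is the most significant bit of the first doubleword, which is why bitIsSet shifts a leading one to the right instead of a trailing one to the left. A worked example for facility 129, the vector facility: 129/64 selects the third doubleword and (1<<63)>>(129%64) forms the in-word mask.

package main

import "fmt"

// bitIsSet matches the deleted helper: big-endian bit indexing over doublewords.
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	var bits [4]uint64
	bits[129/64] = (1 << 63) >> (129 % 64) // pretend STFLE reported facility 129 (vx)

	fmt.Println(bitIsSet(bits[:], 129)) // true
	fmt.Println(bitIsSet(bits[:], 135)) // false: vxe not reported
}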
- -const cacheLineSize = 0 - -func initOptions() {} - -func archInit() {} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_x86.go b/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_x86.go deleted file mode 100644 index 1e642f3304..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build 386 || amd64 || amd64p32 - -package cpu - -import "runtime" - -const cacheLineSize = 64 - -func initOptions() { - options = []option{ - {Name: "adx", Feature: &X86.HasADX}, - {Name: "aes", Feature: &X86.HasAES}, - {Name: "avx", Feature: &X86.HasAVX}, - {Name: "avx2", Feature: &X86.HasAVX2}, - {Name: "avx512", Feature: &X86.HasAVX512}, - {Name: "avx512f", Feature: &X86.HasAVX512F}, - {Name: "avx512cd", Feature: &X86.HasAVX512CD}, - {Name: "avx512er", Feature: &X86.HasAVX512ER}, - {Name: "avx512pf", Feature: &X86.HasAVX512PF}, - {Name: "avx512vl", Feature: &X86.HasAVX512VL}, - {Name: "avx512bw", Feature: &X86.HasAVX512BW}, - {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, - {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, - {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, - {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, - {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, - {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, - {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, - {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, - {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, - {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, - {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, - {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, - {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, - {Name: "amxtile", Feature: &X86.HasAMXTile}, - {Name: "amxint8", Feature: &X86.HasAMXInt8}, - {Name: "amxbf16", Feature: &X86.HasAMXBF16}, - {Name: "bmi1", Feature: &X86.HasBMI1}, - {Name: "bmi2", Feature: &X86.HasBMI2}, - {Name: "cx16", Feature: &X86.HasCX16}, - {Name: "erms", Feature: &X86.HasERMS}, - {Name: "fma", Feature: &X86.HasFMA}, - {Name: "osxsave", Feature: &X86.HasOSXSAVE}, - {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, - {Name: "popcnt", Feature: &X86.HasPOPCNT}, - {Name: "rdrand", Feature: &X86.HasRDRAND}, - {Name: "rdseed", Feature: &X86.HasRDSEED}, - {Name: "sse3", Feature: &X86.HasSSE3}, - {Name: "sse41", Feature: &X86.HasSSE41}, - {Name: "sse42", Feature: &X86.HasSSE42}, - {Name: "ssse3", Feature: &X86.HasSSSE3}, - {Name: "avxifma", Feature: &X86.HasAVXIFMA}, - {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, - {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, - - // These capabilities should always be enabled on amd64: - {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, - } -} - -func archInit() { - - Initialized = true - - maxID, _, _, _ := cpuid(0, 0) - - if maxID < 1 { - return - } - - _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasCX16 = isSet(13, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) - - var osSupportsAVX, osSupportsAVX512 bool - // For XGETBV, OSXSAVE bit is required and 
sufficient. - if X86.HasOSXSAVE { - eax, _ := xgetbv() - // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) - - if runtime.GOOS == "darwin" { - // Darwin requires special AVX512 checks, see cpu_darwin_x86.go - osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() - } else { - // Check if OPMASK and ZMM registers have OS support. - osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) - } - } - - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX - - if maxID < 7 { - return - } - - eax7, ebx7, ecx7, edx7 := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) - - X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension - if X86.HasAVX512 { - X86.HasAVX512F = true - X86.HasAVX512CD = isSet(28, ebx7) - X86.HasAVX512ER = isSet(27, ebx7) - X86.HasAVX512PF = isSet(26, ebx7) - X86.HasAVX512VL = isSet(31, ebx7) - X86.HasAVX512BW = isSet(30, ebx7) - X86.HasAVX512DQ = isSet(17, ebx7) - X86.HasAVX512IFMA = isSet(21, ebx7) - X86.HasAVX512VBMI = isSet(1, ecx7) - X86.HasAVX5124VNNIW = isSet(2, edx7) - X86.HasAVX5124FMAPS = isSet(3, edx7) - X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) - X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) - X86.HasAVX512VNNI = isSet(11, ecx7) - X86.HasAVX512GFNI = isSet(8, ecx7) - X86.HasAVX512VAES = isSet(9, ecx7) - X86.HasAVX512VBMI2 = isSet(6, ecx7) - X86.HasAVX512BITALG = isSet(12, ecx7) - } - - X86.HasAMXTile = isSet(24, edx7) - X86.HasAMXInt8 = isSet(25, edx7) - X86.HasAMXBF16 = isSet(22, edx7) - - // These features depend on the second level of extended features. - if eax7 >= 1 { - eax71, _, _, edx71 := cpuid(7, 1) - if X86.HasAVX512 { - X86.HasAVX512BF16 = isSet(5, eax71) - } - if X86.HasAVX { - X86.HasAVXIFMA = isSet(23, eax71) - X86.HasAVXVNNI = isSet(4, eax71) - X86.HasAVXVNNIInt8 = isSet(4, edx71) - } - } -} - -func isSet(bitpos uint, value uint32) bool { - return value&(1<<bitpos) != 0 -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/openshift/tools/vendor/golang.org/x/sys/cpu/hwcap_linux.go deleted file mode 100644 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/hwcap_linux.go +++ /dev/null -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "os" -) - -const ( - _AT_HWCAP = 16 - _AT_HWCAP2 = 26 - - procAuxv = "/proc/self/auxv" - - uintSize = int(32 << (^uint(0) >> 63)) -) - -// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 -// These are initialized in cpu_$GOARCH.go -// and should not be changed after they are initialized. -var hwCap uint -var hwCap2 uint - -func readHWCAP() error { - // For Go 1.21+, get auxv from the Go runtime. - if a := getAuxv(); len(a) > 0 { - for len(a) >= 2 { - tag, val := a[0], uint(a[1]) - a = a[2:] - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - return nil - } - - buf, err := os.ReadFile(procAuxv) - if err != nil { - // e.g. on android /proc/self/auxv is not accessible, so silently - // ignore the error and leave Initialized = false. On some - // architectures (e.g. arm64) doinit() implements a fallback - // readout and will set Initialized = true again.
- return err - } - bo := hostByteOrder() - for len(buf) >= 2*(uintSize/8) { - var tag, val uint - switch uintSize { - case 32: - tag = uint(bo.Uint32(buf[0:])) - val = uint(bo.Uint32(buf[4:])) - buf = buf[8:] - case 64: - tag = uint(bo.Uint64(buf[0:])) - val = uint(bo.Uint64(buf[8:])) - buf = buf[16:] - } - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - return nil -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/parse.go b/openshift/tools/vendor/golang.org/x/sys/cpu/parse.go deleted file mode 100644 index 56a7e1a176..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/parse.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import "strconv" - -// parseRelease parses a dot-separated version number. It follows the semver -// syntax, but allows the minor and patch versions to be elided. -// -// This is a copy of the Go runtime's parseRelease from -// https://golang.org/cl/209597. -func parseRelease(rel string) (major, minor, patch int, ok bool) { - // Strip anything after a dash or plus. - for i := range len(rel) { - if rel[i] == '-' || rel[i] == '+' { - rel = rel[:i] - break - } - } - - next := func() (int, bool) { - for i := range len(rel) { - if rel[i] == '.' { - ver, err := strconv.Atoi(rel[:i]) - rel = rel[i+1:] - return ver, err == nil - } - } - ver, err := strconv.Atoi(rel) - rel = "" - return ver, err == nil - } - if major, ok = next(); !ok || rel == "" { - return - } - if minor, ok = next(); !ok || rel == "" { - return - } - patch, ok = next() - return -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/openshift/tools/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go deleted file mode 100644 index 4cd64c7042..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && arm64 - -package cpu - -import ( - "errors" - "io" - "os" - "strings" -) - -func readLinuxProcCPUInfo() error { - f, err := os.Open("/proc/cpuinfo") - if err != nil { - return err - } - defer f.Close() - - var buf [1 << 10]byte // enough for first CPU - n, err := io.ReadFull(f, buf[:]) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - in := string(buf[:n]) - const features = "\nFeatures : " - i := strings.Index(in, features) - if i == -1 { - return errors.New("no CPU features found") - } - in = in[i+len(features):] - if i := strings.Index(in, "\n"); i != -1 { - in = in[:i] - } - m := map[string]*bool{} - - initOptions() // need it early here; it's harmless to call twice - for _, o := range options { - m[o.Name] = o.Feature - } - // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". - m["evtstrm"] = &ARM64.HasEVTSTRM - - for _, f := range strings.Fields(in) { - if p, ok := m[f]; ok { - *p = true - } - } - return nil -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/openshift/tools/vendor/golang.org/x/sys/cpu/runtime_auxv.go deleted file mode 100644 index 5f92ac9a2e..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/runtime_auxv.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. 
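readHWCAP's file-based fallback above parses /proc/self/auxv as a flat array of native-word (tag, value) pairs. A standalone sketch of that decode over a hand-built little-endian 64-bit buffer; the tag constants match the deleted hwcap_linux.go and the values are hypothetical:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	atHWCAP  = 16
	atHWCAP2 = 26
)

func main() {
	// Hypothetical auxv contents: AT_HWCAP=0xbeef, AT_HWCAP2=0x1, AT_NULL terminator.
	buf := make([]byte, 0, 48)
	for _, w := range []uint64{atHWCAP, 0xbeef, atHWCAP2, 0x1, 0, 0} {
		buf = binary.LittleEndian.AppendUint64(buf, w)
	}

	var hwCap, hwCap2 uint
	for len(buf) >= 16 { // one (tag, value) pair per 16 bytes on 64-bit
		tag := binary.LittleEndian.Uint64(buf)
		val := binary.LittleEndian.Uint64(buf[8:])
		buf = buf[16:]
		switch tag {
		case atHWCAP:
			hwCap = uint(val)
		case atHWCAP2:
			hwCap2 = uint(val)
		}
	}
	fmt.Printf("hwCap=%#x hwCap2=%#x\n", hwCap, hwCap2)
}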
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) -// on platforms that use auxv. -var getAuxvFn func() []uintptr - -func getAuxv() []uintptr { - if getAuxvFn == nil { - return nil - } - return getAuxvFn() -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/openshift/tools/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go deleted file mode 100644 index 4c9788ea8e..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package cpu - -import ( - _ "unsafe" // for linkname -) - -//go:linkname runtime_getAuxv runtime.getAuxv -func runtime_getAuxv() []uintptr - -func init() { - getAuxvFn = runtime_getAuxv -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go deleted file mode 100644 index 1b9ccb091a..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Recreate a getsystemcfg syscall handler instead of -// using the one provided by x/sys/unix to avoid having -// the dependency between them. (See golang.org/issue/32102) -// Moreover, this file will be used during the building of -// gccgo's libgo and thus must not used a CGo method. - -//go:build aix && gccgo - -package cpu - -import ( - "syscall" -) - -//extern getsystemcfg -func gccgoGetsystemcfg(label uint32) (r uint64) - -func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { - r1 = uintptr(gccgoGetsystemcfg(uint32(label))) - e1 = syscall.GetErrno() - return -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go deleted file mode 100644 index e8b6cdbe9a..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Minimal copy of x/sys/unix so the cpu package can make a -// system call on AIX without depending on x/sys/unix. -// (See golang.org/issue/32102) - -//go:build aix && ppc64 && gc - -package cpu - -import ( - "syscall" - "unsafe" -) - -//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" - -//go:linkname libc_getsystemcfg libc_getsystemcfg - -type syscallFunc uintptr - -var libc_getsystemcfg syscallFunc - -type errno = syscall.Errno - -// Implemented in runtime/syscall_aix.go. 
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) - -func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) - return -} diff --git a/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go deleted file mode 100644 index 4d0888b0c0..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Minimal copy of x/sys/unix so the cpu package can make a -// system call on Darwin without depending on x/sys/unix. - -//go:build darwin && amd64 && gc - -package cpu - -import ( - "syscall" - "unsafe" -) - -type _C_int int32 - -// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 -func darwinOSRelease(release *[256]byte) error { - // from x/sys/unix/zerrors_openbsd_amd64.go - const ( - CTL_KERN = 0x1 - KERN_OSRELEASE = 0x2 - ) - - mib := []_C_int{CTL_KERN, KERN_OSRELEASE} - n := unsafe.Sizeof(*release) - - return sysctl(mib, &release[0], &n, nil, 0) -} - -type Errno = syscall.Errno - -var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. - -// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - if _, _, err := syscall_syscall6( - libc_sysctl_trampoline_addr, - uintptr(_p0), - uintptr(len(mib)), - uintptr(unsafe.Pointer(old)), - uintptr(unsafe.Pointer(oldlen)), - uintptr(unsafe.Pointer(new)), - uintptr(newlen), - ); err != 0 { - return err - } - - return nil -} - -var libc_sysctl_trampoline_addr uintptr - -// adapted from internal/cpu/cpu_arm64_darwin.go -func darwinSysctlEnabled(name []byte) bool { - out := int32(0) - nout := unsafe.Sizeof(out) - if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { - return false - } - return out > 0 -} - -//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" - -var libc_sysctlbyname_trampoline_addr uintptr - -// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix -func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { - if _, _, err := syscall_syscall6( - libc_sysctlbyname_trampoline_addr, - uintptr(unsafe.Pointer(name)), - uintptr(unsafe.Pointer(old)), - uintptr(unsafe.Pointer(oldlen)), - uintptr(unsafe.Pointer(new)), - uintptr(newlen), - 0, - ); err != 0 { - return err - } - - return nil -} - -//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" - -// Implemented in the runtime package (runtime/sys_darwin.go) -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/openshift/tools/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/openshift/tools/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go deleted file mode 100644 index 73687de748..0000000000 --- a/openshift/tools/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ 
/dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.5 - -package plan9 - -import "syscall" - -func fixwd() { - syscall.Fixwd() -} - -func Getwd() (wd string, err error) { - return syscall.Getwd() -} - -func Chdir(path string) error { - return syscall.Chdir(path) -} diff --git a/openshift/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/openshift/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go index fb94582184..7a76489db1 100644 --- a/openshift/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/openshift/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,22 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !go1.5 - package plan9 +import "syscall" + func fixwd() { + syscall.Fixwd() } func Getwd() (wd string, err error) { - fd, err := open(".", O_RDONLY) - if err != nil { - return "", err - } - defer Close(fd) - return Fd2path(fd) + return syscall.Getwd() } func Chdir(path string) error { - return chdir(path) + return syscall.Chdir(path) } diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/affinity_linux.go b/openshift/tools/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81acd0..3ea470387b 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. func (s *CPUSet) Zero() { + clear(s[:]) +} + +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { for i := range s { - s[i] = 0 + s[i] = ^cpuMask(0) } } diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/fdset.go b/openshift/tools/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18cd0..62ed12645f 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/fdset.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go b/openshift/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae4c..309f5a2b0c 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? 
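The three hunks above replace open-coded zeroing loops with the clear builtin (available since Go 1.21) and add CPUSet.Fill. A minimal sketch of Fill's documented reset-affinity use case, assuming Linux and this revision of x/sys/unix:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	var set unix.CPUSet
    	set.Fill() // every representable CPU bit; the kernel ignores invalid ones
    	// pid 0 means the calling thread.
    	if err := unix.SchedSetaffinity(0, &set); err != nil {
    		fmt.Println("sched_setaffinity:", err)
    		return
    	}
    	fmt.Println("affinity reset to all online CPUs")
    }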
For now we can provide helpers such as diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/mkall.sh b/openshift/tools/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d374d..d0ed611912 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/mkall.sh +++ b/openshift/tools/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS exit diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/mkerrors.sh b/openshift/tools/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c31..42517077c4 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/openshift/tools/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -226,6 +226,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -349,6 +350,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' @@ -526,6 +530,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad3b..7838ca5db2 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch 
int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_linux.go b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_linux.go index 4958a65708..06c0eea6fb 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } @@ -2645,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099af..34a4676973 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc3955477..18a3d9bdab 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, 
link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4f432bfe8f..d0a75da572 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,6 +319,7 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f AUDIT_IPE_ACCESS = 0x58c @@ -327,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -491,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -527,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -554,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -843,24 +849,90 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -936,11 +1008,10 @@ const ( EPOLL_CTL_MOD 
= 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -1147,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -1203,13 +1284,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 @@ -1224,9 +1310,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1240,6 +1329,7 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 @@ -1247,6 +1337,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1266,6 +1357,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1574,7 +1666,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1625,7 +1716,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1687,7 +1777,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1809,7 +1898,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2259,7 +2352,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + 
NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 
0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2446,6 +2699,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2485,6 +2791,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2644,6 +2954,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2724,6 +3038,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2732,6 +3047,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -2787,7 +3119,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2864,10 +3196,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2917,11 +3251,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 
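The new NN_* and NT_* constants above come in pairs: an ELF note's n_type value (NT_*) is only meaningful under the owner name carried in the same note header, which is what the matching NN_* string supplies. A hedged sketch of matching the classic core-dump register note while walking a PT_NOTE segment:

    package notes

    import "golang.org/x/sys/unix"

    // isPrstatus reports whether a note parsed out of a PT_NOTE segment is
    // the core-dump register note: owner "CORE" (NN_PRSTATUS) with n_type
    // 0x1 (NT_PRSTATUS). Each NN_* string pairs with the NT_* value of the
    // same suffix in the table above.
    func isPrstatus(owner string, ntype uint32) bool {
    	return owner == unix.NN_PRSTATUS && ntype == unix.NT_PRSTATUS
    }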
RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2970,6 +3306,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2987,11 +3324,12 @@ const ( RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x7f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3059,6 +3397,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3271,6 +3650,7 @@ const ( STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3284,6 +3664,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3322,7 +3712,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3392,8 +3782,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3503,6 +3891,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3515,14 +3904,14 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf @@ -3559,7 +3948,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3673,6 +4062,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 
0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c7..1c37f9fbc4 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -372,6 +374,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda535..6f54d34aef 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -373,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab86..783ec5c126 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -378,6 +380,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d3..ca83d3ba16 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go 
b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 22912cdaa9..607e611c0c 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -365,6 +367,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37a..b9cb5bd3c0 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96a..65b078a638 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902a..5298a3033d 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e27..7bc557c876 100644 --- 
a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c224272615..152399bb04 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -426,6 +428,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee13..1a1ce2409c 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941f..4231a1fb57 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc42..21c0e95266 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 
@@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -362,6 +364,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb80..f00d1cd7cf 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -434,6 +436,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e57514..bc8d539e6a 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -473,6 +475,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb2f..8935d10a31 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413c4..b4609c20c2 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" 
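Two things worth calling out in the hunks above: the Solaris import list being edited here fixes the long-misspelled __xnet_llisten symbol to __xnet_listen without changing the exported Listen signature, and zsyscall_linux.go gains the generated stub behind the new SetMemPolicy wrapper. A minimal sketch of the wrapper's use, assuming Linux; note that set_mempolicy(2) takes a NUMA node bitmask even though the wrapper reuses CPUSet for it:

    package main

    import (
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	var nodes unix.CPUSet
    	nodes.Set(0) // bit 0 = NUMA node 0 here, not CPU 0
    	if err := unix.SetMemPolicy(unix.MPOL_BIND, &nodes); err != nil {
    		log.Fatalf("set_mempolicy: %v", err)
    	}
    	log.Println("future allocations bound to node 0")
    }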
//go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -221,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -371,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306..aca56ee494 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e..2ea1ef58c3 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e5029744..d22c8af319 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51b..5ee264ae97 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18..f9f03ebf5f 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b..87c2118e84 
100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278..391ad102fb 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e6..5656157757 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc22..0482b52e3c 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb1..71806f08f3 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b446365025..e35a710582 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c188..2aea476705 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 8405391698..6c9bb4e560 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git 
a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6..680bc9915a 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d..620f271052 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe6472..c1a4670171 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -114,8 +114,10 @@ type Statx_t struct { Atomic_write_unit_min uint32 Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 _ [1]uint32 - _ [9]uint64 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -629,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -686,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -737,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -2226,8 +2241,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + NFT_PAYLOAD_TUN_HEADER = 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2314,6 +2332,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2594,8 +2617,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3041,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS 
= 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3075,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { @@ -3538,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 @@ -3545,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -3802,7 +3858,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2d + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3907,17 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 0x30 ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3949,7 +4024,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4015,7 +4095,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x6 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4101,6 +4183,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4613,6 +4708,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4623,6 +4719,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f 
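On the flow-type block added a little above: TCP_V4_FLOW, UDP_V4_FLOW, TCP_V6_FLOW, UDP_V6_FLOW, ESP_V4_FLOW, ESP_V6_FLOW, IP_USER_FLOW, IPV6_USER_FLOW, IPV6_FLOW, and ETHER_FLOW were removed from zerrors_linux.go earlier in this diff and re-declared in ztypes_linux.go next to the other ethtool definitions. The values are unchanged, so the move is source-compatible:

    package notes

    import "golang.org/x/sys/unix"

    // Values are unchanged by the move; only the declaring file differs,
    // so references like these keep compiling against either layout.
    var rxFlowKeys = []uint32{
    	unix.TCP_V4_FLOW,    // 0x1
    	unix.UDP_V4_FLOW,    // 0x2
    	unix.IPV6_USER_FLOW, // 0xe
    	unix.ETHER_FLOW,     // 0x12
    }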
NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4682,6 +4779,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4717,6 +4815,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4747,9 +4846,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14d + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4774,9 +4874,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED = 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4809,12 +4912,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4943,7 +5048,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4978,6 +5085,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -5001,6 +5110,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -5032,6 +5145,9 @@ const ( NL80211_BSS_STATUS = 0x9 NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -5117,7 +5233,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 
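Most of the nl80211 additions in these hunks are plain attribute IDs for the generic-netlink wire format. For orientation, a hand-packed netlink attribute carrying NL80211_ATTR_MLO_LINK_ID (0x139 above) could look like the sketch below; netlink attributes use host byte order, a little-endian host is assumed, and no generic-netlink socket plumbing is shown:

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/sys/unix"
)

// mloLinkIDAttr is a hypothetical helper that packs an nlattr header plus a
// one-byte payload for the MLO link ID, padded to netlink's 4-byte alignment.
func mloLinkIDAttr(linkID uint8) []byte {
	buf := make([]byte, 8) // 4-byte header + 1 payload byte + 3 bytes padding
	binary.LittleEndian.PutUint16(buf[0:2], unix.SizeofNlAttr+1) // nla_len excludes padding
	binary.LittleEndian.PutUint16(buf[2:4], unix.NL80211_ATTR_MLO_LINK_ID)
	buf[4] = linkID
	return buf
}

func main() {
	fmt.Printf("% x\n", mloLinkIDAttr(1))
}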
NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED = 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5161,6 +5278,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5180,6 +5298,7 @@ const ( NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5247,6 +5366,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5262,6 +5382,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5281,9 +5402,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5295,8 +5419,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5343,7 +5469,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5351,12 +5480,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5364,8 
+5495,11 @@ const ( NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5430,6 +5564,7 @@ const ( NL80211_IFTYPE_STATION = 0x2 NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5458,9 +5593,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5703,11 +5839,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5753,6 +5894,8 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5770,14 +5913,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ -5788,7 +5936,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5849,6 +6000,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -6007,6 +6159,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + 
NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3 + NL80211_WIPHY_RADIO_ATTR_MAX = 0x4 + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1 NL80211_WMMR_AIFSN = 0x3 NL80211_WMMR_CW_MAX = 0x2 NL80211_WMMR_CW_MIN = 0x1 @@ -6038,6 +6197,7 @@ const ( NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14 NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc @@ -6176,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da43f..485f2d3a1b 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,7 +282,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -338,6 +338,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e1864..ecbd1ad8bc 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -351,6 +351,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108b6..02f0463a44 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ 
-91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,7 +273,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -329,6 +329,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f1f..6f4d400d24 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -330,6 +330,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2f28fe26c1..cd532cfa55 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -331,6 +331,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 71d6cac2f1..4133620851 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + 
Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 8596d45356..eaa37eb718 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index cd60ea1866..98ae6a1e4a 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b0ae420c48..cae1961594 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 8359728759..6ce3b4e028 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,7 +285,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total 
uint64 Blkio_count uint64 @@ -341,6 +341,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 69eb6a5c68..c7429c6a14 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5f583cb62b..4bf4baf4ca 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ad05b51a60..e9709d70af 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -358,6 +358,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cf3ce90037..fb44268ca7 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ 
b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -353,6 +353,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 590b56739c..9c38265c74 100644 --- a/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/openshift/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -335,6 +335,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/openshift/tools/vendor/golang.org/x/sys/windows/syscall_windows.go b/openshift/tools/vendor/golang.org/x/sys/windows/syscall_windows.go index 640f6b153f..69439df2a4 100644 --- a/openshift/tools/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/openshift/tools/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW @@ -890,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = 
iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -914,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. +type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 diff --git a/openshift/tools/vendor/golang.org/x/sys/windows/types_windows.go b/openshift/tools/vendor/golang.org/x/sys/windows/types_windows.go index 958bcf47a3..6e4f50eb48 100644 --- a/openshift/tools/vendor/golang.org/x/sys/windows/types_windows.go +++ b/openshift/tools/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. +const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 @@ -1976,6 +1992,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 @@ -2298,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. +const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. 
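The new iphlpapi wrappers in this file (GetIpForwardTable2, FreeMibTable, NotifyRouteChange2) pair with the MibIpForwardTable2 type and its Rows helper added further down. A sketch of dumping the IPv4 route table with them, assuming only what this diff itself declares plus the long-standing windows.AF_INET constant:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	var table *windows.MibIpForwardTable2
	// AF_INET restricts the dump to IPv4 routes; AF_UNSPEC would return both families.
	if err := windows.GetIpForwardTable2(windows.AF_INET, &table); err != nil {
		panic(err)
	}
	// The table is allocated by iphlpapi and must be released with FreeMibTable.
	defer windows.FreeMibTable(unsafe.Pointer(table))

	for _, row := range table.Rows() {
		fmt.Printf("ifindex=%d metric=%d proto=%d\n",
			row.InterfaceIndex, row.Metric, row.Protocol)
	}
}

Rows() hides the classic variable-length Win32 table idiom: the struct declares a one-element array, and unsafe.Slice re-views it as NumEntries contiguous rows.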
+const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. +type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + // MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. type MibUnicastIpAddressRow struct { diff --git a/openshift/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/openshift/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go index a58bc48b8e..f25b7308a1 100644 --- a/openshift/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/openshift/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -238,6 +242,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = 
modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -284,6 +289,7 @@ var ( procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -546,25 +552,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -574,7 +580,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -586,7 +592,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := 
syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -594,7 +600,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -602,7 +608,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -610,7 +616,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -618,7 +624,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName 
*uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -626,7 +632,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -634,7 +640,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -642,7 +648,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -650,7 +656,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -658,7 +664,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if 
r1 == 0 { err = errnoErr(e1) } @@ -675,7 +681,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -683,7 +689,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -691,7 +697,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -703,7 +709,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -711,7 +717,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 
:= syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -720,7 +726,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -728,7 +734,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -736,7 +742,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -744,7 +750,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -752,7 +758,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -760,7 +766,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -768,7 +774,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel 
uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -776,7 +782,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -784,7 +790,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -792,13 +798,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } @@ -806,7 +812,7 @@ func FreeSid(sid *SID) (err error) { } func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { - r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) if r1 == 0 { err = errnoErr(e1) } @@ 
-814,7 +820,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { } func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -829,7 +835,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -837,7 +843,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -853,7 +859,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -867,7 +873,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -876,7 +882,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -886,7 +892,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if 
*ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -895,7 +901,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -911,7 +917,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -921,7 +927,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -929,25 +935,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func 
GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -955,7 +961,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -963,7 +969,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -979,7 +985,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -987,7 +993,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -996,25 +1002,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1022,7 +1028,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1030,7 +1036,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1038,7 +1044,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1046,7 +1052,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func 
makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1054,7 +1060,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1062,7 +1068,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1070,7 +1076,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1079,7 +1085,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1092,7 +1098,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1100,7 +1106,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := 
syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1108,7 +1114,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1120,7 +1126,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1128,7 +1134,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1136,7 +1142,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1144,7 +1150,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1152,7 +1158,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1160,7 +1166,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, 
lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1176,7 +1182,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1184,7 +1190,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1192,7 +1198,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1200,7 +1206,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), 
uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1208,7 +1214,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1217,7 +1223,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1226,7 +1232,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1234,7 +1240,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1242,7 +1248,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1250,7 +1256,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1267,7 +1273,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1275,7 +1281,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1291,7 +1297,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1303,7 +1309,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1315,7 +1321,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1323,7 +1329,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1336,7 +1342,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1344,7 +1350,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1352,7 +1358,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1360,7 +1366,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1368,7 +1374,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1376,7 +1382,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1384,7 +1390,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, 
e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1392,7 +1398,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1400,7 +1406,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1408,7 +1414,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1417,7 +1423,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1425,13 +1431,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1440,7 +1446,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags 
uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1449,7 +1455,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1458,18 +1464,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1477,7 +1483,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1485,13 +1491,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time 
*Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1500,7 +1506,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1509,7 +1515,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1521,7 +1527,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1530,7 +1536,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), 
uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1538,7 +1544,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1546,7 +1552,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1554,7 +1560,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1562,7 +1568,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store 
Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1571,7 +1577,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1586,7 +1592,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1594,12 +1600,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1607,7 +1613,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1615,15 +1621,20 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) if r0 != 0 { errcode = syscall.Errno(r0) } return } +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := 
syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1631,7 +1642,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1639,7 +1650,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1647,7 +1658,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1655,7 +1666,23 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1663,7 +1690,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1675,7 +1702,19 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), 
uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1687,7 +1726,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1695,7 +1734,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1704,7 +1743,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1712,7 +1751,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1720,7 +1759,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1728,7 +1767,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1736,7 +1775,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1744,7 +1783,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, 
e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1752,12 +1791,12 @@ func CloseHandle(handle Handle) (err error) {
 }

 func ClosePseudoConsole(console Handle) {
-	syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0)
+	syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console))
 	return
 }

 func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
+	r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1765,7 +1804,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
 }

 func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
-	r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0)
+	r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1773,7 +1812,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
 }

 func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
 	handle = Handle(r0)
 	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
@@ -1782,7 +1821,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d
 }

 func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
 	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
@@ -1791,7 +1830,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat
 }

 func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+	r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
 	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
@@ -1800,7 +1839,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS
 }

 func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -1809,7 +1848,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes
 }

 func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -1817,7 +1856,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr
 }

 func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -1826,7 +1865,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr
 }

 func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0)
+	r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -1835,7 +1874,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle,
 }

 func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
 	handle = Handle(r0)
 	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
@@ -1848,7 +1887,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
 	if initialOwner {
 		_p0 = 1
 	}
-	r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+	r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
 	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
@@ -1857,7 +1896,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
 }

 func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+	r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -1866,7 +1905,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u
 }

 func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1878,7 +1917,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
 	if inheritHandles {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1886,7 +1925,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
 }

 func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
-	r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0)
+	r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)))
 	if r0 != 0 {
 		hr = syscall.Errno(r0)
 	}
@@ -1894,7 +1933,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons
 }

 func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
+	r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -1902,7 +1941,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u
 }

 func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0)
+	r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -1911,7 +1950,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er
 }

 func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
+	r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1919,7 +1958,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err
 }

 func DeleteFile(path *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1927,12 +1966,12 @@ func DeleteFile(path *uint16) (err error) {
 }

 func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) {
-	syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0)
+	syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)))
 	return
 }

 func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1940,7 +1979,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
 }

 func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0)
+	r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1948,7 +1987,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff
 }

 func DisconnectNamedPipe(pipe Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1960,7 +1999,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
 	if bInheritHandle {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1968,7 +2007,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
 }

 func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0)
+	r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1976,12 +2015,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
 }

 func ExitProcess(exitcode uint32) {
-	syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0)
+	syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode))
 	return
 }

 func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+	r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -1990,7 +2029,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32,
 }

 func FindClose(handle Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -1998,7 +2037,7 @@ func FindClose(handle Handle) (err error) {
 }

 func FindCloseChangeNotification(handle Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2019,7 +2058,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
 	if watchSubtree {
 		_p1 = 1
 	}
-	r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
+	r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -2028,7 +2067,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
 }

 func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0)
+	r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -2037,7 +2076,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro
 }

 func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+	r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -2046,7 +2085,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b
 }

 func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0)
+	r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -2055,7 +2094,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er
 }

 func FindNextChangeNotification(handle Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2063,7 +2102,7 @@ func FindNextChangeNotification(handle Handle) (err error) {
 }

 func findNextFile1(handle Handle, data *win32finddata1) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+	r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2071,7 +2110,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) {
 }

 func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+	r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2079,7 +2118,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin
 }

 func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+	r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2087,7 +2126,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32)
 }

 func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType))
+	r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType))
 	resInfo = Handle(r0)
 	if resInfo == 0 {
 		err = errnoErr(e1)
@@ -2096,7 +2135,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle,
 }

 func FindVolumeClose(findVolume Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2104,7 +2143,15 @@ func FindVolumeClose(findVolume Handle) (err error) {
 }

 func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FlushConsoleInputBuffer(console Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2112,7 +2159,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
 }

 func FlushFileBuffers(handle Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2120,7 +2167,7 @@ func FlushFileBuffers(handle Handle) (err error) {
 }

 func FlushViewOfFile(addr uintptr, length uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0)
+	r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2132,7 +2179,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
 	if len(buf) > 0 {
 		_p0 = &buf[0]
 	}
-	r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2141,7 +2188,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
 }

 func FreeEnvironmentStrings(envs *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2149,7 +2196,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) {
 }

 func FreeLibrary(handle Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2157,7 +2204,7 @@ func FreeLibrary(handle Handle) (err error) {
 }

 func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0)
+	r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2165,19 +2212,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro
 }

 func GetACP() (acp uint32) {
-	r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetACP.Addr())
 	acp = uint32(r0)
 	return
 }

 func GetActiveProcessorCount(groupNumber uint16) (ret uint32) {
-	r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
 	ret = uint32(r0)
 	return
 }

 func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2185,7 +2232,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
 }

 func GetCommState(handle Handle, lpDCB *DCB) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2193,7 +2240,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) {
 }

 func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2201,13 +2248,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
 }

 func GetCommandLine() (cmd *uint16) {
-	r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr())
 	cmd = (*uint16)(unsafe.Pointer(r0))
 	return
 }

 func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+	r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2215,7 +2262,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
 }

 func GetComputerName(buf *uint16, n *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2223,7 +2270,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) {
 }

 func GetConsoleCP() (cp uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr())
 	cp = uint32(r0)
 	if cp == 0 {
 		err = errnoErr(e1)
@@ -2232,7 +2279,7 @@ func GetConsoleCP() (cp uint32, err error) {
 }

 func GetConsoleMode(console Handle, mode *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2240,7 +2287,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) {
 }

 func GetConsoleOutputCP() (cp uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr())
 	cp = uint32(r0)
 	if cp == 0 {
 		err = errnoErr(e1)
@@ -2249,7 +2296,7 @@ func GetConsoleOutputCP() (cp uint32, err error) {
 }

 func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2257,7 +2304,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (
 }

 func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2266,19 +2313,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
 }

 func GetCurrentProcessId() (pid uint32) {
-	r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr())
 	pid = uint32(r0)
 	return
 }

 func GetCurrentThreadId() (id uint32) {
-	r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr())
 	id = uint32(r0)
 	return
 }

 func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2286,13 +2333,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6
 }

 func GetDriveType(rootPathName *uint16) (driveType uint32) {
-	r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName)))
 	driveType = uint32(r0)
 	return
 }

 func GetEnvironmentStrings() (envs *uint16, err error) {
-	r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr())
 	envs = (*uint16)(unsafe.Pointer(r0))
 	if envs == nil {
 		err = errnoErr(e1)
@@ -2301,7 +2348,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) {
 }

 func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
+	r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2310,7 +2357,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32
 }

 func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2318,7 +2365,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
 }

 func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
+	r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2326,7 +2373,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
 }

 func GetFileAttributes(name *uint16) (attrs uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)))
 	attrs = uint32(r0)
 	if attrs == INVALID_FILE_ATTRIBUTES {
 		err = errnoErr(e1)
@@ -2335,7 +2382,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) {
 }

 func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2343,7 +2390,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e
 }

 func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2351,7 +2398,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte,
 }

 func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2359,7 +2406,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim
 }

 func GetFileType(filehandle Handle) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2368,7 +2415,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) {
 }

 func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2377,7 +2424,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32
 }

 func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2386,13 +2433,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (
 }

 func GetLargePageMinimum() (size uintptr) {
-	r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr())
 	size = uintptr(r0)
 	return
 }

 func GetLastError() (lasterr error) {
-	r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetLastError.Addr())
 	if r0 != 0 {
 		lasterr = syscall.Errno(r0)
 	}
@@ -2400,7 +2447,7 @@ func GetLastError() (lasterr error) {
 }

 func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2409,7 +2456,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err
 }

 func GetLogicalDrives() (drivesBitMask uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr())
 	drivesBitMask = uint32(r0)
 	if drivesBitMask == 0 {
 		err = errnoErr(e1)
@@ -2418,7 +2465,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) {
 }

 func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
+	r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2427,13 +2474,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er
 }

 func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) {
-	r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber))
 	ret = uint32(r0)
 	return
 }

 func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
+	r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2442,7 +2489,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32,
 }

 func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
+	r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2450,7 +2497,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er
 }

 func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2458,7 +2505,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro
 }

 func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2466,7 +2513,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m
 }

 func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2474,7 +2521,15 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3
 }

 func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2486,7 +2541,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
 	if wait {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2494,7 +2549,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
 }

 func GetPriorityClass(process Handle) (ret uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process))
 	ret = uint32(r0)
 	if ret == 0 {
 		err = errnoErr(e1)
@@ -2512,7 +2567,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) {
 }

 func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
-	r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname)))
 	proc = uintptr(r0)
 	if proc == 0 {
 		err = errnoErr(e1)
@@ -2521,7 +2576,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
 }

 func GetProcessId(process Handle) (id uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process))
 	id = uint32(r0)
 	if id == 0 {
 		err = errnoErr(e1)
@@ -2530,7 +2585,7 @@ func GetProcessId(process Handle) (id uint32, err error) {
 }

 func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2538,7 +2593,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin
 }

 func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2546,7 +2601,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
 }

 func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2554,12 +2609,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime,
 }

 func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
-	syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
+	syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)))
 	return
 }

 func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
+	r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2567,7 +2622,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl
 }

 func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
+	r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2576,12 +2631,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin
 }

 func getStartupInfo(startupInfo *StartupInfo) {
-	syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
+	syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo)))
 	return
 }

 func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -2590,7 +2645,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
 }

 func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+	r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
 	len = uint32(r0)
 	if len == 0 {
 		err = errnoErr(e1)
@@ -2599,7 +2654,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
 }

 func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2607,17 +2662,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
 }

 func GetSystemTimeAsFileTime(time *Filetime) {
-	syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+	syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
 	return
 }

 func GetSystemTimePreciseAsFileTime(time *Filetime) {
-	syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+	syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
 	return
 }

 func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+	r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
 	len = uint32(r0)
 	if len == 0 {
 		err = errnoErr(e1)
@@ -2626,7 +2681,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro
 }

 func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2635,7 +2690,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
 }

 func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2643,13 +2698,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
 }

 func getTickCount64() (ms uint64) {
-	r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
 	ms = uint64(r0)
 	return
 }

 func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi)))
 	rc = uint32(r0)
 	if rc == 0xffffffff {
 		err = errnoErr(e1)
@@ -2658,7 +2713,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
 }

 func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2666,7 +2721,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16
 }

 func GetVersion() (ver uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0)
+	r0, _, e1 := syscall.SyscallN(procGetVersion.Addr())
 	ver = uint32(r0)
 	if ver == 0 {
 		err = errnoErr(e1)
@@ -2675,7 +2730,7 @@ func GetVersion() (ver uint32, err error) {
 }

 func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+	r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2683,7 +2738,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN
 }

 func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+	r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2691,7 +2746,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume
 }

 func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+	r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2699,7 +2754,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint
 }

 func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
+	r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2707,7 +2762,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui
 }

 func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2715,7 +2770,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16
 }

 func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+	r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
 	len = uint32(r0)
 	if len == 0 {
 		err = errnoErr(e1)
@@ -2724,7 +2779,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
 }

 func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2736,7 +2791,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
 	if *isWow64 {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0)
+	r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0)))
 	*isWow64 = _p0 != 0
 	if r1 == 0 {
 		err = errnoErr(e1)
@@ -2749,7 +2804,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1
 	if err != nil {
 		return
 	}
-	r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+	r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2766,7 +2821,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e
 }

 func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+	r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -2784,7 +2839,7 @@ func LoadLibrary(libname string) (handle Handle, err error) {
 }

 func _LoadLibrary(libname *uint16) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname)))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -2793,7 +2848,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) {
 }

 func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+	r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo))
 	resData = Handle(r0)
 	if resData == 0 {
 		err = errnoErr(e1)
@@ -2802,7 +2857,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
 }

 func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
-	r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0)
+	r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length))
 	ptr = uintptr(r0)
 	if ptr == 0 {
 		err = errnoErr(e1)
@@ -2811,7 +2866,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
 }

 func LocalFree(hmem Handle) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem))
 	handle = Handle(r0)
 	if handle != 0 {
 		err = errnoErr(e1)
@@ -2820,7 +2875,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) {
 }

 func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+	r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2828,7 +2883,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt
 }

 func LockResource(resData Handle) (addr uintptr, err error) {
-	r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData))
 	addr = uintptr(r0)
 	if addr == 0 {
 		err = errnoErr(e1)
@@ -2837,7 +2892,7 @@ func LockResource(resData Handle) (addr uintptr, err error) {
 }

 func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
-	r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
+	r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length))
 	addr = uintptr(r0)
 	if addr == 0 {
 		err = errnoErr(e1)
@@ -2846,7 +2901,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui
 }

 func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
-	r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+	r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2854,7 +2909,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
 }

 func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
-	r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+	r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2862,7 +2917,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
 }

 func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+	r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2870,7 +2925,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
 }

 func MoveFile(from *uint16, to *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0)
+	r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2878,7 +2933,7 @@ func MoveFile(from *uint16, to *uint16) (err error) {
 }

 func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
-	r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+	r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
 	nwrite = int32(r0)
 	if nwrite == 0 {
 		err = errnoErr(e1)
@@ -2891,7 +2946,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
 	if inheritHandle {
 		_p0 = 1
 	}
-	r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+	r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -2904,7 +2959,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
 	if inheritHandle {
 		_p0 = 1
 	}
-	r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+	r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -2917,7 +2972,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha
 	if inheritHandle {
 		_p0 = 1
 	}
-	r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
+	r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -2930,7 +2985,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
 	if inheritHandle {
 		_p0 = 1
 	}
-	r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
+	r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
 	handle = Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -2939,7 +2994,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
 }

 func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2947,7 +3002,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla
 }

 func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
-	r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+	r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2955,7 +3010,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
 }

 func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
-	r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+	r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2963,7 +3018,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
 }

 func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0)
+	r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2971,7 +3026,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
 }

 func PulseEvent(event Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2979,7 +3034,7 @@ func PulseEvent(event Handle) (err error) {
 }

 func PurgeComm(handle Handle, dwFlags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0)
+	r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2987,7 +3042,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) {
 }

 func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
+	r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
 	n = uint32(r0)
 	if n == 0 {
 		err = errnoErr(e1)
@@ -2996,7 +3051,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3
 }

 func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3004,7 +3059,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size
 }

 func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0)
+	r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(),
uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -3012,7 +3067,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -3024,7 +3079,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -3036,7 +3091,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3044,7 +3099,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -3052,7 +3107,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -3060,7 +3115,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3068,7 +3123,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := 
syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -3076,7 +3131,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3084,7 +3139,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -3092,7 +3147,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -3101,7 +3156,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3109,7 +3164,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3117,7 +3172,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3125,7 +3180,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -3133,7 +3188,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func SetConsoleCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3141,7 +3196,7 @@ func SetConsoleCP(cp uint32) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3149,7 +3204,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func 
SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) if r1 == 0 { err = errnoErr(e1) } @@ -3157,7 +3212,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetConsoleOutputCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3165,7 +3220,7 @@ func SetConsoleOutputCP(cp uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3173,7 +3228,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3190,7 +3245,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3198,7 +3253,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3206,7 +3261,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3214,13 +3269,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3228,7 +3283,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3236,7 +3291,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, 
_, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3244,7 +3299,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3252,7 +3307,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3261,7 +3316,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3269,7 +3324,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3277,7 +3332,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3285,7 +3340,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), 
uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3294,7 +3349,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3302,7 +3357,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3314,7 +3369,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3322,7 +3377,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3330,7 +3385,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3338,7 +3393,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3346,7 +3401,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3354,7 +3409,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName 
*uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3362,7 +3417,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3370,7 +3425,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3383,13 +3438,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3397,7 +3452,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3405,7 +3460,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3413,7 +3468,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3421,7 +3476,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), 
uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3429,7 +3484,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3437,7 +3492,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3445,7 +3500,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3454,7 +3509,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { err = errnoErr(e1) } @@ -3462,7 +3517,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3470,7 +3525,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3478,7 +3533,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, 
uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3486,7 +3541,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3494,7 +3549,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3502,7 +3557,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3510,13 +3565,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3528,7 +3583,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil if waitAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3537,7 +3592,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil } func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3546,7 +3601,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds 
uint32) (event uint32, } func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved))) if r1 == 0 { err = errnoErr(e1) } @@ -3558,7 +3613,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3566,7 +3621,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) } func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten))) if r1 == 0 { err = errnoErr(e1) } @@ -3574,7 +3629,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size } func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3582,12 +3637,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32 } func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen))) return } func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err 
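Every hunk in this run of vendored Windows syscall bindings applies one mechanical rewrite: calls to the fixed-arity helpers syscall.Syscall, Syscall6, Syscall9, Syscall12, and Syscall15, which require an explicit argument count plus zero padding up to the helper's arity, become calls to the variadic syscall.SyscallN (added in Go 1.18), which is passed exactly the arguments the Windows procedure takes. A minimal before/after sketch of the pattern, using the SetEvent wrapper that appears in these hunks:

	// Before: fixed-arity helper; the "1" is the argument count and the
	// trailing zeros pad the unused slots.
	r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0)

	// After: variadic helper; no count, no padding.
	r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event))

The generated error handling around each call (checking r1 or r0, then errnoErr(e1) or NTStatus(r0)) is unchanged; only the call sites are rewritten.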
@@ -3582,12 +3637,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32
 }

 func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
-	syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+	syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
 	return
 }

 func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3595,7 +3650,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint
 }

 func NetApiBufferFree(buf *byte) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3603,7 +3658,7 @@ func NetApiBufferFree(buf *byte) (neterr error) {
 }

 func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+	r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3611,7 +3666,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
 }

 func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
-	r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+	r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3619,7 +3674,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr
 }

 func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
-	r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3627,7 +3682,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by
 }

 func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+	r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3635,7 +3690,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO
 }

 func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
-	r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+	r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3643,7 +3698,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i
 }

 func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+	r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3651,7 +3706,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe
 }

 func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3659,7 +3714,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf
 }

 func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0)
+	r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3667,7 +3722,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte,
 }

 func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3675,7 +3730,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P
 }

 func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+	r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3683,13 +3738,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL
 }

 func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
-	r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+	r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
 	ret = r0 != 0
 	return
 }

 func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3697,13 +3752,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
 }

 func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
-	r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
 	ret = r0 != 0
 	return
 }

 func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3711,7 +3766,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile
 }

 func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3719,18 +3774,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString
 }

 func RtlGetCurrentPeb() (peb *PEB) {
-	r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
 	peb = (*PEB)(unsafe.Pointer(r0))
 	return
 }

 func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
-	syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+	syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
 	return
 }

 func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3738,23 +3793,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
 }

 func RtlInitString(destinationString *NTString, sourceString *byte) {
-	syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
 	return
 }

 func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
-	syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
 	return
 }

 func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
-	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
 	ret = syscall.Errno(r0)
 	return
 }

 func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
-	r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+	r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3762,7 +3817,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
 }

 func coCreateGuid(pguid *GUID) (ret error) {
-	r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3770,7 +3825,7 @@ func coCreateGuid(pguid *GUID) (ret error) {
 }

 func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
-	r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3778,7 +3833,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable *
 }

 func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
-	r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+	r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3786,23 +3841,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
 }

 func CoTaskMemFree(address unsafe.Pointer) {
-	syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+	syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
 	return
 }

 func CoUninitialize() {
-	syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+	syscall.SyscallN(procCoUninitialize.Addr())
 	return
 }

 func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
-	r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+	r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
 	chars = int32(r0)
 	return
 }

 func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3810,7 +3865,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin
 }

 func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0)
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3818,7 +3873,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u
 }

 func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+	r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3826,7 +3881,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err
 }

 func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3834,7 +3889,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin
 }

 func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3842,7 +3897,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u
 }

 func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3850,7 +3905,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb
 }

 func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb))
+	r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3862,7 +3917,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb
 	if ret != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0)
+	r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3874,12 +3929,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) {
 	if err != nil {
 		return
 	}
-	syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0)
+	syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
 	return
 }

 func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
+	r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -3887,7 +3942,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er
 }

 func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+	r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -3895,7 +3950,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint
 }

 func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3903,7 +3958,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
 }

 func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3911,7 +3966,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf
 }

 func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3919,7 +3974,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
 }

 func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3927,7 +3982,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu
 }

 func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3935,7 +3990,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz
 }

 func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
-	r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	handle = DevInfo(r0)
 	if handle == DevInfo(InvalidHandle) {
 		err = errnoErr(e1)
@@ -3944,7 +3999,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN
 }

 func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3952,7 +4007,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI
 }

 func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3960,7 +4015,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
 }

 func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3968,7 +4023,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo
 }

 func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3976,7 +4031,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo
 }

 func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)),
uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3984,7 +4039,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d } func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3993,7 +4048,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp } func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4001,7 +4056,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData))) if r1 == 0 { err = errnoErr(e1) } @@ -4009,7 +4064,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa } func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4017,7 +4072,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), 
uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4025,7 +4080,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -4033,7 +4088,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4041,7 +4096,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4049,7 +4104,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData 
*DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4057,7 +4112,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4065,7 +4120,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) key = Handle(r0) if key == InvalidHandle { err = errnoErr(e1) @@ -4074,7 +4129,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc } func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4082,7 +4137,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4090,7 +4145,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), 
uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4098,7 +4153,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4106,7 +4161,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4114,7 +4169,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -4122,7 +4177,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er } func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc))) argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) @@ -4131,7 +4186,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { } func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4139,7 +4194,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u } func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), 
uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) if r1 <= 32 { err = errnoErr(e1) } @@ -4147,12 +4202,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui } func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param)) return } func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param)) if r1 == 0 { err = errnoErr(e1) } @@ -4160,7 +4215,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { } func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -4168,7 +4223,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { } func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) copied = int32(r0) if copied == 0 { err = errnoErr(e1) @@ -4177,19 +4232,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e } func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr()) hwnd = HWND(r0) return } func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr()) hwnd = HWND(r0) return } func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -4197,19 +4252,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { } func GetKeyboardLayout(tid uint32) (hkl Handle) { - r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid)) hkl = Handle(r0) return } func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr()) shellWindow = HWND(r0) return } func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid))) tid = uint32(r0) if tid == 0 { err = errnoErr(e1) @@ -4218,25 +4273,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid 
uint32, err error) { } func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd)) isWindow = r0 != 0 return } func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd)) isUnicode = r0 != 0 return } func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd)) isVisible = r0 != 0 return } func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags)) hkl = Handle(r0) if hkl == 0 { err = errnoErr(e1) @@ -4245,7 +4300,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { } func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype)) ret = int32(r0) if ret == 0 { err = errnoErr(e1) @@ -4254,13 +4309,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i } func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { - r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl)) ret = int32(r0) return } func UnloadKeyboardLayout(hkl Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl)) if r1 == 0 { err = errnoErr(e1) } @@ -4272,7 +4327,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( if inheritExisting { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -4280,7 +4335,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( } func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block))) if r1 == 0 { err = errnoErr(e1) } @@ -4288,7 +4343,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { } func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) 
(err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) if r1 == 0 { err = errnoErr(e1) } @@ -4305,7 +4360,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32 } func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle))) bufSize = uint32(r0) if bufSize == 0 { err = errnoErr(e1) @@ -4323,7 +4378,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u } func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer)) if r1 == 0 { err = errnoErr(e1) } @@ -4340,7 +4395,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer } func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4348,7 +4403,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint } func TimeBeginPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4356,7 +4411,7 @@ func TimeBeginPeriod(period uint32) (err error) { } func TimeEndPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4364,7 +4419,7 @@ func TimeEndPeriod(period uint32) (err error) { } func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4372,12 +4427,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) } func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo))) return } func 
GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4385,7 +4440,7 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul } func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr()) if r1 == socket_error { err = errnoErr(e1) } @@ -4393,7 +4448,7 @@ func WSACleanup() (err error) { } func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { - r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) if r1 != 0 { err = errnoErr(e1) } @@ -4401,7 +4456,7 @@ func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err } func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4414,7 +4469,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -4422,7 +4477,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4430,7 +4485,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4438,7 +4493,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 == socket_error { err = errnoErr(e1) } @@ -4446,7 +4501,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4454,7 +4509,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4462,7 +4517,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4470,7 +4525,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4478,7 +4533,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4486,7 +4541,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4495,7 +4550,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4503,7 +4558,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4511,7 +4566,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4519,7 +4574,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4536,7 +4591,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 
:= syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4545,7 +4600,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4562,7 +4617,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4585,7 +4640,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4594,7 +4649,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4602,7 +4657,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4610,7 +4665,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4618,7 +4673,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4628,7 +4683,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) 
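Every hunk in this generated zsyscall_windows.go file applies the same mechanical rewrite: the fixed-arity helpers syscall.Syscall, Syscall6, and Syscall9, which take an explicit argument count and require zero padding out to the helper's arity, are replaced by the variadic syscall.SyscallN added in Go 1.18. A minimal sketch of the two calling styles, using MessageBoxW purely as a stand-in API (not one of the functions changed here):

//go:build windows

package main

import (
	"syscall"
	"unsafe"
)

var (
	moduser32       = syscall.NewLazyDLL("user32.dll")
	procMessageBoxW = moduser32.NewProc("MessageBoxW")
)

// messageBox illustrates the calling convention only; MessageBoxW takes
// four arguments (hwnd, text, caption, type).
func messageBox(text, caption *uint16) int32 {
	// Old style: arity-6 helper, explicit count 4, two zeros of padding.
	//   r0, _, _ := syscall.Syscall6(procMessageBoxW.Addr(), 4,
	//       0, uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), 0, 0, 0)
	// New style: SyscallN passes exactly the arguments the API needs.
	r0, _, _ := syscall.SyscallN(procMessageBoxW.Addr(),
		0, uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), 0)
	return int32(r0)
}

func main() {
	text, _ := syscall.UTF16PtrFromString("hello")
	title, _ := syscall.UTF16PtrFromString("demo")
	messageBox(text, title)
}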
+ r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4641,7 +4696,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4649,7 +4704,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4657,7 +4712,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4665,7 +4720,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4674,7 +4729,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4682,12 +4737,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/openshift/tools/vendor/golang.org/x/term/term_windows.go b/openshift/tools/vendor/golang.org/x/term/term_windows.go index df6bf948e1..0ddd81c02a 100644 --- a/openshift/tools/vendor/golang.org/x/term/term_windows.go 
+++ b/openshift/tools/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. +// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/openshift/tools/vendor/golang.org/x/term/terminal.go b/openshift/tools/vendor/golang.org/x/term/terminal.go index 13e9a64ad1..9255449b9b 100644 --- a/openshift/tools/vendor/golang.org/x/term/terminal.go +++ b/openshift/tools/vendor/golang.org/x/term/terminal.go @@ -146,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -412,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) { } } -// countToLeftWord returns then number of characters from the cursor to the +// countToLeftWord returns the number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { @@ -437,7 +438,7 @@ func (t *Terminal) countToLeftWord() int { return t.pos - pos } -// countToRightWord returns then number of characters from the cursor to the +// countToRightWord returns the number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos @@ -477,7 +478,7 @@ func visualLength(runes []rune) int { return length } -// histroryAt unlocks the terminal and relocks it while calling History.At. +// historyAt unlocks the terminal and relocks it while calling History.At. func (t *Terminal) historyAt(idx int) (string, bool) { t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. defer t.lock.Lock() // panic in At (or Len) protection. @@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) { // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. 
+ if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { diff --git a/openshift/tools/vendor/golang.org/x/text/encoding/encoding.go b/openshift/tools/vendor/golang.org/x/text/encoding/encoding.go deleted file mode 100644 index a0bd7cd4d0..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/encoding/encoding.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package encoding defines an interface for character encodings, such as Shift -// JIS and Windows 1252, that can convert to and from UTF-8. -// -// Encoding implementations are provided in other packages, such as -// golang.org/x/text/encoding/charmap and -// golang.org/x/text/encoding/japanese. -package encoding // import "golang.org/x/text/encoding" - -import ( - "errors" - "io" - "strconv" - "unicode/utf8" - - "golang.org/x/text/encoding/internal/identifier" - "golang.org/x/text/transform" -) - -// TODO: -// - There seems to be some inconsistency in when decoders return errors -// and when not. Also documentation seems to suggest they shouldn't return -// errors at all (except for UTF-16). -// - Encoders seem to rely on or at least benefit from the input being in NFC -// normal form. Perhaps add an example how users could prepare their output. - -// Encoding is a character set encoding that can be transformed to and from -// UTF-8. -type Encoding interface { - // NewDecoder returns a Decoder. - NewDecoder() *Decoder - - // NewEncoder returns an Encoder. - NewEncoder() *Encoder -} - -// A Decoder converts bytes to UTF-8. It implements transform.Transformer. -// -// Transforming source bytes that are not of that encoding will not result in an -// error per se. Each byte that cannot be transcoded will be represented in the -// output by the UTF-8 encoding of '\uFFFD', the replacement rune. -type Decoder struct { - transform.Transformer - - // This forces external creators of Decoders to use names in struct - // initializers, allowing for future extendibility without having to break - // code. - _ struct{} -} - -// Bytes converts the given encoded bytes to UTF-8. It returns the converted -// bytes or nil, err if any error occurred. -func (d *Decoder) Bytes(b []byte) ([]byte, error) { - b, _, err := transform.Bytes(d, b) - if err != nil { - return nil, err - } - return b, nil -} - -// String converts the given encoded string to UTF-8. It returns the converted -// string or "", err if any error occurred. -func (d *Decoder) String(s string) (string, error) { - s, _, err := transform.String(d, s) - if err != nil { - return "", err - } - return s, nil -} - -// Reader wraps another Reader to decode its bytes. -// -// The Decoder may not be used for any other operation as long as the returned -// Reader is in use. -func (d *Decoder) Reader(r io.Reader) io.Reader { - return transform.NewReader(r, d) -} - -// An Encoder converts bytes from UTF-8. It implements transform.Transformer. -// -// Each rune that cannot be transcoded will result in an error. In this case, -// the transform will consume all source byte up to, not including the offending -// rune. Transforming source bytes that are not valid UTF-8 will be replaced by -// `\uFFFD`. To return early with an error instead, use transform.Chain to -// preprocess the data with a UTF8Validator. 
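Before the x/text deletions continue below, the x/term terminal.go hunks above are worth unpacking: keyLF ('\n') now terminates a line just like keyEnter ('\r'), is kept verbatim during bracketed paste, and a CR immediately followed by an LF has the LF consumed so a CRLF pair no longer yields a spurious empty line. A rough illustration of that CRLF collapse on a raw key stream — collapseCRLF is a hypothetical helper for demonstration, not part of x/term's API:

package main

import "fmt"

// collapseCRLF mimics the readLine change: a bare '\r', a bare '\n', or a
// "\r\n" pair each produce exactly one end-of-line event.
func collapseCRLF(keys []rune) []string {
	var lines []string
	var cur []rune
	for i := 0; i < len(keys); i++ {
		k := keys[i]
		if k == '\r' || k == '\n' {
			if k == '\r' && i+1 < len(keys) && keys[i+1] == '\n' {
				i++ // consume the LF of a CRLF pair
			}
			lines = append(lines, string(cur))
			cur = cur[:0]
			continue
		}
		cur = append(cur, k)
	}
	return lines
}

func main() {
	fmt.Println(collapseCRLF([]rune("one\r\ntwo\nthree\r"))) // [one two three]
}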
-type Encoder struct { - transform.Transformer - - // This forces external creators of Encoders to use names in struct - // initializers, allowing for future extendibility without having to break - // code. - _ struct{} -} - -// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if -// any error occurred. -func (e *Encoder) Bytes(b []byte) ([]byte, error) { - b, _, err := transform.Bytes(e, b) - if err != nil { - return nil, err - } - return b, nil -} - -// String converts a string from UTF-8. It returns the converted string or -// "", err if any error occurred. -func (e *Encoder) String(s string) (string, error) { - s, _, err := transform.String(e, s) - if err != nil { - return "", err - } - return s, nil -} - -// Writer wraps another Writer to encode its UTF-8 output. -// -// The Encoder may not be used for any other operation as long as the returned -// Writer is in use. -func (e *Encoder) Writer(w io.Writer) io.Writer { - return transform.NewWriter(w, e) -} - -// ASCIISub is the ASCII substitute character, as recommended by -// https://unicode.org/reports/tr36/#Text_Comparison -const ASCIISub = '\x1a' - -// Nop is the nop encoding. Its transformed bytes are the same as the source -// bytes; it does not replace invalid UTF-8 sequences. -var Nop Encoding = nop{} - -type nop struct{} - -func (nop) NewDecoder() *Decoder { - return &Decoder{Transformer: transform.Nop} -} -func (nop) NewEncoder() *Encoder { - return &Encoder{Transformer: transform.Nop} -} - -// Replacement is the replacement encoding. Decoding from the replacement -// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to -// the replacement encoding yields the same as the source bytes except that -// invalid UTF-8 is converted to '\uFFFD'. -// -// It is defined at http://encoding.spec.whatwg.org/#replacement -var Replacement Encoding = replacement{} - -type replacement struct{} - -func (replacement) NewDecoder() *Decoder { - return &Decoder{Transformer: replacementDecoder{}} -} - -func (replacement) NewEncoder() *Encoder { - return &Encoder{Transformer: replacementEncoder{}} -} - -func (replacement) ID() (mib identifier.MIB, other string) { - return identifier.Replacement, "" -} - -type replacementDecoder struct{ transform.NopResetter } - -func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if len(dst) < 3 { - return 0, 0, transform.ErrShortDst - } - if atEOF { - const fffd = "\ufffd" - dst[0] = fffd[0] - dst[1] = fffd[1] - dst[2] = fffd[2] - nDst = 3 - } - return nDst, len(src), nil -} - -type replacementEncoder struct{ transform.NopResetter } - -func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - r, size := rune(0), 0 - - for ; nSrc < len(src); nSrc += size { - r = rune(src[nSrc]) - - // Decode a 1-byte rune. - if r < utf8.RuneSelf { - size = 1 - - } else { - // Decode a multi-byte rune. - r, size = utf8.DecodeRune(src[nSrc:]) - if size == 1 { - // All valid runes of size 1 (those below utf8.RuneSelf) were - // handled above. We have invalid UTF-8 or we haven't seen the - // full character yet. 
- if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - r = '\ufffd' - } - } - - if nDst+utf8.RuneLen(r) > len(dst) { - err = transform.ErrShortDst - break - } - nDst += utf8.EncodeRune(dst[nDst:], r) - } - return nDst, nSrc, err -} - -// HTMLEscapeUnsupported wraps encoders to replace source runes outside the -// repertoire of the destination encoding with HTML escape sequences. -// -// This wrapper exists to comply to URL and HTML forms requiring a -// non-terminating legacy encoder. The produced sequences may lead to data -// loss as they are indistinguishable from legitimate input. To avoid this -// issue, use UTF-8 encodings whenever possible. -func HTMLEscapeUnsupported(e *Encoder) *Encoder { - return &Encoder{Transformer: &errorHandler{e, errorToHTML}} -} - -// ReplaceUnsupported wraps encoders to replace source runes outside the -// repertoire of the destination encoding with an encoding-specific -// replacement. -// -// This wrapper is only provided for backwards compatibility and legacy -// handling. Its use is strongly discouraged. Use UTF-8 whenever possible. -func ReplaceUnsupported(e *Encoder) *Encoder { - return &Encoder{Transformer: &errorHandler{e, errorToReplacement}} -} - -type errorHandler struct { - *Encoder - handler func(dst []byte, r rune, err repertoireError) (n int, ok bool) -} - -// TODO: consider making this error public in some form. -type repertoireError interface { - Replacement() byte -} - -func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF) - for err != nil { - rerr, ok := err.(repertoireError) - if !ok { - return nDst, nSrc, err - } - r, sz := utf8.DecodeRune(src[nSrc:]) - n, ok := h.handler(dst[nDst:], r, rerr) - if !ok { - return nDst, nSrc, transform.ErrShortDst - } - err = nil - nDst += n - if nSrc += sz; nSrc < len(src) { - var dn, sn int - dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF) - nDst += dn - nSrc += sn - } - } - return nDst, nSrc, err -} - -func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) { - buf := [8]byte{} - b := strconv.AppendUint(buf[:0], uint64(r), 10) - if n = len(b) + len("&#;"); n >= len(dst) { - return 0, false - } - dst[0] = '&' - dst[1] = '#' - dst[copy(dst[2:], b)+2] = ';' - return n, true -} - -func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) { - if len(dst) == 0 { - return 0, false - } - dst[0] = err.Replacement() - return 1, true -} - -// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8. -var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8") - -// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first -// input byte that is not valid UTF-8. -var UTF8Validator transform.Transformer = utf8Validator{} - -type utf8Validator struct{ transform.NopResetter } - -func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - n := len(src) - if n > len(dst) { - n = len(dst) - } - for i := 0; i < n; { - if c := src[i]; c < utf8.RuneSelf { - dst[i] = c - i++ - continue - } - _, size := utf8.DecodeRune(src[i:]) - if size == 1 { - // All valid runes of size 1 (those below utf8.RuneSelf) were - // handled above. We have invalid UTF-8 or we haven't seen the - // full character yet. 
- err = ErrInvalidUTF8 - if !atEOF && !utf8.FullRune(src[i:]) { - err = transform.ErrShortSrc - } - return i, i, err - } - if i+size > len(dst) { - return i, i, transform.ErrShortDst - } - for ; size > 0; size-- { - dst[i] = src[i] - i++ - } - } - if len(src) > len(dst) { - err = transform.ErrShortDst - } - return n, n, err -} diff --git a/openshift/tools/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/openshift/tools/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go deleted file mode 100644 index 5c9b85c280..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// Package identifier defines the contract between implementations of Encoding -// and Index by defining identifiers that uniquely identify standardized coded -// character sets (CCS) and character encoding schemes (CES), which we will -// together refer to as encodings, for which Encoding implementations provide -// converters to and from UTF-8. This package is typically only of concern to -// implementers of Indexes and Encodings. -// -// One part of the identifier is the MIB code, which is defined by IANA and -// uniquely identifies a CCS or CES. Each code is associated with data that -// references authorities, official documentation as well as aliases and MIME -// names. -// -// Not all CESs are covered by the IANA registry. The "other" string that is -// returned by ID can be used to identify other character sets or versions of -// existing ones. -// -// It is recommended that each package that provides a set of Encodings provide -// the All and Common variables to reference all supported encodings and -// commonly used subset. This allows Index implementations to include all -// available encodings without explicitly referencing or knowing about them. -package identifier - -// Note: this package is internal, but could be made public if there is a need -// for writing third-party Indexes and Encodings. - -// References: -// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt -// - http://www.iana.org/assignments/character-sets/character-sets.xhtml -// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib -// - http://www.ietf.org/rfc/rfc2978.txt -// - https://www.unicode.org/reports/tr22/ -// - http://www.w3.org/TR/encoding/ -// - https://encoding.spec.whatwg.org/ -// - https://encoding.spec.whatwg.org/encodings.json -// - https://tools.ietf.org/html/rfc6657#section-5 - -// Interface can be implemented by Encodings to define the CCS or CES for which -// it implements conversions. -type Interface interface { - // ID returns an encoding identifier. Exactly one of the mib and other - // values should be non-zero. - // - // In the usual case it is only necessary to indicate the MIB code. The - // other string can be used to specify encodings for which there is no MIB, - // such as "x-mac-dingbat". - // - // The other string may only contain the characters a-z, A-Z, 0-9, - and _. - ID() (mib MIB, other string) - - // NOTE: the restrictions on the encoding are to allow extending the syntax - // with additional information such as versions, vendors and other variants. -} - -// A MIB identifies an encoding. 
It is derived from the IANA MIB codes and adds -// some identifiers for some encodings that are not covered by the IANA -// standard. -// -// See http://www.iana.org/assignments/ianacharset-mib. -type MIB uint16 - -// These additional MIB types are not defined in IANA. They are added because -// they are common and defined within the text repo. -const ( - // Unofficial marks the start of encodings not registered by IANA. - Unofficial MIB = 10000 + iota - - // Replacement is the WhatWG replacement encoding. - Replacement - - // XUserDefined is the code for x-user-defined. - XUserDefined - - // MacintoshCyrillic is the code for x-mac-cyrillic. - MacintoshCyrillic -) diff --git a/openshift/tools/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/openshift/tools/vendor/golang.org/x/text/encoding/internal/identifier/mib.go deleted file mode 100644 index 351fb86e29..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/encoding/internal/identifier/mib.go +++ /dev/null @@ -1,1627 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package identifier - -const ( - // ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII). - // - // ANSI X3.4-1986 - // Reference: RFC2046 - ASCII MIB = 3 - - // ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin1 MIB = 4 - - // ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin2 MIB = 5 - - // ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin3 MIB = 6 - - // ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin4 MIB = 7 - - // ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatinCyrillic MIB = 8 - - // ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatinArabic MIB = 9 - - // ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1947 - // Reference: RFC1345 - ISOLatinGreek MIB = 10 - - // ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatinHebrew MIB = 11 - - // ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9). 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin5 MIB = 12 - - // ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin6 MIB = 13 - - // ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add. - // - // ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983 - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOTextComm MIB = 14 - - // HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201. - // - // JIS X 0201-1976. One byte only, this is equivalent to - // JIS/Roman (similar to ASCII) plus eight-bit half-width - // Katakana - // Reference: RFC1345 - HalfWidthKatakana MIB = 15 - - // JISEncoding is the MIB identifier with IANA name JIS_Encoding. - // - // JIS X 0202-1991. Uses ISO 2022 escape sequences to - // shift code sets as documented in JIS X 0202-1991. - JISEncoding MIB = 16 - - // ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS). - // - // This charset is an extension of csHalfWidthKatakana by - // adding graphic characters in JIS X 0208. The CCS's are - // JIS X0201:1997 and JIS X0208:1997. The - // complete definition is shown in Appendix 1 of JIS - // X0208:1997. - // This charset can be used for the top-level media type "text". - ShiftJIS MIB = 17 - - // EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP). - // - // Standardized by OSF, UNIX International, and UNIX Systems - // Laboratories Pacific. Uses ISO 2022 rules to select - // code set 0: US-ASCII (a single 7-bit byte set) - // code set 1: JIS X0208-1990 (a double 8-bit byte set) - // restricted to A0-FF in both bytes - // code set 2: Half Width Katakana (a single 7-bit byte set) - // requiring SS2 as the character prefix - // code set 3: JIS X0212-1990 (a double 7-bit byte set) - // restricted to A0-FF in both bytes - // requiring SS3 as the character prefix - EUCPkdFmtJapanese MIB = 18 - - // EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese. - // - // Used in Japan. Each character is 2 octets. - // code set 0: US-ASCII (a single 7-bit byte set) - // 1st byte = 00 - // 2nd byte = 20-7E - // code set 1: JIS X0208-1990 (a double 7-bit byte set) - // restricted to A0-FF in both bytes - // code set 2: Half Width Katakana (a single 7-bit byte set) - // 1st byte = 00 - // 2nd byte = A0-FF - // code set 3: JIS X0212-1990 (a double 7-bit byte set) - // restricted to A0-FF in - // the first byte - // and 21-7E in the second byte - EUCFixWidJapanese MIB = 19 - - // ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO4UnitedKingdom MIB = 20 - - // ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO11SwedishForNames MIB = 21 - - // ISO15Italian is the MIB identifier with IANA name IT. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO15Italian MIB = 22 - - // ISO17Spanish is the MIB identifier with IANA name ES. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO17Spanish MIB = 23 - - // ISO21German is the MIB identifier with IANA name DIN_66003. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO21German MIB = 24 - - // ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO60Norwegian1 MIB = 25 - - // ISO69French is the MIB identifier with IANA name NF_Z_62-010. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO69French MIB = 26 - - // ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1. - // - // Universal Transfer Format (1), this is the multibyte - // encoding, that subsets ASCII-7. It does not have byte - // ordering issues. - ISO10646UTF1 MIB = 27 - - // ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO646basic1983 MIB = 28 - - // INVARIANT is the MIB identifier with IANA name INVARIANT. - // - // Reference: RFC1345 - INVARIANT MIB = 29 - - // ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO2IntlRefVersion MIB = 30 - - // NATSSEFI is the MIB identifier with IANA name NATS-SEFI. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSSEFI MIB = 31 - - // NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSSEFIADD MIB = 32 - - // NATSDANO is the MIB identifier with IANA name NATS-DANO. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSDANO MIB = 33 - - // NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSDANOADD MIB = 34 - - // ISO10Swedish is the MIB identifier with IANA name SEN_850200_B. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO10Swedish MIB = 35 - - // KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
- // Reference: RFC1345 - KSC56011987 MIB = 36 - - // ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR). - // - // rfc1557 (see also KS_C_5601-1987) - // Reference: RFC1557 - ISO2022KR MIB = 37 - - // EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR). - // - // rfc1557 (see also KS_C_5861-1992) - // Reference: RFC1557 - EUCKR MIB = 38 - - // ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP). - // - // rfc1468 (see also rfc2237 ) - // Reference: RFC1468 - ISO2022JP MIB = 39 - - // ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2). - // - // rfc1554 - // Reference: RFC1554 - ISO2022JP2 MIB = 40 - - // ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO13JISC6220jp MIB = 41 - - // ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO14JISC6220ro MIB = 42 - - // ISO16Portuguese is the MIB identifier with IANA name PT. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO16Portuguese MIB = 43 - - // ISO18Greek7Old is the MIB identifier with IANA name greek7-old. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO18Greek7Old MIB = 44 - - // ISO19LatinGreek is the MIB identifier with IANA name latin-greek. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO19LatinGreek MIB = 45 - - // ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO25French MIB = 46 - - // ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO27LatinGreek1 MIB = 47 - - // ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO5427Cyrillic MIB = 48 - - // ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO42JISC62261978 MIB = 49 - - // ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO47BSViewdata MIB = 50 - - // ISO49INIS is the MIB identifier with IANA name INIS. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO49INIS MIB = 51 - - // ISO50INIS8 is the MIB identifier with IANA name INIS-8. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO50INIS8 MIB = 52 - - // ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO51INISCyrillic MIB = 53 - - // ISO54271981 is the MIB identifier with IANA name ISO_5427:1981. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO54271981 MIB = 54 - - // ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO5428Greek MIB = 55 - - // ISO57GB1988 is the MIB identifier with IANA name GB_1988-80. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO57GB1988 MIB = 56 - - // ISO58GB231280 is the MIB identifier with IANA name GB_2312-80. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO58GB231280 MIB = 57 - - // ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO61Norwegian2 MIB = 58 - - // ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO70VideotexSupp1 MIB = 59 - - // ISO84Portuguese2 is the MIB identifier with IANA name PT2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO84Portuguese2 MIB = 60 - - // ISO85Spanish2 is the MIB identifier with IANA name ES2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO85Spanish2 MIB = 61 - - // ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO86Hungarian MIB = 62 - - // ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO87JISX0208 MIB = 63 - - // ISO88Greek7 is the MIB identifier with IANA name greek7. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO88Greek7 MIB = 64 - - // ISO89ASMO449 is the MIB identifier with IANA name ASMO_449. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO89ASMO449 MIB = 65 - - // ISO90 is the MIB identifier with IANA name iso-ir-90. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO90 MIB = 66 - - // ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO91JISC62291984a MIB = 67 - - // ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO92JISC62991984b MIB = 68 - - // ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO93JIS62291984badd MIB = 69 - - // ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO94JIS62291984hand MIB = 70 - - // ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO95JIS62291984handadd MIB = 71 - - // ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO96JISC62291984kana MIB = 72 - - // ISO2033 is the MIB identifier with IANA name ISO_2033-1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO2033 MIB = 73 - - // ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO99NAPLPS MIB = 74 - - // ISO102T617bit is the MIB identifier with IANA name T.61-7bit. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO102T617bit MIB = 75 - - // ISO103T618bit is the MIB identifier with IANA name T.61-8bit. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO103T618bit MIB = 76 - - // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic. - // - // ISO registry - ISO111ECMACyrillic MIB = 77 - - // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO121Canadian1 MIB = 78 - - // ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO122Canadian2 MIB = 79 - - // ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO123CSAZ24341985gr MIB = 80 - - // ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E). - // - // rfc1556 - // Reference: RFC1556 - ISO88596E MIB = 81 - - // ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I). - // - // rfc1556 - // Reference: RFC1556 - ISO88596I MIB = 82 - - // ISO128T101G2 is the MIB identifier with IANA name T.101-G2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO128T101G2 MIB = 83 - - // ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E). - // - // rfc1556 - // Reference: RFC1556 - ISO88598E MIB = 84 - - // ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I). - // - // rfc1556 - // Reference: RFC1556 - ISO88598I MIB = 85 - - // ISO139CSN369103 is the MIB identifier with IANA name CSN_369103. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO139CSN369103 MIB = 86 - - // ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO141JUSIB1002 MIB = 87 - - // ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO143IECP271 MIB = 88 - - // ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO146Serbian MIB = 89 - - // ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO147Macedonian MIB = 90 - - // ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO150GreekCCITT MIB = 91 - - // ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO151Cuba MIB = 92 - - // ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO6937Add MIB = 93 - - // ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO153GOST1976874 MIB = 94 - - // ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
- // Reference: RFC1345 - ISO8859Supp MIB = 95 - - // ISO10367Box is the MIB identifier with IANA name ISO_10367-box. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO10367Box MIB = 96 - - // ISO158Lap is the MIB identifier with IANA name latin-lap. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO158Lap MIB = 97 - - // ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO159JISX02121990 MIB = 98 - - // ISO646Danish is the MIB identifier with IANA name DS_2089. - // - // Danish Standard, DS 2089, February 1974 - // Reference: RFC1345 - ISO646Danish MIB = 99 - - // USDK is the MIB identifier with IANA name us-dk. - // - // Reference: RFC1345 - USDK MIB = 100 - - // DKUS is the MIB identifier with IANA name dk-us. - // - // Reference: RFC1345 - DKUS MIB = 101 - - // KSC5636 is the MIB identifier with IANA name KSC5636. - // - // Reference: RFC1345 - KSC5636 MIB = 102 - - // Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7. - // - // rfc1642 - // Reference: RFC1642 - Unicode11UTF7 MIB = 103 - - // ISO2022CN is the MIB identifier with IANA name ISO-2022-CN. - // - // rfc1922 - // Reference: RFC1922 - ISO2022CN MIB = 104 - - // ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT. - // - // rfc1922 - // Reference: RFC1922 - ISO2022CNEXT MIB = 105 - - // UTF8 is the MIB identifier with IANA name UTF-8. - // - // rfc3629 - // Reference: RFC3629 - UTF8 MIB = 106 - - // ISO885913 is the MIB identifier with IANA name ISO-8859-13. - // - // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-13 https://www.iana.org/assignments/charset-reg/ISO-8859-13 - ISO885913 MIB = 109 - - // ISO885914 is the MIB identifier with IANA name ISO-8859-14. - // - // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-14 - ISO885914 MIB = 110 - - // ISO885915 is the MIB identifier with IANA name ISO-8859-15. - // - // ISO - // Please see: https://www.iana.org/assignments/charset-reg/ISO-8859-15 - ISO885915 MIB = 111 - - // ISO885916 is the MIB identifier with IANA name ISO-8859-16. - // - // ISO - ISO885916 MIB = 112 - - // GBK is the MIB identifier with IANA name GBK. - // - // Chinese IT Standardization Technical Committee - // Please see: https://www.iana.org/assignments/charset-reg/GBK - GBK MIB = 113 - - // GB18030 is the MIB identifier with IANA name GB18030. - // - // Chinese IT Standardization Technical Committee - // Please see: https://www.iana.org/assignments/charset-reg/GB18030 - GB18030 MIB = 114 - - // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15. - // - // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 - OSDEBCDICDF0415 MIB = 115 - - // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV. - // - // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV - OSDEBCDICDF03IRV MIB = 116 - - // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1. 
- // - // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 - OSDEBCDICDF041 MIB = 117 - - // ISO115481 is the MIB identifier with IANA name ISO-11548-1. - // - // See https://www.iana.org/assignments/charset-reg/ISO-11548-1 - ISO115481 MIB = 118 - - // KZ1048 is the MIB identifier with IANA name KZ-1048. - // - // See https://www.iana.org/assignments/charset-reg/KZ-1048 - KZ1048 MIB = 119 - - // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2. - // - // the 2-octet Basic Multilingual Plane, aka Unicode - // this needs to specify network byte order: the standard - // does not specify (it is a 16-bit integer space) - Unicode MIB = 1000 - - // UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4. - // - // the full code space. (same comment about byte order, - // these are 31-bit numbers. - UCS4 MIB = 1001 - - // UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic. - // - // ASCII subset of Unicode. Basic Latin = collection 1 - // See ISO 10646, Appendix A - UnicodeASCII MIB = 1002 - - // UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1. - // - // ISO Latin-1 subset of Unicode. Basic Latin and Latin-1 - // Supplement = collections 1 and 2. See ISO 10646, - // Appendix A. See rfc1815 . - UnicodeLatin1 MIB = 1003 - - // UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1. - // - // ISO 10646 Japanese, see rfc1815 . - UnicodeJapanese MIB = 1004 - - // UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261. - // - // IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261 - UnicodeIBM1261 MIB = 1005 - - // UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268. - // - // IBM Latin-4 Extended Presentation Set, GCSGID: 1268 - UnicodeIBM1268 MIB = 1006 - - // UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276. - // - // IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276 - UnicodeIBM1276 MIB = 1007 - - // UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264. - // - // IBM Arabic Presentation Set, GCSGID: 1264 - UnicodeIBM1264 MIB = 1008 - - // UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265. - // - // IBM Hebrew Presentation Set, GCSGID: 1265 - UnicodeIBM1265 MIB = 1009 - - // Unicode11 is the MIB identifier with IANA name UNICODE-1-1. - // - // rfc1641 - // Reference: RFC1641 - Unicode11 MIB = 1010 - - // SCSU is the MIB identifier with IANA name SCSU. - // - // SCSU See https://www.iana.org/assignments/charset-reg/SCSU - SCSU MIB = 1011 - - // UTF7 is the MIB identifier with IANA name UTF-7. - // - // rfc2152 - // Reference: RFC2152 - UTF7 MIB = 1012 - - // UTF16BE is the MIB identifier with IANA name UTF-16BE. - // - // rfc2781 - // Reference: RFC2781 - UTF16BE MIB = 1013 - - // UTF16LE is the MIB identifier with IANA name UTF-16LE. - // - // rfc2781 - // Reference: RFC2781 - UTF16LE MIB = 1014 - - // UTF16 is the MIB identifier with IANA name UTF-16. - // - // rfc2781 - // Reference: RFC2781 - UTF16 MIB = 1015 - - // CESU8 is the MIB identifier with IANA name CESU-8. - // - // https://www.unicode.org/reports/tr26 - CESU8 MIB = 1016 - - // UTF32 is the MIB identifier with IANA name UTF-32. - // - // https://www.unicode.org/reports/tr19/ - UTF32 MIB = 1017 - - // UTF32BE is the MIB identifier with IANA name UTF-32BE. 
- // - // https://www.unicode.org/reports/tr19/ - UTF32BE MIB = 1018 - - // UTF32LE is the MIB identifier with IANA name UTF-32LE. - // - // https://www.unicode.org/reports/tr19/ - UTF32LE MIB = 1019 - - // BOCU1 is the MIB identifier with IANA name BOCU-1. - // - // https://www.unicode.org/notes/tn6/ - BOCU1 MIB = 1020 - - // UTF7IMAP is the MIB identifier with IANA name UTF-7-IMAP. - // - // Note: This charset is used to encode Unicode in IMAP mailbox names; - // see section 5.1.3 of rfc3501 . It should never be used - // outside this context. A name has been assigned so that charset processing - // implementations can refer to it in a consistent way. - UTF7IMAP MIB = 1021 - - // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. - // - // Extended ISO 8859-1 Latin-1 for Windows 3.0. - // PCL Symbol Set id: 9U - Windows30Latin1 MIB = 2000 - - // Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1. - // - // Extended ISO 8859-1 Latin-1 for Windows 3.1. - // PCL Symbol Set id: 19U - Windows31Latin1 MIB = 2001 - - // Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2. - // - // Extended ISO 8859-2. Latin-2 for Windows 3.1. - // PCL Symbol Set id: 9E - Windows31Latin2 MIB = 2002 - - // Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5. - // - // Extended ISO 8859-9. Latin-5 for Windows 3.1 - // PCL Symbol Set id: 5T - Windows31Latin5 MIB = 2003 - - // HPRoman8 is the MIB identifier with IANA name hp-roman8. - // - // LaserJet IIP Printer User's Manual, - // HP part no 33471-90901, Hewlet-Packard, June 1989. - // Reference: RFC1345 - HPRoman8 MIB = 2004 - - // AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding. - // - // PostScript Language Reference Manual - // PCL Symbol Set id: 10J - AdobeStandardEncoding MIB = 2005 - - // VenturaUS is the MIB identifier with IANA name Ventura-US. - // - // Ventura US. ASCII plus characters typically used in - // publishing, like pilcrow, copyright, registered, trade mark, - // section, dagger, and double dagger in the range A0 (hex) - // to FF (hex). - // PCL Symbol Set id: 14J - VenturaUS MIB = 2006 - - // VenturaInternational is the MIB identifier with IANA name Ventura-International. - // - // Ventura International. ASCII plus coded characters similar - // to Roman8. - // PCL Symbol Set id: 13J - VenturaInternational MIB = 2007 - - // DECMCS is the MIB identifier with IANA name DEC-MCS. - // - // VAX/VMS User's Manual, - // Order Number: AI-Y517A-TE, April 1986. - // Reference: RFC1345 - DECMCS MIB = 2008 - - // PC850Multilingual is the MIB identifier with IANA name IBM850. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PC850Multilingual MIB = 2009 - - // PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian. - // - // PC Danish Norwegian - // 8-bit PC set for Danish Norwegian - // PCL Symbol Set id: 11U - PC8DanishNorwegian MIB = 2012 - - // PC862LatinHebrew is the MIB identifier with IANA name IBM862. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PC862LatinHebrew MIB = 2013 - - // PC8Turkish is the MIB identifier with IANA name PC8-Turkish. - // - // PC Latin Turkish. PCL Symbol Set id: 9T - PC8Turkish MIB = 2014 - - // IBMSymbols is the MIB identifier with IANA name IBM-Symbols. - // - // Presentation Set, CPGID: 259 - IBMSymbols MIB = 2015 - - // IBMThai is the MIB identifier with IANA name IBM-Thai. 
- // - // Presentation Set, CPGID: 838 - IBMThai MIB = 2016 - - // HPLegal is the MIB identifier with IANA name HP-Legal. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 1U - HPLegal MIB = 2017 - - // HPPiFont is the MIB identifier with IANA name HP-Pi-font. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 15U - HPPiFont MIB = 2018 - - // HPMath8 is the MIB identifier with IANA name HP-Math8. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 8M - HPMath8 MIB = 2019 - - // HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding. - // - // PostScript Language Reference Manual - // PCL Symbol Set id: 5M - HPPSMath MIB = 2020 - - // HPDesktop is the MIB identifier with IANA name HP-DeskTop. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 7J - HPDesktop MIB = 2021 - - // VenturaMath is the MIB identifier with IANA name Ventura-Math. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 6M - VenturaMath MIB = 2022 - - // MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 6J - MicrosoftPublishing MIB = 2023 - - // Windows31J is the MIB identifier with IANA name Windows-31J. - // - // Windows Japanese. A further extension of Shift_JIS - // to include NEC special characters (Row 13), NEC - // selection of IBM extensions (Rows 89 to 92), and IBM - // extensions (Rows 115 to 119). The CCS's are - // JIS X0201:1997, JIS X0208:1997, and these extensions. - // This charset can be used for the top-level media type "text", - // but it is of limited or specialized use (see rfc2278 ). - // PCL Symbol Set id: 19K - Windows31J MIB = 2024 - - // GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312). - // - // Chinese for People's Republic of China (PRC) mixed one byte, - // two byte set: - // 20-7E = one byte ASCII - // A1-FE = two byte PRC Kanji - // See GB 2312-80 - // PCL Symbol Set Id: 18C - GB2312 MIB = 2025 - - // Big5 is the MIB identifier with IANA name Big5 (MIME: Big5). - // - // Chinese for Taiwan Multi-byte set. - // PCL Symbol Set Id: 18T - Big5 MIB = 2026 - - // Macintosh is the MIB identifier with IANA name macintosh. - // - // The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991 - // Reference: RFC1345 - Macintosh MIB = 2027 - - // IBM037 is the MIB identifier with IANA name IBM037. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM037 MIB = 2028 - - // IBM038 is the MIB identifier with IANA name IBM038. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM038 MIB = 2029 - - // IBM273 is the MIB identifier with IANA name IBM273. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM273 MIB = 2030 - - // IBM274 is the MIB identifier with IANA name IBM274. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM274 MIB = 2031 - - // IBM275 is the MIB identifier with IANA name IBM275. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM275 MIB = 2032 - - // IBM277 is the MIB identifier with IANA name IBM277. 
- // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM277 MIB = 2033 - - // IBM278 is the MIB identifier with IANA name IBM278. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM278 MIB = 2034 - - // IBM280 is the MIB identifier with IANA name IBM280. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM280 MIB = 2035 - - // IBM281 is the MIB identifier with IANA name IBM281. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM281 MIB = 2036 - - // IBM284 is the MIB identifier with IANA name IBM284. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM284 MIB = 2037 - - // IBM285 is the MIB identifier with IANA name IBM285. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM285 MIB = 2038 - - // IBM290 is the MIB identifier with IANA name IBM290. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM290 MIB = 2039 - - // IBM297 is the MIB identifier with IANA name IBM297. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM297 MIB = 2040 - - // IBM420 is the MIB identifier with IANA name IBM420. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990, - // IBM NLS RM p 11-11 - // Reference: RFC1345 - IBM420 MIB = 2041 - - // IBM423 is the MIB identifier with IANA name IBM423. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM423 MIB = 2042 - - // IBM424 is the MIB identifier with IANA name IBM424. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM424 MIB = 2043 - - // PC8CodePage437 is the MIB identifier with IANA name IBM437. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PC8CodePage437 MIB = 2011 - - // IBM500 is the MIB identifier with IANA name IBM500. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM500 MIB = 2044 - - // IBM851 is the MIB identifier with IANA name IBM851. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM851 MIB = 2045 - - // PCp852 is the MIB identifier with IANA name IBM852. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PCp852 MIB = 2010 - - // IBM855 is the MIB identifier with IANA name IBM855. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM855 MIB = 2046 - - // IBM857 is the MIB identifier with IANA name IBM857. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM857 MIB = 2047 - - // IBM860 is the MIB identifier with IANA name IBM860. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM860 MIB = 2048 - - // IBM861 is the MIB identifier with IANA name IBM861. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM861 MIB = 2049 - - // IBM863 is the MIB identifier with IANA name IBM863. - // - // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 - // Reference: RFC1345 - IBM863 MIB = 2050 - - // IBM864 is the MIB identifier with IANA name IBM864. - // - // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 - // Reference: RFC1345 - IBM864 MIB = 2051 - - // IBM865 is the MIB identifier with IANA name IBM865. - // - // IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987) - // Reference: RFC1345 - IBM865 MIB = 2052 - - // IBM868 is the MIB identifier with IANA name IBM868. 
- // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM868 MIB = 2053 - - // IBM869 is the MIB identifier with IANA name IBM869. - // - // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 - // Reference: RFC1345 - IBM869 MIB = 2054 - - // IBM870 is the MIB identifier with IANA name IBM870. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM870 MIB = 2055 - - // IBM871 is the MIB identifier with IANA name IBM871. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM871 MIB = 2056 - - // IBM880 is the MIB identifier with IANA name IBM880. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM880 MIB = 2057 - - // IBM891 is the MIB identifier with IANA name IBM891. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM891 MIB = 2058 - - // IBM903 is the MIB identifier with IANA name IBM903. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM903 MIB = 2059 - - // IBBM904 is the MIB identifier with IANA name IBM904. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBBM904 MIB = 2060 - - // IBM905 is the MIB identifier with IANA name IBM905. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM905 MIB = 2061 - - // IBM918 is the MIB identifier with IANA name IBM918. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM918 MIB = 2062 - - // IBM1026 is the MIB identifier with IANA name IBM1026. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM1026 MIB = 2063 - - // IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - IBMEBCDICATDE MIB = 2064 - - // EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICATDEA MIB = 2065 - - // EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICCAFR MIB = 2066 - - // EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICDKNO MIB = 2067 - - // EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICDKNOA MIB = 2068 - - // EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICFISE MIB = 2069 - - // EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICFISEA MIB = 2070 - - // EBCDICFR is the MIB identifier with IANA name EBCDIC-FR. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICFR MIB = 2071 - - // EBCDICIT is the MIB identifier with IANA name EBCDIC-IT. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICIT MIB = 2072 - - // EBCDICPT is the MIB identifier with IANA name EBCDIC-PT. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICPT MIB = 2073 - - // EBCDICES is the MIB identifier with IANA name EBCDIC-ES. 
- // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICES MIB = 2074 - - // EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICESA MIB = 2075 - - // EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICESS MIB = 2076 - - // EBCDICUK is the MIB identifier with IANA name EBCDIC-UK. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICUK MIB = 2077 - - // EBCDICUS is the MIB identifier with IANA name EBCDIC-US. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICUS MIB = 2078 - - // Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT. - // - // Reference: RFC1428 - Unknown8BiT MIB = 2079 - - // Mnemonic is the MIB identifier with IANA name MNEMONIC. - // - // rfc1345 , also known as "mnemonic+ascii+38" - // Reference: RFC1345 - Mnemonic MIB = 2080 - - // Mnem is the MIB identifier with IANA name MNEM. - // - // rfc1345 , also known as "mnemonic+ascii+8200" - // Reference: RFC1345 - Mnem MIB = 2081 - - // VISCII is the MIB identifier with IANA name VISCII. - // - // rfc1456 - // Reference: RFC1456 - VISCII MIB = 2082 - - // VIQR is the MIB identifier with IANA name VIQR. - // - // rfc1456 - // Reference: RFC1456 - VIQR MIB = 2083 - - // KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R). - // - // rfc1489 , based on GOST-19768-74, ISO-6937/8, - // INIS-Cyrillic, ISO-5427. - // Reference: RFC1489 - KOI8R MIB = 2084 - - // HZGB2312 is the MIB identifier with IANA name HZ-GB-2312. - // - // rfc1842 , rfc1843 rfc1843 rfc1842 - HZGB2312 MIB = 2085 - - // IBM866 is the MIB identifier with IANA name IBM866. - // - // IBM NLDG Volume 2 (SE09-8002-03) August 1994 - IBM866 MIB = 2086 - - // PC775Baltic is the MIB identifier with IANA name IBM775. - // - // HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996 - PC775Baltic MIB = 2087 - - // KOI8U is the MIB identifier with IANA name KOI8-U. - // - // rfc2319 - // Reference: RFC2319 - KOI8U MIB = 2088 - - // IBM00858 is the MIB identifier with IANA name IBM00858. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM00858 - IBM00858 MIB = 2089 - - // IBM00924 is the MIB identifier with IANA name IBM00924. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM00924 - IBM00924 MIB = 2090 - - // IBM01140 is the MIB identifier with IANA name IBM01140. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01140 - IBM01140 MIB = 2091 - - // IBM01141 is the MIB identifier with IANA name IBM01141. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01141 - IBM01141 MIB = 2092 - - // IBM01142 is the MIB identifier with IANA name IBM01142. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01142 - IBM01142 MIB = 2093 - - // IBM01143 is the MIB identifier with IANA name IBM01143. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01143 - IBM01143 MIB = 2094 - - // IBM01144 is the MIB identifier with IANA name IBM01144. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01144 - IBM01144 MIB = 2095 - - // IBM01145 is the MIB identifier with IANA name IBM01145. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01145 - IBM01145 MIB = 2096 - - // IBM01146 is the MIB identifier with IANA name IBM01146. 
- // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01146 - IBM01146 MIB = 2097 - - // IBM01147 is the MIB identifier with IANA name IBM01147. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01147 - IBM01147 MIB = 2098 - - // IBM01148 is the MIB identifier with IANA name IBM01148. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01148 - IBM01148 MIB = 2099 - - // IBM01149 is the MIB identifier with IANA name IBM01149. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01149 - IBM01149 MIB = 2100 - - // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS. - // - // See https://www.iana.org/assignments/charset-reg/Big5-HKSCS - Big5HKSCS MIB = 2101 - - // IBM1047 is the MIB identifier with IANA name IBM1047. - // - // IBM1047 (EBCDIC Latin 1/Open Systems) https://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf - IBM1047 MIB = 2102 - - // PTCP154 is the MIB identifier with IANA name PTCP154. - // - // See https://www.iana.org/assignments/charset-reg/PTCP154 - PTCP154 MIB = 2103 - - // Amiga1251 is the MIB identifier with IANA name Amiga-1251. - // - // See https://www.amiga.ultranet.ru/Amiga-1251.html - Amiga1251 MIB = 2104 - - // KOI7switched is the MIB identifier with IANA name KOI7-switched. - // - // See https://www.iana.org/assignments/charset-reg/KOI7-switched - KOI7switched MIB = 2105 - - // BRF is the MIB identifier with IANA name BRF. - // - // See https://www.iana.org/assignments/charset-reg/BRF - BRF MIB = 2106 - - // TSCII is the MIB identifier with IANA name TSCII. - // - // See https://www.iana.org/assignments/charset-reg/TSCII - TSCII MIB = 2107 - - // CP51932 is the MIB identifier with IANA name CP51932. - // - // See https://www.iana.org/assignments/charset-reg/CP51932 - CP51932 MIB = 2108 - - // Windows874 is the MIB identifier with IANA name windows-874. - // - // See https://www.iana.org/assignments/charset-reg/windows-874 - Windows874 MIB = 2109 - - // Windows1250 is the MIB identifier with IANA name windows-1250. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1250 - Windows1250 MIB = 2250 - - // Windows1251 is the MIB identifier with IANA name windows-1251. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1251 - Windows1251 MIB = 2251 - - // Windows1252 is the MIB identifier with IANA name windows-1252. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1252 - Windows1252 MIB = 2252 - - // Windows1253 is the MIB identifier with IANA name windows-1253. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1253 - Windows1253 MIB = 2253 - - // Windows1254 is the MIB identifier with IANA name windows-1254. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1254 - Windows1254 MIB = 2254 - - // Windows1255 is the MIB identifier with IANA name windows-1255. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1255 - Windows1255 MIB = 2255 - - // Windows1256 is the MIB identifier with IANA name windows-1256. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1256 - Windows1256 MIB = 2256 - - // Windows1257 is the MIB identifier with IANA name windows-1257. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1257 - Windows1257 MIB = 2257 - - // Windows1258 is the MIB identifier with IANA name windows-1258. 
- // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1258 - Windows1258 MIB = 2258 - - // TIS620 is the MIB identifier with IANA name TIS-620. - // - // Thai Industrial Standards Institute (TISI) - TIS620 MIB = 2259 - - // CP50220 is the MIB identifier with IANA name CP50220. - // - // See https://www.iana.org/assignments/charset-reg/CP50220 - CP50220 MIB = 2260 -) diff --git a/openshift/tools/vendor/golang.org/x/text/encoding/internal/internal.go b/openshift/tools/vendor/golang.org/x/text/encoding/internal/internal.go deleted file mode 100644 index 413e6fc6d7..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/encoding/internal/internal.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains code that is shared among encoding implementations. -package internal - -import ( - "golang.org/x/text/encoding" - "golang.org/x/text/encoding/internal/identifier" - "golang.org/x/text/transform" -) - -// Encoding is an implementation of the Encoding interface that adds the String -// and ID methods to an existing encoding. -type Encoding struct { - encoding.Encoding - Name string - MIB identifier.MIB -} - -// _ verifies that Encoding implements identifier.Interface. -var _ identifier.Interface = (*Encoding)(nil) - -func (e *Encoding) String() string { - return e.Name -} - -func (e *Encoding) ID() (mib identifier.MIB, other string) { - return e.MIB, "" -} - -// SimpleEncoding is an Encoding that combines two Transformers. -type SimpleEncoding struct { - Decoder transform.Transformer - Encoder transform.Transformer -} - -func (e *SimpleEncoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: e.Decoder} -} - -func (e *SimpleEncoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{Transformer: e.Encoder} -} - -// FuncEncoding is an Encoding that combines two functions returning a new -// Transformer. -type FuncEncoding struct { - Decoder func() transform.Transformer - Encoder func() transform.Transformer -} - -func (e FuncEncoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: e.Decoder()} -} - -func (e FuncEncoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{Transformer: e.Encoder()} -} - -// A RepertoireError indicates a rune is not in the repertoire of a destination -// encoding. It is associated with an encoding-specific suggested replacement -// byte. -type RepertoireError byte - -// Error implements the error interface. -func (r RepertoireError) Error() string { - return "encoding: rune not supported by encoding." -} - -// Replacement returns the replacement string associated with this error. -func (r RepertoireError) Replacement() byte { return byte(r) } - -var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub) diff --git a/openshift/tools/vendor/golang.org/x/text/encoding/unicode/override.go b/openshift/tools/vendor/golang.org/x/text/encoding/unicode/override.go deleted file mode 100644 index 35d62fcc99..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/encoding/unicode/override.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
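The MIB constants deleted above mirror the IANA charset registry; the public entry point backed by that data is golang.org/x/text/encoding/ianaindex. A minimal sketch, not part of this diff, of resolving an encoding by its IANA name and decoding with it (assumes the standard ianaindex API):

```go
package main

import (
	"fmt"

	"golang.org/x/text/encoding/ianaindex"
)

func main() {
	// Look up an encoding by its IANA charset name; the registry data
	// behind this lookup is what the MIB constants above enumerate.
	e, err := ianaindex.IANA.Encoding("windows-1252")
	if err != nil {
		panic(err)
	}
	name, _ := ianaindex.IANA.Name(e)
	fmt.Println(name) // windows-1252

	// Decode windows-1252 bytes (curly quotes around "Hi") to UTF-8.
	out, err := e.NewDecoder().Bytes([]byte{0x93, 0x48, 0x69, 0x94})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // “Hi”
}
```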
- -package unicode - -import ( - "golang.org/x/text/transform" -) - -// BOMOverride returns a new decoder transformer that is identical to fallback, -// except that the presence of a Byte Order Mark at the start of the input -// causes it to switch to the corresponding Unicode decoding. It will only -// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE. -// -// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not -// just UTF-16 variants, and allowing falling back to any encoding scheme. -// -// This technique is recommended by the W3C for use in HTML 5: "For -// compatibility with deployed content, the byte order mark (also known as BOM) -// is considered more authoritative than anything else." -// http://www.w3.org/TR/encoding/#specification-hooks -// -// Using BOMOverride is mostly intended for use cases where the first characters -// of a fallback encoding are known to not be a BOM, for example, for valid HTML -// and most encodings. -func BOMOverride(fallback transform.Transformer) transform.Transformer { - // TODO: possibly allow a variadic argument of unicode encodings to allow - // specifying details of which fallbacks are supported as well as - // specifying the details of the implementations. This would also allow for - // support for UTF-32, which should not be supported by default. - return &bomOverride{fallback: fallback} -} - -type bomOverride struct { - fallback transform.Transformer - current transform.Transformer -} - -func (d *bomOverride) Reset() { - d.current = nil - d.fallback.Reset() -} - -var ( - // TODO: we could use decode functions here, instead of allocating a new - // decoder on every NewDecoder as IgnoreBOM decoders can be stateless. - utf16le = UTF16(LittleEndian, IgnoreBOM) - utf16be = UTF16(BigEndian, IgnoreBOM) -) - -const utf8BOM = "\ufeff" - -func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if d.current != nil { - return d.current.Transform(dst, src, atEOF) - } - if len(src) < 3 && !atEOF { - return 0, 0, transform.ErrShortSrc - } - d.current = d.fallback - bomSize := 0 - if len(src) >= 2 { - if src[0] == 0xFF && src[1] == 0xFE { - d.current = utf16le.NewDecoder() - bomSize = 2 - } else if src[0] == 0xFE && src[1] == 0xFF { - d.current = utf16be.NewDecoder() - bomSize = 2 - } else if len(src) >= 3 && - src[0] == utf8BOM[0] && - src[1] == utf8BOM[1] && - src[2] == utf8BOM[2] { - d.current = transform.Nop - bomSize = 3 - } - } - if bomSize < len(src) { - nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF) - } - return nDst, nSrc + bomSize, err -} diff --git a/openshift/tools/vendor/golang.org/x/text/encoding/unicode/unicode.go b/openshift/tools/vendor/golang.org/x/text/encoding/unicode/unicode.go deleted file mode 100644 index dd99ad14d3..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/encoding/unicode/unicode.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unicode provides Unicode encodings such as UTF-16. 
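BOMOverride, deleted just above with the rest of the vendored override.go, is normally wrapped around a fallback decoder so that a leading BOM can switch the stream to UTF-8 or UTF-16. A hedged sketch of the usual call pattern, using a UTF-8 fallback:

```go
package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// Fall back to plain UTF-8, but honor a UTF-8/UTF-16 BOM if present.
	dec := unicode.BOMOverride(unicode.UTF8.NewDecoder())

	// UTF-16LE input with a BOM: FF FE, then "hi".
	in := []byte{0xFF, 0xFE, 'h', 0x00, 'i', 0x00}
	out, _, err := transform.Bytes(dec, in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hi
}
```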
-package unicode // import "golang.org/x/text/encoding/unicode" - -import ( - "bytes" - "errors" - "unicode/utf16" - "unicode/utf8" - - "golang.org/x/text/encoding" - "golang.org/x/text/encoding/internal" - "golang.org/x/text/encoding/internal/identifier" - "golang.org/x/text/internal/utf8internal" - "golang.org/x/text/runes" - "golang.org/x/text/transform" -) - -// TODO: I think the Transformers really should return errors on unmatched -// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781, -// which leaves it open, but is suggested by WhatWG. It will allow for all error -// modes as defined by WhatWG: fatal, HTML and Replacement. This would require -// the introduction of some kind of error type for conveying the erroneous code -// point. - -// UTF8 is the UTF-8 encoding. It neither removes nor adds byte order marks. -var UTF8 encoding.Encoding = utf8enc - -// UTF8BOM is an UTF-8 encoding where the decoder strips a leading byte order -// mark while the encoder adds one. -// -// Some editors add a byte order mark as a signature to UTF-8 files. Although -// the byte order mark is not useful for detecting byte order in UTF-8, it is -// sometimes used as a convention to mark UTF-8-encoded files. This relies on -// the observation that the UTF-8 byte order mark is either an illegal or at -// least very unlikely sequence in any other character encoding. -var UTF8BOM encoding.Encoding = utf8bomEncoding{} - -type utf8bomEncoding struct{} - -func (utf8bomEncoding) String() string { - return "UTF-8-BOM" -} - -func (utf8bomEncoding) ID() (identifier.MIB, string) { - return identifier.Unofficial, "x-utf8bom" -} - -func (utf8bomEncoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{ - Transformer: &utf8bomEncoder{t: runes.ReplaceIllFormed()}, - } -} - -func (utf8bomEncoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: &utf8bomDecoder{}} -} - -var utf8enc = &internal.Encoding{ - &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, - "UTF-8", - identifier.UTF8, -} - -type utf8bomDecoder struct { - checked bool -} - -func (t *utf8bomDecoder) Reset() { - t.checked = false -} - -func (t *utf8bomDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if !t.checked { - if !atEOF && len(src) < len(utf8BOM) { - if len(src) == 0 { - return 0, 0, nil - } - return 0, 0, transform.ErrShortSrc - } - if bytes.HasPrefix(src, []byte(utf8BOM)) { - nSrc += len(utf8BOM) - src = src[len(utf8BOM):] - } - t.checked = true - } - nDst, n, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF) - nSrc += n - return nDst, nSrc, err -} - -type utf8bomEncoder struct { - written bool - t transform.Transformer -} - -func (t *utf8bomEncoder) Reset() { - t.written = false - t.t.Reset() -} - -func (t *utf8bomEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if !t.written { - if len(dst) < len(utf8BOM) { - return nDst, 0, transform.ErrShortDst - } - nDst = copy(dst, utf8BOM) - t.written = true - } - n, nSrc, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF) - nDst += n - return nDst, nSrc, err -} - -type utf8Decoder struct{ transform.NopResetter } - -func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - var pSrc int // point from which to start copy in src - var accept utf8internal.AcceptRange - - // The decoder can only make the input larger, not smaller. 
- n := len(src) - if len(dst) < n { - err = transform.ErrShortDst - n = len(dst) - atEOF = false - } - for nSrc < n { - c := src[nSrc] - if c < utf8.RuneSelf { - nSrc++ - continue - } - first := utf8internal.First[c] - size := int(first & utf8internal.SizeMask) - if first == utf8internal.FirstInvalid { - goto handleInvalid // invalid starter byte - } - accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift] - if nSrc+size > n { - if !atEOF { - // We may stop earlier than necessary here if the short sequence - // has invalid bytes. Not checking for this simplifies the code - // and may avoid duplicate computations in certain conditions. - if err == nil { - err = transform.ErrShortSrc - } - break - } - // Determine the maximal subpart of an ill-formed subsequence. - switch { - case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]: - size = 1 - case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]: - size = 2 - default: - size = 3 // As we are short, the maximum is 3. - } - goto handleInvalid - } - if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c { - size = 1 - goto handleInvalid // invalid continuation byte - } else if size == 2 { - } else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c { - size = 2 - goto handleInvalid // invalid continuation byte - } else if size == 3 { - } else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c { - size = 3 - goto handleInvalid // invalid continuation byte - } - nSrc += size - continue - - handleInvalid: - // Copy the scanned input so far. - nDst += copy(dst[nDst:], src[pSrc:nSrc]) - - // Append RuneError to the destination. - const runeError = "\ufffd" - if nDst+len(runeError) > len(dst) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += copy(dst[nDst:], runeError) - - // Skip the maximal subpart of an ill-formed subsequence according to - // the W3C standard way instead of the Go way. This Transform is - // probably the only place in the text repo where it is warranted. - nSrc += size - pSrc = nSrc - - // Recompute the maximum source length. - if sz := len(dst) - nDst; sz < len(src)-nSrc { - err = transform.ErrShortDst - n = nSrc + sz - atEOF = false - } - } - return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err -} - -// UTF16 returns a UTF-16 Encoding for the given default endianness and byte -// order mark (BOM) policy. -// -// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then -// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect -// the endianness used for decoding, and will instead be output as their -// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy -// is UseBOM or ExpectBOM a staring BOM is not written to the UTF-8 output. -// Instead, it overrides the default endianness e for the remainder of the -// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not -// affect the endianness used, and will instead be output as their standard -// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed -// with the default Endianness. For ExpectBOM, in that case, the transformation -// will return early with an ErrMissingBOM error. -// -// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of -// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not -// be inserted. The UTF-8 input does not need to contain a BOM. -// -// There is no concept of a 'native' endianness. 
If the UTF-16 data is produced -// and consumed in a greater context that implies a certain endianness, use -// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM. -// -// In the language of https://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM -// corresponds to "Where the precise type of the data stream is known... the -// BOM should not be used" and ExpectBOM corresponds to "A particular -// protocol... may require use of the BOM". -func UTF16(e Endianness, b BOMPolicy) encoding.Encoding { - return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]} -} - -// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that -// some configurations map to the same MIB identifier. RFC 2781 has requirements -// and recommendations. Some of the "configurations" are merely recommendations, -// so multiple configurations could match. -var mibValue = map[Endianness][numBOMValues]identifier.MIB{ - BigEndian: [numBOMValues]identifier.MIB{ - IgnoreBOM: identifier.UTF16BE, - UseBOM: identifier.UTF16, // BigEnding default is preferred by RFC 2781. - // TODO: acceptBOM | strictBOM would map to UTF16BE as well. - }, - LittleEndian: [numBOMValues]identifier.MIB{ - IgnoreBOM: identifier.UTF16LE, - UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows. - // TODO: acceptBOM | strictBOM would map to UTF16LE as well. - }, - // ExpectBOM is not widely used and has no valid MIB identifier. -} - -// All lists a configuration for each IANA-defined UTF-16 variant. -var All = []encoding.Encoding{ - UTF8, - UTF16(BigEndian, UseBOM), - UTF16(BigEndian, IgnoreBOM), - UTF16(LittleEndian, IgnoreBOM), -} - -// BOMPolicy is a UTF-16 encoding's byte order mark policy. -type BOMPolicy uint8 - -const ( - writeBOM BOMPolicy = 0x01 - acceptBOM BOMPolicy = 0x02 - requireBOM BOMPolicy = 0x04 - bomMask BOMPolicy = 0x07 - - // HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a - // map of an array of length 8 of a type that is also used as a key or value - // in another map). See golang.org/issue/11354. - // TODO: consider changing this value back to 8 if the use of 1.4.* has - // been minimized. - numBOMValues = 8 + 1 - - // IgnoreBOM means to ignore any byte order marks. - IgnoreBOM BOMPolicy = 0 - // Common and RFC 2781-compliant interpretation for UTF-16BE/LE. - - // UseBOM means that the UTF-16 form may start with a byte order mark, which - // will be used to override the default encoding. - UseBOM BOMPolicy = writeBOM | acceptBOM - // Common and RFC 2781-compliant interpretation for UTF-16. - - // ExpectBOM means that the UTF-16 form must start with a byte order mark, - // which will be used to override the default encoding. - ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM - // Used in Java as Unicode (not to be confused with Java's UTF-16) and - // ICU's UTF-16,version=1. Not compliant with RFC 2781. - - // TODO (maybe): strictBOM: BOM must match Endianness. This would allow: - // - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM - // (UnicodeBig and UnicodeLittle in Java) - // - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E: - // acceptBOM | strictBOM (e.g. assigned to CheckBOM). - // This addition would be consistent with supporting ExpectBOM. -) - -// Endianness is a UTF-16 encoding's default endianness. -type Endianness bool - -const ( - // BigEndian is UTF-16BE. - BigEndian Endianness = false - // LittleEndian is UTF-16LE. 
- LittleEndian Endianness = true -) - -// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a -// starting byte order mark. -var ErrMissingBOM = errors.New("encoding: missing byte order mark") - -type utf16Encoding struct { - config - mib identifier.MIB -} - -type config struct { - endianness Endianness - bomPolicy BOMPolicy -} - -func (u utf16Encoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: &utf16Decoder{ - initial: u.config, - current: u.config, - }} -} - -func (u utf16Encoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{Transformer: &utf16Encoder{ - endianness: u.endianness, - initialBOMPolicy: u.bomPolicy, - currentBOMPolicy: u.bomPolicy, - }} -} - -func (u utf16Encoding) ID() (mib identifier.MIB, other string) { - return u.mib, "" -} - -func (u utf16Encoding) String() string { - e, b := "B", "" - if u.endianness == LittleEndian { - e = "L" - } - switch u.bomPolicy { - case ExpectBOM: - b = "Expect" - case UseBOM: - b = "Use" - case IgnoreBOM: - b = "Ignore" - } - return "UTF-16" + e + "E (" + b + " BOM)" -} - -type utf16Decoder struct { - initial config - current config -} - -func (u *utf16Decoder) Reset() { - u.current = u.initial -} - -func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if len(src) < 2 && atEOF && u.current.bomPolicy&requireBOM != 0 { - return 0, 0, ErrMissingBOM - } - if len(src) == 0 { - return 0, 0, nil - } - if len(src) >= 2 && u.current.bomPolicy&acceptBOM != 0 { - switch { - case src[0] == 0xfe && src[1] == 0xff: - u.current.endianness = BigEndian - nSrc = 2 - case src[0] == 0xff && src[1] == 0xfe: - u.current.endianness = LittleEndian - nSrc = 2 - default: - if u.current.bomPolicy&requireBOM != 0 { - return 0, 0, ErrMissingBOM - } - } - u.current.bomPolicy = IgnoreBOM - } - - var r rune - var dSize, sSize int - for nSrc < len(src) { - if nSrc+1 < len(src) { - x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1]) - if u.current.endianness == LittleEndian { - x = x>>8 | x<<8 - } - r, sSize = rune(x), 2 - if utf16.IsSurrogate(r) { - if nSrc+3 < len(src) { - x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3]) - if u.current.endianness == LittleEndian { - x = x>>8 | x<<8 - } - // Save for next iteration if it is not a high surrogate. - if isHighSurrogate(rune(x)) { - r, sSize = utf16.DecodeRune(r, rune(x)), 4 - } - } else if !atEOF { - err = transform.ErrShortSrc - break - } - } - if dSize = utf8.RuneLen(r); dSize < 0 { - r, dSize = utf8.RuneError, 3 - } - } else if atEOF { - // Single trailing byte. - r, dSize, sSize = utf8.RuneError, 3, 1 - } else { - err = transform.ErrShortSrc - break - } - if nDst+dSize > len(dst) { - err = transform.ErrShortDst - break - } - nDst += utf8.EncodeRune(dst[nDst:], r) - nSrc += sSize - } - return nDst, nSrc, err -} - -func isHighSurrogate(r rune) bool { - return 0xDC00 <= r && r <= 0xDFFF -} - -type utf16Encoder struct { - endianness Endianness - initialBOMPolicy BOMPolicy - currentBOMPolicy BOMPolicy -} - -func (u *utf16Encoder) Reset() { - u.currentBOMPolicy = u.initialBOMPolicy -} - -func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if u.currentBOMPolicy&writeBOM != 0 { - if len(dst) < 2 { - return 0, 0, transform.ErrShortDst - } - dst[0], dst[1] = 0xfe, 0xff - u.currentBOMPolicy = IgnoreBOM - nDst = 2 - } - - r, size := rune(0), 0 - for nSrc < len(src) { - r = rune(src[nSrc]) - - // Decode a 1-byte rune. 
- if r < utf8.RuneSelf { - size = 1 - - } else { - // Decode a multi-byte rune. - r, size = utf8.DecodeRune(src[nSrc:]) - if size == 1 { - // All valid runes of size 1 (those below utf8.RuneSelf) were - // handled above. We have invalid UTF-8 or we haven't seen the - // full character yet. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - } - } - - if r <= 0xffff { - if nDst+2 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = uint8(r >> 8) - dst[nDst+1] = uint8(r) - nDst += 2 - } else { - if nDst+4 > len(dst) { - err = transform.ErrShortDst - break - } - r1, r2 := utf16.EncodeRune(r) - dst[nDst+0] = uint8(r1 >> 8) - dst[nDst+1] = uint8(r1) - dst[nDst+2] = uint8(r2 >> 8) - dst[nDst+3] = uint8(r2) - nDst += 4 - } - nSrc += size - } - - if u.endianness == LittleEndian { - for i := 0; i < nDst; i += 2 { - dst[i], dst[i+1] = dst[i+1], dst[i] - } - } - return nDst, nSrc, err -} diff --git a/openshift/tools/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/openshift/tools/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go deleted file mode 100644 index e5c53b1b3e..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package utf8internal contains low-level utf8-related constants, tables, etc. -// that are used internally by the text package. -package utf8internal - -// The default lowest and highest continuation byte. -const ( - LoCB = 0x80 // 1000 0000 - HiCB = 0xBF // 1011 1111 -) - -// Constants related to getting information of first bytes of UTF-8 sequences. -const ( - // ASCII identifies a UTF-8 byte as ASCII. - ASCII = as - - // FirstInvalid indicates a byte is invalid as a first byte of a UTF-8 - // sequence. - FirstInvalid = xx - - // SizeMask is a mask for the size bits. Use use x&SizeMask to get the size. - SizeMask = 7 - - // AcceptShift is the right-shift count for the first byte info byte to get - // the index into the AcceptRanges table. See AcceptRanges. - AcceptShift = 4 - - // The names of these constants are chosen to give nice alignment in the - // table below. The first nibble is an index into acceptRanges or F for - // special one-byte cases. The second nibble is the Rune length or the - // Status for the special one-byte case. - xx = 0xF1 // invalid: size 1 - as = 0xF0 // ASCII: size 1 - s1 = 0x02 // accept 0, size 2 - s2 = 0x13 // accept 1, size 3 - s3 = 0x03 // accept 0, size 3 - s4 = 0x23 // accept 2, size 3 - s5 = 0x34 // accept 3, size 4 - s6 = 0x04 // accept 0, size 4 - s7 = 0x44 // accept 4, size 4 -) - -// First is information about the first byte in a UTF-8 sequence. 
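The unicode.go file removed above implements the UTF16 constructor together with its Endianness and BOMPolicy knobs. A sketch of a round trip through UTF-16LE with UseBOM, illustrating the public API rather than anything specific to this repo:

```go
package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
)

func main() {
	// UTF-16LE with UseBOM: the encoder writes a BOM, and the decoder
	// honors one, falling back to little-endian when it is absent.
	e := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM)

	enc, err := e.NewEncoder().Bytes([]byte("héllo"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", enc[:2]) // ff fe (the BOM)

	dec, err := e.NewDecoder().Bytes(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec)) // héllo
}
```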
-var First = [256]uint8{ - // 1 2 3 4 5 6 7 8 9 A B C D E F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F - // 1 2 3 4 5 6 7 8 9 A B C D E F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF - xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF - s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF - s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF - s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF -} - -// AcceptRange gives the range of valid values for the second byte in a UTF-8 -// sequence for any value for First that is not ASCII or FirstInvalid. -type AcceptRange struct { - Lo uint8 // lowest value for second byte. - Hi uint8 // highest value for second byte. -} - -// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b -// -// AcceptRanges[First[b[0]]>>AcceptShift] -// -// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting -// at b[0]. -var AcceptRanges = [...]AcceptRange{ - 0: {LoCB, HiCB}, - 1: {0xA0, HiCB}, - 2: {LoCB, 0x9F}, - 3: {0x90, HiCB}, - 4: {LoCB, 0x8F}, -} diff --git a/openshift/tools/vendor/golang.org/x/text/runes/cond.go b/openshift/tools/vendor/golang.org/x/text/runes/cond.go deleted file mode 100644 index df7aa02db6..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/runes/cond.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package runes - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. -// This is done for various reasons: -// - To retain the semantics of the Nop transformer: if input is passed to a Nop -// one would expect it to be unchanged. -// - It would be very expensive to pass a converted RuneError to a transformer: -// a transformer might need more source bytes after RuneError, meaning that -// the only way to pass it safely is to create a new buffer and manage the -// intermingling of RuneErrors and normal input. -// - Many transformers leave ill-formed UTF-8 as is, so this is not -// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a -// logical consequence of the operation (as for Map) or if it otherwise would -// pose security concerns (as for Remove). -// - An alternative would be to return an error on ill-formed UTF-8, but this -// would be inconsistent with other operations. 
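The First/AcceptRanges tables shown above drive the deleted utf8Decoder's validation, including the W3C "maximal subpart" rule for ill-formed sequences. The standard library's unicode/utf8 exposes equivalent per-rune validation; a rough stdlib analogue (it replaces one byte at a time rather than skipping the maximal subpart, so it is a sketch of the idea, not a drop-in replacement):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// replaceIllFormedStd rewrites ill-formed UTF-8 to U+FFFD byte by byte,
// roughly what the deleted utf8Decoder does with its lookup tables.
func replaceIllFormedStd(src []byte) []byte {
	var dst []byte
	for len(src) > 0 {
		r, size := utf8.DecodeRune(src)
		if r == utf8.RuneError && size == 1 {
			// DecodeRune signals an invalid byte with (RuneError, 1).
			dst = append(dst, "\uFFFD"...)
		} else {
			dst = append(dst, src[:size]...)
		}
		src = src[size:]
	}
	return dst
}

func main() {
	fmt.Printf("%q\n", replaceIllFormedStd([]byte("ok\xffok"))) // "ok\ufffdok"
}
```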
- -// If returns a transformer that applies tIn to consecutive runes for which -// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset -// is called on tIn and tNotIn at the start of each run. A Nop transformer will -// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated -// to RuneError to determine which transformer to apply, but is passed as is to -// the respective transformer. -func If(s Set, tIn, tNotIn transform.Transformer) Transformer { - if tIn == nil && tNotIn == nil { - return Transformer{transform.Nop} - } - if tIn == nil { - tIn = transform.Nop - } - if tNotIn == nil { - tNotIn = transform.Nop - } - sIn, ok := tIn.(transform.SpanningTransformer) - if !ok { - sIn = dummySpan{tIn} - } - sNotIn, ok := tNotIn.(transform.SpanningTransformer) - if !ok { - sNotIn = dummySpan{tNotIn} - } - - a := &cond{ - tIn: sIn, - tNotIn: sNotIn, - f: s.Contains, - } - a.Reset() - return Transformer{a} -} - -type dummySpan struct{ transform.Transformer } - -func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) { - return 0, transform.ErrEndOfSpan -} - -type cond struct { - tIn, tNotIn transform.SpanningTransformer - f func(rune) bool - check func(rune) bool // current check to perform - t transform.SpanningTransformer // current transformer to use -} - -// Reset implements transform.Transformer. -func (t *cond) Reset() { - t.check = t.is - t.t = t.tIn - t.t.Reset() // notIn will be reset on first usage. -} - -func (t *cond) is(r rune) bool { - if t.f(r) { - return true - } - t.check = t.isNot - t.t = t.tNotIn - t.tNotIn.Reset() - return false -} - -func (t *cond) isNot(r rune) bool { - if !t.f(r) { - return true - } - t.check = t.is - t.t = t.tIn - t.tIn.Reset() - return false -} - -// This implementation of Span doesn't help all too much, but it needs to be -// there to satisfy this package's Transformer interface. -// TODO: there are certainly room for improvements, though. For example, if -// t.t == transform.Nop (which will a common occurrence) it will save a bundle -// to special-case that loop. -func (t *cond) Span(src []byte, atEOF bool) (n int, err error) { - p := 0 - for n < len(src) && err == nil { - // Don't process too much at a time as the Spanner that will be - // called on this block may terminate early. - const maxChunk = 4096 - max := len(src) - if v := n + maxChunk; v < max { - max = v - } - atEnd := false - size := 0 - current := t.t - for ; p < max; p += size { - r := rune(src[p]) - if r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { - if !atEOF && !utf8.FullRune(src[p:]) { - err = transform.ErrShortSrc - break - } - } - if !t.check(r) { - // The next rune will be the start of a new run. - atEnd = true - break - } - } - n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src))) - n += n2 - if err2 != nil { - return n, err2 - } - // At this point either err != nil or t.check will pass for the rune at p. - p = n + size - } - return n, err -} - -func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - p := 0 - for nSrc < len(src) && err == nil { - // Don't process too much at a time, as the work might be wasted if the - // destination buffer isn't large enough to hold the result or a - // transform returns an error early. 
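runes.If, implemented by the cond.go file being deleted here, dispatches between two transformers rune by rune. A hedged usage sketch, uppercasing only runes in the Latin script and leaving everything else untouched (a nil tNotIn becomes a no-op, per the code above):

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
)

func main() {
	// Apply the mapping only to Latin-script runes; the Greek word and
	// the spaces (script Common) fall through to the Nop branch.
	t := runes.If(runes.In(unicode.Latin), runes.Map(unicode.ToUpper), nil)
	fmt.Println(t.String("abc αβγ def")) // ABC αβγ DEF
}
```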
- const maxChunk = 4096 - max := len(src) - if n := nSrc + maxChunk; n < len(src) { - max = n - } - atEnd := false - size := 0 - current := t.t - for ; p < max; p += size { - r := rune(src[p]) - if r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { - if !atEOF && !utf8.FullRune(src[p:]) { - err = transform.ErrShortSrc - break - } - } - if !t.check(r) { - // The next rune will be the start of a new run. - atEnd = true - break - } - } - nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) - nDst += nDst2 - nSrc += nSrc2 - if err2 != nil { - return nDst, nSrc, err2 - } - // At this point either err != nil or t.check will pass for the rune at p. - p = nSrc + size - } - return nDst, nSrc, err -} diff --git a/openshift/tools/vendor/golang.org/x/text/runes/runes.go b/openshift/tools/vendor/golang.org/x/text/runes/runes.go deleted file mode 100644 index 930e87fedb..0000000000 --- a/openshift/tools/vendor/golang.org/x/text/runes/runes.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package runes provide transforms for UTF-8 encoded text. -package runes // import "golang.org/x/text/runes" - -import ( - "unicode" - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// A Set is a collection of runes. -type Set interface { - // Contains returns true if r is contained in the set. - Contains(r rune) bool -} - -type setFunc func(rune) bool - -func (s setFunc) Contains(r rune) bool { - return s(r) -} - -// Note: using funcs here instead of wrapping types result in cleaner -// documentation and a smaller API. - -// In creates a Set with a Contains method that returns true for all runes in -// the given RangeTable. -func In(rt *unicode.RangeTable) Set { - return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) -} - -// NotIn creates a Set with a Contains method that returns true for all runes not -// in the given RangeTable. -func NotIn(rt *unicode.RangeTable) Set { - return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) -} - -// Predicate creates a Set with a Contains method that returns f(r). -func Predicate(f func(rune) bool) Set { - return setFunc(f) -} - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - t transform.SpanningTransformer -} - -func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return t.t.Transform(dst, src, atEOF) -} - -func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) { - return t.t.Span(b, atEOF) -} - -func (t Transformer) Reset() { t.t.Reset() } - -// Bytes returns a new byte slice with the result of converting b using t. It -// calls Reset on t. It returns nil if any error was found. This can only happen -// if an error-producing Transformer is passed to If. -func (t Transformer) Bytes(b []byte) []byte { - b, _, err := transform.Bytes(t, b) - if err != nil { - return nil - } - return b -} - -// String returns a string with the result of converting s using t. It calls -// Reset on t. It returns the empty string if any error was found. This can only -// happen if an error-producing Transformer is passed to If. -func (t Transformer) String(s string) string { - s, _, err := transform.String(t, s) - if err != nil { - return "" - } - return s -} - -// TODO: -// - Copy: copying strings and bytes in whole-rune units. 
-// - Validation (maybe) -// - Well-formed-ness (maybe) - -const runeErrorString = string(utf8.RuneError) - -// Remove returns a Transformer that removes runes r for which s.Contains(r). -// Illegal input bytes are replaced by RuneError before being passed to f. -func Remove(s Set) Transformer { - if f, ok := s.(setFunc); ok { - // This little trick cuts the running time of BenchmarkRemove for sets - // created by Predicate roughly in half. - // TODO: special-case RangeTables as well. - return Transformer{remove(f)} - } - return Transformer{remove(s.Contains)} -} - -// TODO: remove transform.RemoveFunc. - -type remove func(r rune) bool - -func (remove) Reset() {} - -// Span implements transform.Spanner. -func (t remove) Span(src []byte, atEOF bool) (n int, err error) { - for r, size := rune(0), 0; n < len(src); { - if r = rune(src[n]); r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[n:]) { - err = transform.ErrShortSrc - } else { - err = transform.ErrEndOfSpan - } - break - } - if t(r) { - err = transform.ErrEndOfSpan - break - } - n += size - } - return -} - -// Transform implements transform.Transformer. -func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for r, size := rune(0), 0; nSrc < len(src); { - if r = rune(src[nSrc]); r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - // We replace illegal bytes with RuneError. Not doing so might - // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. - // The resulting byte sequence may subsequently contain runes - // for which t(r) is true that were passed unnoticed. - if !t(utf8.RuneError) { - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - } - nSrc++ - continue - } - if t(r) { - nSrc += size - continue - } - if nDst+size > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < size; i++ { - dst[nDst] = src[nSrc] - nDst++ - nSrc++ - } - } - return -} - -// Map returns a Transformer that maps the runes in the input using the given -// mapping. Illegal bytes in the input are converted to utf8.RuneError before -// being passed to the mapping func. -func Map(mapping func(rune) rune) Transformer { - return Transformer{mapper(mapping)} -} - -type mapper func(rune) rune - -func (mapper) Reset() {} - -// Span implements transform.Spanner. -func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { - for r, size := rune(0), 0; n < len(src); n += size { - if r = rune(src[n]); r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[n:]) { - err = transform.ErrShortSrc - } else { - err = transform.ErrEndOfSpan - } - break - } - if t(r) != r { - err = transform.ErrEndOfSpan - break - } - } - return n, err -} - -// Transform implements transform.Transformer. 
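runes.Remove, shown above, composes naturally with normalization via transform.Chain; the classic diacritic-stripping pipeline is a good illustration (a sketch assuming the standard x/text/unicode/norm and x/text/transform packages):

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// Decompose, drop combining marks (category Mn), recompose.
	t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
	out, _, err := transform.String(t, "Žlutý kůň")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Zluty kun
}
```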
-func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - var replacement rune - var b [utf8.UTFMax]byte - - for r, size := rune(0), 0; nSrc < len(src); { - if r = rune(src[nSrc]); r < utf8.RuneSelf { - if replacement = t(r); replacement < utf8.RuneSelf { - if nDst == len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst] = byte(replacement) - nDst++ - nSrc++ - continue - } - size = 1 - } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - - if replacement = t(utf8.RuneError); replacement == utf8.RuneError { - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - nSrc++ - continue - } - } else if replacement = t(r); replacement == r { - if nDst+size > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < size; i++ { - dst[nDst] = src[nSrc] - nDst++ - nSrc++ - } - continue - } - - n := utf8.EncodeRune(b[:], replacement) - - if nDst+n > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < n; i++ { - dst[nDst] = b[i] - nDst++ - } - nSrc += size - } - return -} - -// ReplaceIllFormed returns a transformer that replaces all input bytes that are -// not part of a well-formed UTF-8 code sequence with utf8.RuneError. -func ReplaceIllFormed() Transformer { - return Transformer{&replaceIllFormed{}} -} - -type replaceIllFormed struct{ transform.NopResetter } - -func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - // ASCII fast path. - if src[n] < utf8.RuneSelf { - n++ - continue - } - - r, size := utf8.DecodeRune(src[n:]) - - // Look for a valid non-ASCII rune. - if r != utf8.RuneError || size != 1 { - n += size - continue - } - - // Look for short source data. - if !atEOF && !utf8.FullRune(src[n:]) { - err = transform.ErrShortSrc - break - } - - // We have an invalid rune. - err = transform.ErrEndOfSpan - break - } - return n, err -} - -func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - // ASCII fast path. - if r := src[nSrc]; r < utf8.RuneSelf { - if nDst == len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst] = r - nDst++ - nSrc++ - continue - } - - // Look for a valid non-ASCII rune. - if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - err = transform.ErrShortDst - break - } - nDst += size - nSrc += size - continue - } - - // Look for short source data. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - - // We have an invalid rune. 
- if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - nSrc++ - } - return nDst, nSrc, err -} diff --git a/openshift/tools/vendor/golang.org/x/text/unicode/bidi/core.go b/openshift/tools/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547b5..fb8273236d 100644 --- a/openshift/tools/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/openshift/tools/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/openshift/tools/vendor/golang.org/x/time/rate/rate.go b/openshift/tools/vendor/golang.org/x/time/rate/rate.go index 794b2e32bf..563270c154 100644 --- a/openshift/tools/vendor/golang.org/x/time/rate/rate.go +++ b/openshift/tools/vendor/golang.org/x/time/rate/rate.go @@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(t time.Time) { // update state r.lim.last = t r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { + if r.timeToAct.Equal(r.lim.lastEvent) { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent diff --git a/openshift/tools/vendor/golang.org/x/time/rate/sometimes.go b/openshift/tools/vendor/golang.org/x/time/rate/sometimes.go index 6ba99ddb67..9b83932692 100644 --- a/openshift/tools/vendor/golang.org/x/time/rate/sometimes.go +++ b/openshift/tools/vendor/golang.org/x/time/rate/sometimes.go @@ -61,7 +61,9 @@ func (s *Sometimes) Do(f func()) { (s.Every > 0 && s.count%s.Every == 0) || (s.Interval > 0 && time.Since(s.last) >= s.Interval) { f() - s.last = time.Now() + if s.Interval > 0 { + s.last = time.Now() + } } s.count++ } diff --git a/openshift/tools/vendor/gomodules.xyz/jsonpatch/v2/LICENSE b/openshift/tools/vendor/gomodules.xyz/jsonpatch/v2/LICENSE deleted file mode 100644 index 8f71f43fee..0000000000 --- a/openshift/tools/vendor/gomodules.xyz/jsonpatch/v2/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
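The x/time/rate hunks earlier in this diff update the vendored copy with two upstream fixes: CancelAt now compares times with time.Equal instead of ==, and Sometimes only refreshes s.last when Interval is actually set, so Every-based throttling is no longer perturbed by the timestamp. A minimal sketch of Sometimes usage under those semantics:

```go
package main

import (
	"log"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Log the first 2 occurrences, then at most once per second.
	s := rate.Sometimes{First: 2, Interval: time.Second}
	for i := 0; i < 10; i++ {
		s.Do(func() { log.Printf("noisy event %d", i) })
		time.Sleep(100 * time.Millisecond)
	}
}
```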
- diff --git a/openshift/tools/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/openshift/tools/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go deleted file mode 100644 index d88162ff57..0000000000 --- a/openshift/tools/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go +++ /dev/null @@ -1,253 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strings" -) - -var errBadJSONDoc = fmt.Errorf("invalid JSON Document") - -type JsonPatchOperation = Operation - -type Operation struct { - Operation string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value,omitempty"` -} - -func (j *Operation) Json() string { - b, _ := json.Marshal(j) - return string(b) -} - -func (j *Operation) MarshalJSON() ([]byte, error) { - // Ensure for add and replace we emit `value: null` - if j.Value == nil && (j.Operation == "replace" || j.Operation == "add") { - return json.Marshal(struct { - Operation string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value"` - }{ - Operation: j.Operation, - Path: j.Path, - }) - } - // otherwise just marshal normally. We cannot literally do json.Marshal(j) as it would be recursively - // calling this function. - return json.Marshal(struct { - Operation string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value,omitempty"` - }{ - Operation: j.Operation, - Path: j.Path, - Value: j.Value, - }) -} - -type ByPath []Operation - -func (a ByPath) Len() int { return len(a) } -func (a ByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } - -func NewOperation(op, path string, value interface{}) Operation { - return Operation{Operation: op, Path: path, Value: value} -} - -// CreatePatch creates a patch as specified in http://jsonpatch.com/ -// -// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. -// The function will return an array of JsonPatchOperations -// -// An error will be returned if any of the two documents are invalid. -func CreatePatch(a, b []byte) ([]Operation, error) { - if bytes.Equal(a, b) { - return []Operation{}, nil - } - var aI interface{} - var bI interface{} - aDec := json.NewDecoder(bytes.NewReader(a)) - aDec.UseNumber() - if err := aDec.Decode(&aI); err != nil { - return nil, errBadJSONDoc - } - bDec := json.NewDecoder(bytes.NewReader(b)) - bDec.UseNumber() - if err := bDec.Decode(&bI); err != nil { - return nil, errBadJSONDoc - } - return handleValues(aI, bI, "", []Operation{}) -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. 
-func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt, ok := bv.(string) - if ok && bt == at { - return true - } - case json.Number: - bt, ok := bv.(json.Number) - if ok && bt == at { - return true - } - case float64: - bt, ok := bv.(float64) - if ok && bt == at { - return true - } - case bool: - bt, ok := bv.(bool) - if ok && bt == at { - return true - } - case map[string]interface{}: - bt, ok := bv.(map[string]interface{}) - if !ok { - return false - } - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - case []interface{}: - bt, ok := bv.([]interface{}) - if !ok { - return false - } - if len(bt) != len(at) { - return false - } - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - } - return false -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. -// TODO decode support: -// var rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") - -var rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") - -func makePath(path string, newPart interface{}) string { - key := rfc6901Encoder.Replace(fmt.Sprintf("%v", newPart)) - if path == "" { - return "/" + key - } - return path + "/" + key -} - -// diff returns the (recursive) difference between a and b as an array of JsonPatchOperations. 
-func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) { - for key, bv := range b { - p := makePath(path, key) - av, ok := a[key] - // value was added - if !ok { - patch = append(patch, NewOperation("add", p, bv)) - continue - } - // Types are the same, compare values - var err error - patch, err = handleValues(av, bv, p, patch) - if err != nil { - return nil, err - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - p := makePath(path, key) - - patch = append(patch, NewOperation("remove", p, nil)) - } - } - return patch, nil -} - -func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) { - { - at := reflect.TypeOf(av) - bt := reflect.TypeOf(bv) - if at == nil && bt == nil { - // do nothing - return patch, nil - } else if at != bt { - // If types have changed, replace completely (preserves null in destination) - return append(patch, NewOperation("replace", p, bv)), nil - } - } - - var err error - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - patch, err = diff(at, bt, p, patch) - if err != nil { - return nil, err - } - case string, float64, bool, json.Number: - if !matchesValue(av, bv) { - patch = append(patch, NewOperation("replace", p, bv)) - } - case []interface{}: - bt := bv.([]interface{}) - n := min(len(at), len(bt)) - for i := len(at) - 1; i >= n; i-- { - patch = append(patch, NewOperation("remove", makePath(p, i), nil)) - } - for i := n; i < len(bt); i++ { - patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) - } - for i := 0; i < n; i++ { - var err error - patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) - if err != nil { - return nil, err - } - } - default: - panic(fmt.Sprintf("Unknown type:%T ", av)) - } - return patch, nil -} - -func min(x int, y int) int { - if y < x { - return y - } - return x -} diff --git a/openshift/tools/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/openshift/tools/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go deleted file mode 100644 index 2ef36bbcf9..0000000000 --- a/openshift/tools/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protodelim marshals and unmarshals varint size-delimited messages. -package protodelim - -import ( - "bufio" - "encoding/binary" - "fmt" - "io" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/proto" -) - -// MarshalOptions is a configurable varint size-delimited marshaler. -type MarshalOptions struct{ proto.MarshalOptions } - -// MarshalTo writes a varint size-delimited wire-format message to w. -// If w returns an error, MarshalTo returns it unchanged. 
-func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { - msgBytes, err := o.MarshalOptions.Marshal(m) - if err != nil { - return 0, err - } - - sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) - sizeWritten, err := w.Write(sizeBytes) - if err != nil { - return sizeWritten, err - } - msgWritten, err := w.Write(msgBytes) - if err != nil { - return sizeWritten + msgWritten, err - } - return sizeWritten + msgWritten, nil -} - -// MarshalTo writes a varint size-delimited wire-format message to w -// with the default options. -// -// See the documentation for [MarshalOptions.MarshalTo]. -func MarshalTo(w io.Writer, m proto.Message) (int, error) { - return MarshalOptions{}.MarshalTo(w, m) -} - -// UnmarshalOptions is a configurable varint size-delimited unmarshaler. -type UnmarshalOptions struct { - proto.UnmarshalOptions - - // MaxSize is the maximum size in wire-format bytes of a single message. - // Unmarshaling a message larger than MaxSize will return an error. - // A zero MaxSize will default to 4 MiB. - // Setting MaxSize to -1 disables the limit. - MaxSize int64 -} - -const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size - -// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size -// that is larger than its configured [UnmarshalOptions.MaxSize]. -type SizeTooLargeError struct { - // Size is the varint size of the message encountered - // that was larger than the provided MaxSize. - Size uint64 - - // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. - MaxSize uint64 -} - -func (e *SizeTooLargeError) Error() string { - return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) -} - -// Reader is the interface expected by [UnmarshalFrom]. -// It is implemented by *[bufio.Reader]. -type Reader interface { - io.Reader - io.ByteReader -} - -// UnmarshalFrom parses and consumes a varint size-delimited wire-format message -// from r. -// The provided message must be mutable (e.g., a non-nil pointer to a message). -// -// The error is [io.EOF] error only if no bytes are read. -// If an EOF happens after reading some but not all the bytes, -// UnmarshalFrom returns a non-io.EOF error. -// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, -// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. -func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { - var sizeArr [binary.MaxVarintLen64]byte - sizeBuf := sizeArr[:0] - for i := range sizeArr { - b, err := r.ReadByte() - if err != nil { - // Immediate EOF is unexpected. - if err == io.EOF && i != 0 { - break - } - return err - } - sizeBuf = append(sizeBuf, b) - if b < 0x80 { - break - } - } - size, n := protowire.ConsumeVarint(sizeBuf) - if n < 0 { - return protowire.ParseError(n) - } - - maxSize := o.MaxSize - if maxSize == 0 { - maxSize = defaultMaxSize - } - if maxSize != -1 && size > uint64(maxSize) { - return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") - } - - var b []byte - var err error - if br, ok := r.(*bufio.Reader); ok { - // Use the []byte from the bufio.Reader instead of having to allocate one. - // This reduces CPU usage and allocated bytes. 
- b, err = br.Peek(int(size)) - if err == nil { - defer br.Discard(int(size)) - } else { - b = nil - } - } - if b == nil { - b = make([]byte, size) - _, err = io.ReadFull(r, b) - } - - if err == io.EOF { - return io.ErrUnexpectedEOF - } - if err != nil { - return err - } - if err := o.Unmarshal(b, m); err != nil { - return err - } - return nil -} - -// UnmarshalFrom parses and consumes a varint size-delimited wire-format message -// from r with the default options. -// The provided message must be mutable (e.g., a non-nil pointer to a message). -// -// See the documentation for [UnmarshalOptions.UnmarshalFrom]. -func UnmarshalFrom(r Reader, m proto.Message) error { - return UnmarshalOptions{}.UnmarshalFrom(r, m) -} diff --git a/openshift/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/openshift/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index e942bc983e..743bfb81d6 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) { func SizeVarint(v uint64) int { // This computes 1 + (bits.Len64(v)-1)/7. // 9/64 is a good enough approximation of 1/7 - return int(9*uint32(bits.Len64(v))+64) / 64 + // + // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT + // instruction, which is very fast on CPUs from the last few years. The + // specific way of expressing the calculation matches C++ Protobuf, see + // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang + // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell). + + // By OR'ing v with 1, we guarantee that v is never 0, without changing the + // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler + // needs to add extra instructions to handle that case. + // + // The Go compiler currently (go1.24.4) does not make use of this knowledge. + // This opportunity (removing the XOR instruction, which handles the 0 case) + // results in a small (1%) performance win across CPU architectures. + // + // Independently of avoiding the 0 case, we need the v |= 1 line because + // it allows the Go compiler to eliminate an extra XCHGL barrier. + v |= 1 + + // It would be clearer to write log2value := 63 - uint32(...), but + // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel). + // Proof of identity for our value range [0..63]: + // https://go.dev/play/p/Pdn9hEWYakX + log2value := uint32(bits.LeadingZeros64(v)) ^ 63 + return int((log2value*9 + (64 + 9)) / 64) } // AppendFixed32 appends v to b as a little-endian uint32. 
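The rewritten SizeVarint above is branch-free, but its equivalence to the plain "one byte per started group of 7 payload bits" definition is not obvious at a glance. A small self-contained check under that reading (the naive/optimized helper names are mine; the optimized body copies the new hunk):

package main

import (
	"fmt"
	"math/bits"
)

// naive is the textbook definition: one byte per started group of
// 7 bits, and the value 0 still occupies one byte.
func naive(v uint64) int {
	if v == 0 {
		return 1
	}
	return (bits.Len64(v) + 6) / 7
}

// optimized mirrors the new branch-free body of protowire.SizeVarint.
func optimized(v uint64) int {
	v |= 1 // sidesteps LZCNT(0) being undefined; does not change the result
	// x ^ 63 == 63 - x for x in [0, 63], so this is 63 - LeadingZeros64(v).
	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
	return int((log2value*9 + (64 + 9)) / 64)
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 1 << 14, 1<<63 - 1, 1 << 63} {
		fmt.Printf("v=%d naive=%d optimized=%d\n", v, naive(v), optimized(v))
	}
}

The loop spot-checks the byte-length boundaries; writing bits.Len64(v)-1 as 7q+r shows floor((9*(7q+r)+73)/64) = q+1 = 1+floor((Len-1)/7) for the whole range, so the two formulas agree on every input.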
diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/openshift/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 323829da14..04696351ee 100644 Binary files a/openshift/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/openshift/tools/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 688aabe434..dbcf90b871 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -72,9 +72,10 @@ type ( EditionFeatures EditionFeatures } FileL2 struct { - Options func() protoreflect.ProtoMessage - Imports FileImports - Locations SourceLocations + Options func() protoreflect.ProtoMessage + Imports FileImports + OptionImports func() protoreflect.FileImports + Locations SourceLocations } // EditionFeatures is a frequently-instantiated struct, so please take care @@ -126,12 +127,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } - -// Not exported and just used to reconstruct the original FileDescriptor proto -func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -150,6 +148,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD func (fd *File) ProtoType(protoreflect.FileDescriptor) {} func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +// The next two are not part of the FileDescriptor interface. They are just used to reconstruct +// the original FileDescriptor proto. +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) OptionImports() protoreflect.FileImports { + if f := fd.lazyInit().OptionImports; f != nil { + return f() + } + return emptyFiles +} + func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { fd.lazyInitOnce() @@ -182,9 +190,9 @@ type ( L2 *EnumL2 // protected by fileDesc.once } EnumL1 struct { - eagerValues bool // controls whether EnumL2.Values is already populated - EditionFeatures EditionFeatures + Visibility int32 + eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -219,6 +227,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit() func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} + +// This is not part of the EnumDescriptor interface. 
It is just used to reconstruct +// the original FileDescriptor proto. +func (ed *Enum) Visibility() int32 { return ed.L1.Visibility } + func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 @@ -244,13 +257,13 @@ type ( L2 *MessageL2 // protected by fileDesc.once } MessageL1 struct { - Enums Enums - Messages Messages - Extensions Extensions - IsMapEntry bool // promoted from google.protobuf.MessageOptions - IsMessageSet bool // promoted from google.protobuf.MessageOptions - + Enums Enums + Messages Messages + Extensions Extensions EditionFeatures EditionFeatures + Visibility int32 + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -319,6 +332,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } + +// This is not part of the MessageDescriptor interface. It is just used to reconstruct +// the original FileDescriptor proto. +func (md *Message) Visibility() int32 { return md.L1.Visibility } + func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index d2f549497e..e91860f5a2 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl case genid.EnumDescriptorProto_Value_field_number: numValues++ } + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Visibility_field_number: + ed.L1.Visibility = int32(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.unmarshalSeedOptions(v) } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Visibility_field_number: + md.L1.Visibility = int32(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index d4c94458bd..dd31faaeb0 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) { var enumIdx, messageIdx, extensionIdx, serviceIdx int var rawOptions []byte + var optionImports []string fd.L2 = new(FileL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) { imp = PlaceholderFile(path) } fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_OptionDependency_field_number: + 
optionImports = append(optionImports, sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) { } } fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) + if len(optionImports) > 0 { + var imps FileImports + var once sync.Once + fd.L2.OptionImports = func() protoreflect.FileImports { + once.Do(func() { + imps = make(FileImports, len(optionImports)) + for i, path := range optionImports { + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + imps[i] = protoreflect.FileImport{FileDescriptor: imp} + } + }) + return &imps + } + } } func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index b08b71830c..66ba906806 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -13,8 +13,10 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" ) -var defaultsCache = make(map[Edition]EditionFeatures) -var defaultsKeys = []Edition{} +var ( + defaultsCache = make(map[Edition]EditionFeatures) + defaultsKeys = []Edition{} +) func init() { unmarshalEditionDefaults(editiondefaults.Defaults) @@ -41,7 +43,7 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { b = b[m:] parent.StripEnumPrefix = int(v) default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } } return parent @@ -72,8 +74,11 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { case genid.FeatureSet_EnforceNamingStyle_field_number: // EnforceNamingStyle is enforced in protoc, languages other than C++ // are not supposed to do anything with this feature. + case genid.FeatureSet_DefaultSymbolVisibility_field_number: + // DefaultSymbolVisibility is enforced in protoc, runtimes should not + // inspect this value. default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num)) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -147,7 +152,7 @@ func unmarshalEditionDefaults(b []byte) { _, m := protowire.ConsumeVarint(b) b = b[m:] default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num)) } } } diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/presence.go new file mode 100644 index 0000000000..a12ec9791c --- /dev/null +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/filedesc/presence.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import "google.golang.org/protobuf/reflect/protoreflect" + +// UsePresenceForField reports whether the presence bitmap should be used for +// the specified field. 
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneof fields never use the presence bitmap. + // + // Synthetic oneofs are an exception: Those are used to implement proto3 + // optional fields and hence should follow non-oneof field semantics. + return false, false + + case fd.IsMap(): + // Map-typed fields never use the presence bitmap. + return false, false + + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + // Lazy fields always use the presence bitmap (only messages can be lazy). + isLazy := fd.(interface{ IsLazy() bool }).IsLazy() + return isLazy, isLazy + + default: + // If the field has presence, use the presence bitmap. + return fd.HasPresence(), false + } +} diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f918501..3ceb6fa7f5 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. @@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. 
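The new filedesc.UsePresenceForField above centralizes a decision that previously lived as an unexported helper in the impl package (its deletion appears further down in this diff). A minimal restatement of the decision table, with a hypothetical fieldShape struct standing in for the handful of protoreflect.FieldDescriptor properties the real function inspects:

package main

import "fmt"

// fieldShape is a hypothetical stand-in for a protoreflect.FieldDescriptor;
// only the properties consulted by the presence decision are modeled.
type fieldShape struct {
	inRealOneof bool // member of a non-synthetic oneof (proto3 optionals are synthetic and fall through)
	isMap       bool
	isMessage   bool // message- or group-typed
	isLazy      bool // only message-typed fields can be lazy
	hasPresence bool // explicit field presence
}

// decidePresence mirrors the switch in filedesc.UsePresenceForField.
func decidePresence(f fieldShape) (usePresence, canBeLazy bool) {
	switch {
	case f.inRealOneof:
		return false, false // oneof members never use the presence bitmap
	case f.isMap:
		return false, false // neither do map-typed fields
	case f.isMessage:
		return f.isLazy, f.isLazy // lazy message fields always use it
	default:
		return f.hasPresence, false // otherwise: bitmap iff explicit presence
	}
}

func main() {
	fmt.Println(decidePresence(fieldShape{isMessage: true, isLazy: true})) // true true
	fmt.Println(decidePresence(fieldShape{hasPresence: true}))             // true false
	fmt.Println(decidePresence(fieldShape{inRealOneof: true}))             // false false
}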
diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 39524782ad..950a6a325a 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -34,6 +34,19 @@ const ( Edition_EDITION_MAX_enum_value = 2147483647 ) +// Full and short names for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility" + SymbolVisibility_enum_name = "SymbolVisibility" +) + +// Enum values for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_VISIBILITY_UNSET_enum_value = 0 + SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1 + SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -65,6 +78,7 @@ const ( FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency" FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" FileDescriptorProto_Service_field_name protoreflect.Name = "service" @@ -79,6 +93,7 @@ const ( FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency" FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" @@ -96,6 +111,7 @@ const ( FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15 FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 @@ -124,6 +140,7 @@ const ( DescriptorProto_Options_field_name protoreflect.Name = "options" DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + DescriptorProto_Visibility_field_name protoreflect.Name = "visibility" DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" DescriptorProto_Field_field_fullname protoreflect.FullName = 
"google.protobuf.DescriptorProto.field" @@ -135,6 +152,7 @@ const ( DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" + DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility" ) // Field numbers for google.protobuf.DescriptorProto. @@ -149,6 +167,7 @@ const ( DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 + DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11 ) // Names for google.protobuf.DescriptorProto.ExtensionRange. @@ -388,12 +407,14 @@ const ( EnumDescriptorProto_Options_field_name protoreflect.Name = "options" EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility" EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" + EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility" ) // Field numbers for google.protobuf.EnumDescriptorProto. @@ -403,6 +424,7 @@ const ( EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 + EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. @@ -1008,32 +1030,35 @@ const ( // Field names for google.protobuf.FeatureSet. 
const ( - FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" - FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" - FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" - FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" - FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" - FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" - FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" - - FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" - FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" - FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" - FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" - FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" - FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" - FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility" ) // Field numbers for google.protobuf.FeatureSet. 
const ( - FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 - FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 - FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 - FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 - FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 - FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 - FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1128,6 +1153,27 @@ const ( FeatureSet_STYLE_LEGACY_enum_value = 2 ) +// Names for google.protobuf.FeatureSet.VisibilityFeature. +const ( + FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature" + FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature" +) + +// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility" + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility" +) + +// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0 + FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1 + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2 + FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3 + FeatureSet_VisibilityFeature_STRICT_enum_value = 4 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 41c1f74ef8..bdad12a9bb 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf // permit us to skip over definitely-unset fields at marshal time. 
var hasPresence bool - hasPresence, cf.isLazy = usePresenceForField(si, fd) + hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd) if hasPresence { cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index dd55e8e009..5a439daacb 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -11,6 +11,7 @@ import ( "strings" "sync/atomic" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] var fi fieldInfo - usePresence, _ := usePresenceForField(si, fd) + usePresence, _ := filedesc.UsePresenceForField(fd) switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): @@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return false } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return false } - rv := sp.AsValueOf(fs.Type.Elem()) return rv.Elem().Len() > 0 }, clear: func(p pointer) { - sp := p.Apply(fieldOffset).AtomicGetPointer() - if !sp.IsNil() { - rv := sp.AsValueOf(fs.Type.Elem()) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if !rv.IsNil() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } }, @@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return conv.Zero() } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return conv.Zero() } - rv := sp.AsValueOf(fs.Type.Elem()) if rv.Elem().Len() == 0 { return conv.Zero() } @@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) { func (mi *MessageInfo) present(p pointer, index uint32) bool { return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) } - -// usePresenceForField implements the somewhat intricate logic of when -// the presence bitmap is used for a field. The main logic is that a -// field that is optional or that can be lazy will use the presence -// bit, but for proto2, also maps have a presence bit. It also records -// if the field can ever be lazy, which is true if we have a -// lazyOffset and the field is a message or a slice of messages. A -// field that is lazy will always need a presence bit. Oneofs are not -// lazy and do not use presence, unless they are a synthetic oneof, -// which is a proto3 optional field. For proto3 optionals, we use the -// presence and they can also be lazy when applicable (a message). -func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { - hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() - - // Non-oneof scalar fields with explicit field presence use the presence array. 
- usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) - switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - return false, false - case fd.IsMap(): - return false, false - case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: - return hasLazyField, hasLazyField - default: - return usesPresenceArray || (hasLazyField && fd.HasPresence()), false - } -} diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go index 914cb1deda..443afe81cd 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) { // Present checks for the presence of a specific field number in a presence set. func (p presence) Present(num uint32) bool { - if p.P == nil { - return false - } return Export{}.Present(p.toElem(num), num) } diff --git a/openshift/tools/vendor/google.golang.org/protobuf/internal/version/version.go b/openshift/tools/vendor/google.golang.org/protobuf/internal/version/version.go index aac1cb18a7..77de0f238c 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 6 + Patch = 10 PreRelease = "" ) diff --git a/openshift/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/openshift/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index a4a0a2971d..730331e666 100644 --- a/openshift/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "public_dependency", nil) case 11: b = p.appendRepeatedField(b, "weak_dependency", nil) + case 15: + b = p.appendRepeatedField(b, "option_dependency", nil) case 4: b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) case 5: @@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) case 10: b = p.appendRepeatedField(b, "reserved_name", nil) + case 11: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) case 5: b = p.appendRepeatedField(b, "reserved_name", nil) + case 6: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -400,6 +406,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "json_format", nil) case 7: b = p.appendSingularField(b, "enforce_naming_style", nil) + case 8: + b = p.appendSingularField(b, "default_symbol_visibility", nil) } return b } diff --git a/openshift/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/openshift/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 7fe280f194..4eacb523c3 100644 --- 
a/openshift/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/openshift/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} } +// Describes the 'visibility' of a symbol with respect to the proto import +// system. Symbols can only be imported when the visibility rules do not prevent +// it (ex: local symbols cannot be imported). Visibility modifiers can only set +// on `message` and `enum` as they are the only types available to be referenced +// from other files. +type SymbolVisibility int32 + +const ( + SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0 + SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1 + SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2 +) + +// Enum value maps for SymbolVisibility. +var ( + SymbolVisibility_name = map[int32]string{ + 0: "VISIBILITY_UNSET", + 1: "VISIBILITY_LOCAL", + 2: "VISIBILITY_EXPORT", + } + SymbolVisibility_value = map[string]int32{ + "VISIBILITY_UNSET": 0, + "VISIBILITY_LOCAL": 1, + "VISIBILITY_EXPORT": 2, + } +) + +func (x SymbolVisibility) Enum() *SymbolVisibility { + p := new(SymbolVisibility) + *p = x + return p +} + +func (x SymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (SymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x SymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *SymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = SymbolVisibility(num) + return nil +} + +// Deprecated: Use SymbolVisibility.Descriptor instead. +func (SymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + // The verification state of the extension range. 
type ExtensionRangeOptions_VerificationState int32 @@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() 
protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[10] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string { } func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() } func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[10] + return &file_google_protobuf_descriptor_proto_enumTypes[11] } func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { @@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string { } func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() } func (FeatureSet_EnumType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[11] + return &file_google_protobuf_descriptor_proto_enumTypes[12] } func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { @@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string { } func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() } func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[12] + return &file_google_protobuf_descriptor_proto_enumTypes[13] } func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { @@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string { } func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() + return 
file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() } func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[13] + return &file_google_protobuf_descriptor_proto_enumTypes[14] } func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { @@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string { } func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() } func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[14] + return &file_google_protobuf_descriptor_proto_enumTypes[15] } func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { @@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string { } func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[15] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { @@ -1172,11 +1236,11 @@ func (x FeatureSet_EnforceNamingStyle) String() string { } func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { @@ -1198,6 +1262,77 @@ func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} } +type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32 + +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0 + // Default pre-EDITION_2024, all UNSET visibility are export. + FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1 + // All top-level symbols default to export, nested default to local. + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2 + // All symbols default to local. + FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3 + // All symbols local by default. Nested types cannot be exported. + // With special case caveat for message { enum {} reserved 1 to max; } + // This is the recommended setting for new protos. + FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4 +) + +// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility. 
+var ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{ + 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN", + 1: "EXPORT_ALL", + 2: "EXPORT_TOP_LEVEL", + 3: "LOCAL_ALL", + 4: "STRICT", + } + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{ + "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0, + "EXPORT_ALL": 1, + "EXPORT_TOP_LEVEL": 2, + "LOCAL_ALL": 3, + "STRICT": 4, + } +) + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility) + *p = x + return p +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor() +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[18] +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num) + return nil +} + +// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead. +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1236,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[17] + return &file_google_protobuf_descriptor_proto_enumTypes[19] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1321,6 +1456,9 @@ type FileDescriptorProto struct { // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // Names of files imported by this file purely for the purpose of providing + // option extensions. These are excluded from the dependency list above. + OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"` // All top-level definitions in this file. 
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` @@ -1414,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 { return nil } +func (x *FileDescriptorProto) GetOptionDependency() []string { + if x != nil { + return x.OptionDependency + } + return nil +} + func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { if x != nil { return x.MessageType @@ -1484,7 +1629,9 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1589,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string { return nil } +func (x *DescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + type ExtensionRangeOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. @@ -1901,7 +2055,9 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1971,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string { return nil } +func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + // Describes a value within an enum. type EnumValueDescriptorProto struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2710,7 +2873,10 @@ type FieldOptions struct { // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // DEPRECATED. DO NOT USE! // For Google-internal migration only. Do not use. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. @@ -2814,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool { return Default_FieldOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetWeak() bool { if x != nil && x.Weak != nil { return *x.Weak @@ -3392,17 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. type FeatureSet struct { - state protoimpl.MessageState `protogen:"open.v1"` - FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` - EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` - RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` - Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` - MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` - JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` - EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle 
`protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` + DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { @@ -3484,6 +3652,13 @@ func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN } +func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + if x != nil && x.DefaultSymbolVisibility != nil { + return *x.DefaultSymbolVisibility + } + return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. The resolution with this object becomes a simple search @@ -4144,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +type FeatureSet_VisibilityFeature struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FeatureSet_VisibilityFeature) Reset() { + *x = FeatureSet_VisibilityFeature{} + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FeatureSet_VisibilityFeature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet_VisibilityFeature) ProtoMessage() {} + +func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead. +func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + // A map from every known edition with a unique set of defaults to its // defaults. Not all editions may be contained here. 
For a given edition, // the defaults at the closest matching edition ordered at or before it should @@ -4161,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4173,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4309,7 +4520,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4321,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4393,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4405,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4462,7 +4673,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\n" + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + "\x11FileDescriptorSet\x128\n" + - "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" + "\x13FileDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + @@ -4471,7 +4682,8 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "dependency\x12+\n" + "\x11public_dependency\x18\n" + " \x03(\x05R\x10publicDependency\x12'\n" + - "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" + + "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" + "\fmessage_type\x18\x04 \x03(\v2 
.google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + @@ -4479,7 +4691,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + - "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" + "\x0fDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + @@ -4493,7 +4705,10 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + "\rreserved_name\x18\n" + - " \x03(\tR\freservedName\x1az\n" + + " \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1az\n" + "\x0eExtensionRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + @@ -4562,13 +4777,16 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + "\x14OneofDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + - "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" + "\x13EnumDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + - "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1a;\n" + "\x11EnumReservedRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + @@ -4629,7 +4847,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + - "\"\x9d\r\n" + + "\"\xa1\r\n" + "\fFieldOptions\x12A\n" + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + @@ -4638,9 +4856,9 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + "\n" + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + - "deprecated\x12\x19\n" + + 
"deprecated\x12\x1d\n" + "\x04weak\x18\n" + - " \x01(\b:\x05falseR\x04weak\x12(\n" + + " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + @@ -4728,7 +4946,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + "\bNamePart\x12\x1b\n" + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + - "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" + "\n" + "FeatureSet\x12\x91\x01\n" + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + @@ -4739,7 +4957,18 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + "jsonFormat\x12\xab\x01\n" + - "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" + + "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" + + "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" + + "\x11VisibilityFeature\"\x81\x01\n" + + "\x17DefaultSymbolVisibility\x12%\n" + + "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" + + "\n" + + "EXPORT_ALL\x10\x01\x12\x14\n" + + "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" + + "\tLOCAL_ALL\x10\x03\x12\n" + + "\n" + + "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" + "\rFieldPresence\x12\x1a\n" + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + "\bEXPLICIT\x10\x01\x12\f\n" + @@ -4817,7 +5046,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + - "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" + + "\x10SymbolVisibility\x12\x14\n" + + "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" + + "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" + + "\x11VISIBILITY_EXPORT\x10\x02B~\n" + "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( @@ -4832,145 +5065,151 @@ func 
file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34) var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle - (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 28: google.protobuf.FileOptions - (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo - 
(*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility + (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 11: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat + (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle + (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 30: google.protobuf.FileOptions 
+ (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart + (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 22, // 8: google.protobuf.DescriptorProto.field:type_name -> 
google.protobuf.FieldDescriptorProto - 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 37, // 41: google.protobuf.FieldOptions.features:type_name -> 
google.protobuf.FeatureSet - 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle - 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 74: 
google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 78, // [78:78] is the sub-list for method output_type - 78, // [78:78] is the sub-list for method input_type - 78, // [78:78] is the sub-list for extension type_name - 78, // [78:78] is the sub-list for extension extendee - 0, // [0:78] is the sub-list for field type_name + 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 5, // 33: 
google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 18, // 66: 
google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4983,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 18, - NumMessages: 33, + NumEnums: 20, + NumMessages: 34, NumExtensions: 0, NumServices: 0, }, diff --git a/openshift/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/openshift/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go deleted file mode 100644 index 06d584c14b..0000000000 --- a/openshift/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ /dev/null @@ -1,355 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -// Package timestamppb contains generated types for google/protobuf/timestamp.proto. -// -// The Timestamp message represents a timestamp, -// an instant in time since the Unix epoch (January 1st, 1970). -// -// # Conversion to a Go Time -// -// The AsTime method can be used to convert a Timestamp message to a -// standard Go time.Time value in UTC: -// -// t := ts.AsTime() -// ... // make use of t as a time.Time -// -// Converting to a time.Time is a common operation so that the extensive -// set of time-based operations provided by the time package can be leveraged. -// See https://golang.org/pkg/time for more information. -// -// The AsTime method performs the conversion on a best-effort basis. Timestamps -// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) -// are normalized during the conversion to a time.Time. To manually check for -// invalid Timestamps per the documented limitations in timestamp.proto, -// additionally call the CheckValid method: -// -// if err := ts.CheckValid(); err != nil { -// ... // handle error -// } -// -// # Conversion from a Go Time -// -// The timestamppb.New function can be used to construct a Timestamp message -// from a standard Go time.Time value: -// -// ts := timestamppb.New(t) -// ... // make use of ts as a *timestamppb.Timestamp -// -// In order to construct a Timestamp representing the current time, use Now: -// -// ts := timestamppb.Now() -// ... // make use of ts as a *timestamppb.Timestamp -package timestamppb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - time "time" - unsafe "unsafe" -) - -// A Timestamp represents a point in time independent of any time zone or local -// calendar, encoded as a count of seconds and fractions of seconds at -// nanosecond resolution. The count is relative to an epoch at UTC midnight on -// January 1, 1970, in the proleptic Gregorian calendar which extends the -// Gregorian calendar backwards to year one. -// -// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap -// second table is needed for interpretation, using a [24-hour linear -// smear](https://developers.google.com/time/smear). -// -// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By -// restricting to that range, we ensure that we can convert to and from [RFC -// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// Example 5: Compute Timestamp from Java `Instant.now()`. -// -// Instant now = Instant.now(); -// -// Timestamp timestamp = -// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) -// .setNanos(now.getNano()).build(); -// -// Example 6: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard -// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using -// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with -// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use -// the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() -// ) to obtain a formatter capable of generating timestamps in this format. -type Timestamp struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. 
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Now constructs a new Timestamp from the current time. -func Now() *Timestamp { - return New(time.Now()) -} - -// New constructs a new Timestamp from the provided time.Time. -func New(t time.Time) *Timestamp { - return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} -} - -// AsTime converts x to a time.Time. -func (x *Timestamp) AsTime() time.Time { - return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() -} - -// IsValid reports whether the timestamp is valid. -// It is equivalent to CheckValid == nil. -func (x *Timestamp) IsValid() bool { - return x.check() == 0 -} - -// CheckValid returns an error if the timestamp is invalid. -// In particular, it checks whether the value represents a date that is -// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. -// An error is reported for a nil Timestamp. -func (x *Timestamp) CheckValid() error { - switch x.check() { - case invalidNil: - return protoimpl.X.NewError("invalid nil Timestamp") - case invalidUnderflow: - return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) - case invalidOverflow: - return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) - case invalidNanos: - return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) - default: - return nil - } -} - -const ( - _ = iota - invalidNil - invalidUnderflow - invalidOverflow - invalidNanos -) - -func (x *Timestamp) check() uint { - const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive - const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive - secs := x.GetSeconds() - nanos := x.GetNanos() - switch { - case x == nil: - return invalidNil - case secs < minTimestamp: - return invalidUnderflow - case secs > maxTimestamp: - return invalidOverflow - case nanos < 0 || nanos >= 1e9: - return invalidNanos - default: - return 0 - } -} - -func (x *Timestamp) Reset() { - *x = Timestamp{} - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Timestamp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Timestamp) ProtoMessage() {} - -func (x *Timestamp) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
-func (*Timestamp) Descriptor() ([]byte, []int) { - return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} -} - -func (x *Timestamp) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} - -func (x *Timestamp) GetNanos() int32 { - if x != nil { - return x.Nanos - } - return 0 -} - -var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor - -const file_google_protobuf_timestamp_proto_rawDesc = "" + - "\n" + - "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" + - "\tTimestamp\x12\x18\n" + - "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + - "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" + - "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" - -var ( - file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData []byte -) - -func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { - file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) - }) - return file_google_protobuf_timestamp_proto_rawDescData -} - -var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []any{ - (*Timestamp)(nil), // 0: google.protobuf.Timestamp -} -var file_google_protobuf_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_timestamp_proto_init() } -func file_google_protobuf_timestamp_proto_init() { - if File_google_protobuf_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_timestamp_proto_goTypes, - DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, - MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, - }.Build() - File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_goTypes = nil - file_google_protobuf_timestamp_proto_depIdxs = nil -} diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/doc.go b/openshift/tools/vendor/k8s.io/api/admission/v1/doc.go deleted file mode 100644 index cab6528214..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=false -// +k8s:prerelease-lifecycle-gen=true -// +groupName=admission.k8s.io - -package v1 diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/generated.pb.go b/openshift/tools/vendor/k8s.io/api/admission/v1/generated.pb.go deleted file mode 100644 index f5c4179198..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/generated.pb.go +++ /dev/null @@ -1,1782 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/api/admission/v1/generated.proto - -package v1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } -func (*AdmissionRequest) ProtoMessage() {} -func (*AdmissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b47d27831186ccf, []int{0} -} -func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionRequest.Merge(m, src) -} -func (m *AdmissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AdmissionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo - -func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } -func (*AdmissionResponse) ProtoMessage() {} -func (*AdmissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b47d27831186ccf, []int{1} -} -func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionResponse.Merge(m, src) -} -func (m *AdmissionResponse) XXX_Size() int { - return m.Size() -} -func (m *AdmissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo - -func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } -func (*AdmissionReview) ProtoMessage() {} -func (*AdmissionReview) Descriptor() ([]byte, []int) { - return fileDescriptor_7b47d27831186ccf, []int{2} -} -func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionReview.Merge(m, src) -} -func (m *AdmissionReview) XXX_Size() int { - return m.Size() -} -func (m *AdmissionReview) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionReview.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1.AdmissionRequest") - proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1.AdmissionResponse") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1.AdmissionResponse.AuditAnnotationsEntry") - proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1.AdmissionReview") -} - -func init() { - proto.RegisterFile("k8s.io/api/admission/v1/generated.proto", fileDescriptor_7b47d27831186ccf) -} - -var fileDescriptor_7b47d27831186ccf = []byte{ - // 907 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xba, 0xf2, 0x61, 0x6d, 0x72, 0x00, - 0x17, 0xb5, 0xbb, 0x24, 0x82, 0x2a, 0xaa, 0x40, 0x22, 0x4b, 
0x2a, 0x14, 0x90, 0x9a, 0x68, 0xda, - 0x40, 0xc5, 0x01, 0x69, 0x62, 0x4f, 0xed, 0xc1, 0xf6, 0xcc, 0xb2, 0x33, 0xeb, 0xe0, 0x1b, 0x27, - 0xce, 0x7c, 0x03, 0x8e, 0x7c, 0x06, 0xbe, 0x41, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x2d, 0x72, - 0x42, 0x33, 0x3b, 0xfb, 0xa7, 0x89, 0x2d, 0x42, 0xc3, 0x29, 0xfb, 0xfe, 0xfc, 0x7e, 0xef, 0xe5, - 0xf7, 0xf6, 0xbd, 0x35, 0xf8, 0x70, 0xbc, 0x2b, 0x3c, 0xca, 0x7d, 0x1c, 0x52, 0x1f, 0x0f, 0xa6, - 0x54, 0x08, 0xca, 0x99, 0x3f, 0xdb, 0xf6, 0x87, 0x84, 0x91, 0x08, 0x4b, 0x32, 0xf0, 0xc2, 0x88, - 0x4b, 0x0e, 0xef, 0x25, 0x89, 0x1e, 0x0e, 0xa9, 0x97, 0x25, 0x7a, 0xb3, 0xed, 0xf6, 0xc3, 0x21, - 0x95, 0xa3, 0xf8, 0xc4, 0xeb, 0xf3, 0xa9, 0x3f, 0xe4, 0x43, 0xee, 0xeb, 0xfc, 0x93, 0xf8, 0xa5, - 0xb6, 0xb4, 0xa1, 0x9f, 0x12, 0x9e, 0xf6, 0x83, 0x62, 0xc1, 0x58, 0x8e, 0x08, 0x93, 0xb4, 0x8f, - 0xe5, 0xea, 0xaa, 0xed, 0x4f, 0xf2, 0xec, 0x29, 0xee, 0x8f, 0x28, 0x23, 0xd1, 0xdc, 0x0f, 0xc7, - 0x43, 0xe5, 0x10, 0xfe, 0x94, 0x48, 0xbc, 0x0a, 0xe5, 0xaf, 0x43, 0x45, 0x31, 0x93, 0x74, 0x4a, - 0xae, 0x00, 0x1e, 0xfd, 0x1b, 0x40, 0xf4, 0x47, 0x64, 0x8a, 0x2f, 0xe3, 0xb6, 0x7e, 0xb7, 0x41, - 0x6b, 0x2f, 0x15, 0x03, 0x91, 0x9f, 0x62, 0x22, 0x24, 0x0c, 0x40, 0x39, 0xa6, 0x03, 0xc7, 0xea, - 0x5a, 0x3d, 0x3b, 0xf8, 0xf8, 0x6c, 0xd1, 0x29, 0x2d, 0x17, 0x9d, 0xf2, 0xf1, 0xc1, 0xfe, 0xc5, - 0xa2, 0xf3, 0xfe, 0xba, 0x42, 0x72, 0x1e, 0x12, 0xe1, 0x1d, 0x1f, 0xec, 0x23, 0x05, 0x86, 0x2f, - 0x40, 0x65, 0x4c, 0xd9, 0xc0, 0xb9, 0xd5, 0xb5, 0x7a, 0x8d, 0x9d, 0x47, 0x5e, 0x2e, 0x7e, 0x06, - 0xf3, 0xc2, 0xf1, 0x50, 0x39, 0x84, 0xa7, 0x64, 0xf0, 0x66, 0xdb, 0xde, 0x57, 0x11, 0x8f, 0xc3, - 0x6f, 0x49, 0xa4, 0x9a, 0xf9, 0x86, 0xb2, 0x41, 0xb0, 0x69, 0x8a, 0x57, 0x94, 0x85, 0x34, 0x23, - 0x1c, 0x81, 0x7a, 0x44, 0x04, 0x8f, 0xa3, 0x3e, 0x71, 0xca, 0x9a, 0xfd, 0xf1, 0x7f, 0x67, 0x47, - 0x86, 0x21, 0x68, 0x99, 0x0a, 0xf5, 0xd4, 0x83, 0x32, 0x76, 0xf8, 0x29, 0x68, 0x88, 0xf8, 0x24, - 0x0d, 0x38, 0x15, 0xad, 0xc7, 0x5d, 0x03, 0x68, 0x3c, 0xcb, 0x43, 0xa8, 0x98, 0x07, 0x29, 0x68, - 0x44, 0x89, 0x92, 0xaa, 0x6b, 0xe7, 0x9d, 0x1b, 0x29, 0xd0, 0x54, 0xa5, 0x50, 0x4e, 0x87, 0x8a, - 0xdc, 0x70, 0x0e, 0x9a, 0xc6, 0xcc, 0xba, 0xbc, 0x7d, 0x63, 0x49, 0xee, 0x2e, 0x17, 0x9d, 0x26, - 0x7a, 0x93, 0x16, 0x5d, 0xae, 0x03, 0xbf, 0x06, 0xd0, 0xb8, 0x0a, 0x42, 0x38, 0x4d, 0xad, 0x51, - 0xdb, 0x68, 0x04, 0xd1, 0x95, 0x0c, 0xb4, 0x02, 0x05, 0xbb, 0xa0, 0xc2, 0xf0, 0x94, 0x38, 0x1b, - 0x1a, 0x9d, 0x0d, 0xfd, 0x29, 0x9e, 0x12, 0xa4, 0x23, 0xd0, 0x07, 0xb6, 0xfa, 0x2b, 0x42, 0xdc, - 0x27, 0x4e, 0x55, 0xa7, 0xdd, 0x31, 0x69, 0xf6, 0xd3, 0x34, 0x80, 0xf2, 0x1c, 0xf8, 0x19, 0xb0, - 0x79, 0xa8, 0x5e, 0x75, 0xca, 0x99, 0x53, 0xd3, 0x00, 0x37, 0x05, 0x1c, 0xa6, 0x81, 0x8b, 0xa2, - 0x81, 0x72, 0x00, 0x7c, 0x0e, 0xea, 0xb1, 0x20, 0xd1, 0x01, 0x7b, 0xc9, 0x9d, 0xba, 0x16, 0xf4, - 0x03, 0xaf, 0x78, 0x3e, 0xde, 0x58, 0x7b, 0x25, 0xe4, 0xb1, 0xc9, 0xce, 0xdf, 0xa7, 0xd4, 0x83, - 0x32, 0x26, 0x78, 0x0c, 0xaa, 0xfc, 0xe4, 0x47, 0xd2, 0x97, 0x8e, 0xad, 0x39, 0x1f, 0xae, 0x1d, - 0x92, 0xd9, 0x5a, 0x0f, 0xe1, 0xd3, 0x27, 0x3f, 0x4b, 0xc2, 0xd4, 0x7c, 0x82, 0xdb, 0x86, 0xba, - 0x7a, 0xa8, 0x49, 0x90, 0x21, 0x83, 0x3f, 0x00, 0x9b, 0x4f, 0x06, 0x89, 0xd3, 0x01, 0x6f, 0xc3, - 0x9c, 0x49, 0x79, 0x98, 0xf2, 0xa0, 0x9c, 0x12, 0x6e, 0x81, 0xea, 0x20, 0x9a, 0xa3, 0x98, 0x39, - 0x8d, 0xae, 0xd5, 0xab, 0x07, 0x40, 0xf5, 0xb0, 0xaf, 0x3d, 0xc8, 0x44, 0xe0, 0x0b, 0x50, 0xe3, - 0xa1, 0x12, 0x43, 0x38, 0x9b, 0x6f, 0xd3, 0x41, 0xd3, 0x74, 0x50, 0x3b, 0x4c, 0x58, 0x50, 0x4a, - 0xb7, 0xf5, 0x47, 0x05, 0xdc, 0x29, 0x5c, 0x28, 0x11, 0x72, 0x26, 0xc8, 0xff, 0x72, 
0xa2, 0xee, - 0x83, 0x1a, 0x9e, 0x4c, 0xf8, 0x29, 0x49, 0xae, 0x54, 0x3d, 0x6f, 0x62, 0x2f, 0x71, 0xa3, 0x34, - 0x0e, 0x8f, 0x40, 0x55, 0x48, 0x2c, 0x63, 0x61, 0x2e, 0xce, 0x83, 0xeb, 0xad, 0xd7, 0x33, 0x8d, - 0x49, 0x04, 0x43, 0x44, 0xc4, 0x13, 0x89, 0x0c, 0x0f, 0xec, 0x80, 0x8d, 0x10, 0xcb, 0xfe, 0x48, - 0x5f, 0x95, 0xcd, 0xc0, 0x5e, 0x2e, 0x3a, 0x1b, 0x47, 0xca, 0x81, 0x12, 0x3f, 0xdc, 0x05, 0xb6, - 0x7e, 0x78, 0x3e, 0x0f, 0xd3, 0xc5, 0x68, 0xab, 0x11, 0x1d, 0xa5, 0xce, 0x8b, 0xa2, 0x81, 0xf2, - 0x64, 0xf8, 0xab, 0x05, 0x5a, 0x38, 0x1e, 0x50, 0xb9, 0xc7, 0x18, 0x97, 0x38, 0x99, 0x4a, 0xb5, - 0x5b, 0xee, 0x35, 0x76, 0xbe, 0xf0, 0xd6, 0x7c, 0x04, 0xbd, 0x2b, 0x12, 0x7b, 0x7b, 0x97, 0x28, - 0x9e, 0x30, 0x19, 0xcd, 0x03, 0xc7, 0x68, 0xd4, 0xba, 0x1c, 0x46, 0x57, 0x6a, 0xc2, 0x1e, 0xa8, - 0x9f, 0xe2, 0x88, 0x51, 0x36, 0x14, 0x4e, 0xad, 0x5b, 0x56, 0xab, 0xad, 0x36, 0xe3, 0x3b, 0xe3, - 0x43, 0x59, 0xb4, 0xfd, 0x25, 0x78, 0x6f, 0x65, 0x39, 0xd8, 0x02, 0xe5, 0x31, 0x99, 0x27, 0x73, - 0x46, 0xea, 0x11, 0xbe, 0x0b, 0x36, 0x66, 0x78, 0x12, 0x13, 0x3d, 0x33, 0x1b, 0x25, 0xc6, 0xe3, - 0x5b, 0xbb, 0xd6, 0xd6, 0x9f, 0x16, 0x68, 0x16, 0xfe, 0x8d, 0x19, 0x25, 0xa7, 0xf0, 0x08, 0xd4, - 0xcc, 0xbd, 0xd1, 0x1c, 0x8d, 0x9d, 0xfb, 0xd7, 0x51, 0x40, 0x03, 0x82, 0x86, 0x7a, 0x15, 0xd2, - 0x3b, 0x98, 0xd2, 0xa8, 0xd3, 0x10, 0x19, 0x89, 0xcc, 0xc7, 0xed, 0xa3, 0xeb, 0x8b, 0x9a, 0x08, - 0x90, 0x5a, 0x28, 0x63, 0x0a, 0x3e, 0x3f, 0x3b, 0x77, 0x4b, 0xaf, 0xce, 0xdd, 0xd2, 0xeb, 0x73, - 0xb7, 0xf4, 0xcb, 0xd2, 0xb5, 0xce, 0x96, 0xae, 0xf5, 0x6a, 0xe9, 0x5a, 0xaf, 0x97, 0xae, 0xf5, - 0xd7, 0xd2, 0xb5, 0x7e, 0xfb, 0xdb, 0x2d, 0x7d, 0x7f, 0x6f, 0xcd, 0x6f, 0x9d, 0x7f, 0x02, 0x00, - 0x00, 0xff, 0xff, 0x5c, 0x49, 0x23, 0x22, 0x05, 0x09, 0x00, 0x00, -} - -func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.RequestSubResource) - copy(dAtA[i:], m.RequestSubResource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) - i-- - dAtA[i] = 0x7a - if m.RequestResource != nil { - { - size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.RequestKind != nil { - { - size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - if m.DryRun != nil { - i-- - if *m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - { - size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - { - size, 
err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - i -= len(m.Operation) - copy(dAtA[i:], m.Operation) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i-- - dAtA[i] = 0x3a - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x32 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x2a - i -= len(m.SubResource) - copy(dAtA[i:], m.SubResource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) - i-- - dAtA[i] = 0x22 - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Warnings) > 0 { - for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Warnings[iNdEx]) - copy(dAtA[i:], m.Warnings[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) - i-- - dAtA[i] = 0x3a - } - } - if len(m.AuditAnnotations) > 0 { - keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) - for k := range m.AuditAnnotations { - keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAuditAnnotations[iNdEx]) - copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if m.PatchType != nil { - i -= len(*m.PatchType) - copy(dAtA[i:], *m.PatchType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) - i-- - dAtA[i] = 0x2a - } - if m.Patch != nil { - i -= len(m.Patch) - copy(dAtA[i:], m.Patch) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) - i-- - dAtA[i] = 0x22 - } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i-- - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) 
- i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Request != nil { - { - size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AdmissionRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Kind.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubResource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - l = m.UserInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.OldObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DryRun != nil { - n += 2 - } - l = m.Options.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.RequestKind != nil { - l = m.RequestKind.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RequestResource != nil { - l = m.RequestResource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.RequestSubResource) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AdmissionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Patch != nil { - l = len(m.Patch) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PatchType != nil { - l = len(*m.PatchType) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.AuditAnnotations) > 0 { - for k, v := range m.AuditAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Warnings) > 0 { - for _, s := range m.Warnings { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AdmissionReview) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Request != nil { - l = m.Request.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func 
sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AdmissionRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionRequest{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, - `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, - `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, - `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, - `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, - `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionResponse) String() string { - if this == nil { - return "nil" - } - keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) - for k := range this.AuditAnnotations { - keysForAuditAnnotations = append(keysForAuditAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - mapStringForAuditAnnotations := "map[string]string{" - for _, k := range keysForAuditAnnotations { - mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) - } - mapStringForAuditAnnotations += "}" - s := strings.Join([]string{`&AdmissionResponse{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, - `Patch:` + valueToStringGenerated(this.Patch) + `,`, - `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, - `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, - `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionReview{`, - `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, - `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx 
< l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubResource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operation = Operation(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DryRun = &b - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestKind == nil { - m.RequestKind = &v1.GroupVersionKind{} - } - if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestResource == nil { - m.RequestResource = &v1.GroupVersionResource{} - } - if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequestSubResource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &v1.Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) - if m.Patch == nil { - m.Patch = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PatchType(dAtA[iNdEx:postIndex]) - m.PatchType = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuditAnnotations == nil { - m.AuditAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx 
+ skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.AuditAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Request == nil { - m.Request = &AdmissionRequest{} - } - if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Response == nil { - m.Response = &AdmissionResponse{} - } - if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/admission/v1/generated.proto deleted file mode 100644 index 9648aa58fb..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/generated.proto +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.admission.v1; - -import "k8s.io/api/authentication/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/admission/v1"; - -// AdmissionRequest describes the admission.Attributes for the admission request. -message AdmissionRequest { - // UID is an identifier for the individual request/response. 
It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - optional string uid = 1; - - // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; - - // Resource is the fully-qualified resource being requested (for example, v1.pods) - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; - - // SubResource is the subresource being requested, if any (for example, "status" or "scale") - // +optional - optional string subResource = 4; - - // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). - // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), - // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type for more details. - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; - - // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). - // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), - // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; - - // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") - // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - optional string requestSubResource = 15; - - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this field will contain an empty string. 
- // +optional
- optional string name = 5;
-
- // Namespace is the namespace associated with the request (if any).
- // +optional
- optional string namespace = 6;
-
- // Operation is the operation being performed. This may be different than the operation
- // requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
- optional string operation = 7;
-
- // UserInfo is information about the requesting user
- optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8;
-
- // Object is the object from the incoming request.
- // +optional
- optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
-
- // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
- // +optional
- optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
-
- // DryRun indicates that modifications will definitely not be persisted for this request.
- // Defaults to false.
- // +optional
- optional bool dryRun = 11;
-
- // Options is the operation option structure of the operation being performed.
- // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
- // different than the options the caller provided. e.g. for a patch request the performed
- // Operation might be a CREATE, in which case the Options will a
- // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
- // +optional
- optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
-}
-
-// AdmissionResponse describes an admission response.
-message AdmissionResponse {
- // UID is an identifier for the individual request/response.
- // This must be copied over from the corresponding AdmissionRequest.
- optional string uid = 1;
-
- // Allowed indicates whether or not the admission request was permitted.
- optional bool allowed = 2;
-
- // Result contains extra details into why an admission request was denied.
- // This field IS NOT consulted in any way if "Allowed" is "true".
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
-
- // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
- // +optional
- optional bytes patch = 4;
-
- // The type of Patch. Currently we only allow "JSONPatch".
- // +optional
- optional string patchType = 5;
-
- // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
- // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
- // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
- // the admission webhook to add additional context to the audit log for this request.
- // +optional
- map<string, string> auditAnnotations = 6;
-
- // warnings is a list of warning messages to return to the requesting API client.
- // Warning messages describe a problem the client making the API request should correct or be aware of.
- // Limit warnings to 120 characters if possible.
- // Warnings over 256 characters and large numbers of warnings may be truncated.
- // +optional
- repeated string warnings = 7;
-}
-
-// AdmissionReview describes an admission review request/response.
-message AdmissionReview {
- // Request describes the attributes for the admission request.
- // +optional
- optional AdmissionRequest request = 1;
-
- // Response describes the attributes for the admission response.
- // +optional - optional AdmissionResponse response = 2; -} - diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/register.go b/openshift/tools/vendor/k8s.io/api/admission/v1/register.go deleted file mode 100644 index 79000535c7..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name for this API. -const GroupName = "admission.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. -// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. -var ( - // SchemeBuilder points to a list of functions added to Scheme. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &AdmissionReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/types.go b/openshift/tools/vendor/k8s.io/api/admission/v1/types.go deleted file mode 100644 index 2def92da5b..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/types.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authenticationv1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.19 - -// AdmissionReview describes an admission review request/response. 
-type AdmissionReview struct { - metav1.TypeMeta `json:",inline"` - // Request describes the attributes for the admission request. - // +optional - Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // Response describes the attributes for the admission response. - // +optional - Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` -} - -// AdmissionRequest describes the admission.Attributes for the admission request. -type AdmissionRequest struct { - // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` - // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Resource is the fully-qualified resource being requested (for example, v1.pods) - Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` - // SubResource is the subresource being requested, if any (for example, "status" or "scale") - // +optional - SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` - - // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). - // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), - // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type for more details. - // +optional - RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` - // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). - // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), - // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type. 
- // +optional - RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` - // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") - // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` - - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this field will contain an empty string. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` - // Namespace is the namespace associated with the request (if any). - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` - // Operation is the operation being performed. This may be different than the operation - // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. - Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` - // UserInfo is information about the requesting user - UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` - // Object is the object from the incoming request. - // +optional - Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` - // OldObject is the existing object. Only populated for DELETE and UPDATE requests. - // +optional - OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` - // DryRun indicates that modifications will definitely not be persisted for this request. - // Defaults to false. - // +optional - DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` - // Options is the operation option structure of the operation being performed. - // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be - // different than the options the caller provided. e.g. for a patch request the performed - // Operation might be a CREATE, in which case the Options will a - // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. - // +optional - Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` -} - -// AdmissionResponse describes an admission response. -type AdmissionResponse struct { - // UID is an identifier for the individual request/response. - // This must be copied over from the corresponding AdmissionRequest. - UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` - - // Allowed indicates whether or not the admission request was permitted. - Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` - - // Result contains extra details into why an admission request was denied. - // This field IS NOT consulted in any way if "Allowed" is "true". - // +optional - Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` - - // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. - // +optional - Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` - - // The type of Patch. Currently we only allow "JSONPatch". 
- // +optional - PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` - - // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). - // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with - // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by - // the admission webhook to add additional context to the audit log for this request. - // +optional - AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` - - // warnings is a list of warning messages to return to the requesting API client. - // Warning messages describe a problem the client making the API request should correct or be aware of. - // Limit warnings to 120 characters if possible. - // Warnings over 256 characters and large numbers of warnings may be truncated. - // +optional - Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` -} - -// PatchType is the type of patch being used to represent the mutated object -type PatchType string - -// PatchType constants. -const ( - PatchTypeJSONPatch PatchType = "JSONPatch" -) - -// Operation is the type of resource operation being checked for admission control -type Operation string - -// Operation constants -const ( - Create Operation = "CREATE" - Update Operation = "UPDATE" - Delete Operation = "DELETE" - Connect Operation = "CONNECT" -) diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go deleted file mode 100644 index 1395a7e107..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AdmissionRequest = map[string]string{ - "": "AdmissionRequest describes the admission.Attributes for the admission request.", - "uid": "UID is an identifier for the individual request/response. 
It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", - "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", - "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", - "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", - "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", - "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", - "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", - "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.", - "namespace": "Namespace is the namespace associated with the request (if any).", - "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", - "userInfo": "UserInfo is information about the requesting user", - "object": "Object is the object from the incoming request.", - "oldObject": "OldObject is the existing object. 
Only populated for DELETE and UPDATE requests.", - "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", - "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", -} - -func (AdmissionRequest) SwaggerDoc() map[string]string { - return map_AdmissionRequest -} - -var map_AdmissionResponse = map[string]string{ - "": "AdmissionResponse describes an admission response.", - "uid": "UID is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.", - "allowed": "Allowed indicates whether or not the admission request was permitted.", - "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", - "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", - "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", - "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", - "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", -} - -func (AdmissionResponse) SwaggerDoc() map[string]string { - return map_AdmissionResponse -} - -var map_AdmissionReview = map[string]string{ - "": "AdmissionReview describes an admission review request/response.", - "request": "Request describes the attributes for the admission request.", - "response": "Response describes the attributes for the admission response.", -} - -func (AdmissionReview) SwaggerDoc() map[string]string { - return map_AdmissionReview -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go b/openshift/tools/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go deleted file mode 100644 index d356882851..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,142 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { - *out = *in - out.Kind = in.Kind - out.Resource = in.Resource - if in.RequestKind != nil { - in, out := &in.RequestKind, &out.RequestKind - *out = new(metav1.GroupVersionKind) - **out = **in - } - if in.RequestResource != nil { - in, out := &in.RequestResource, &out.RequestResource - *out = new(metav1.GroupVersionResource) - **out = **in - } - in.UserInfo.DeepCopyInto(&out.UserInfo) - in.Object.DeepCopyInto(&out.Object) - in.OldObject.DeepCopyInto(&out.OldObject) - if in.DryRun != nil { - in, out := &in.DryRun, &out.DryRun - *out = new(bool) - **out = **in - } - in.Options.DeepCopyInto(&out.Options) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. -func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { - if in == nil { - return nil - } - out := new(AdmissionRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { - *out = *in - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(metav1.Status) - (*in).DeepCopyInto(*out) - } - if in.Patch != nil { - in, out := &in.Patch, &out.Patch - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.PatchType != nil { - in, out := &in.PatchType, &out.PatchType - *out = new(PatchType) - **out = **in - } - if in.AuditAnnotations != nil { - in, out := &in.AuditAnnotations, &out.AuditAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Warnings != nil { - in, out := &in.Warnings, &out.Warnings - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. -func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { - if in == nil { - return nil - } - out := new(AdmissionResponse) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = new(AdmissionRequest) - (*in).DeepCopyInto(*out) - } - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = new(AdmissionResponse) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. -func (in *AdmissionReview) DeepCopy() *AdmissionReview { - if in == nil { - return nil - } - out := new(AdmissionReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *AdmissionReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go b/openshift/tools/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index ac81d993c6..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1 - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { - return 1, 19 -} diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/doc.go b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/doc.go deleted file mode 100644 index 447495684e..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=false -// +k8s:prerelease-lifecycle-gen=true - -// +groupName=admission.k8s.io - -package v1beta1 diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/generated.pb.go deleted file mode 100644 index 22147cbe94..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/generated.pb.go +++ /dev/null @@ -1,1782 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/api/admission/v1beta1/generated.proto - -package v1beta1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } -func (*AdmissionRequest) ProtoMessage() {} -func (*AdmissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d8f147b43c61e73e, []int{0} -} -func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionRequest.Merge(m, src) -} -func (m *AdmissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AdmissionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo - -func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } -func (*AdmissionResponse) ProtoMessage() {} -func (*AdmissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d8f147b43c61e73e, []int{1} -} -func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionResponse.Merge(m, src) -} -func (m *AdmissionResponse) XXX_Size() int { - return m.Size() -} -func (m *AdmissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo - -func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } -func (*AdmissionReview) ProtoMessage() {} -func (*AdmissionReview) Descriptor() ([]byte, []int) { - return fileDescriptor_d8f147b43c61e73e, []int{2} -} -func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionReview.Merge(m, src) -} -func (m *AdmissionReview) XXX_Size() int { - return m.Size() -} -func (m *AdmissionReview) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionReview.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AdmissionRequest)(nil), 
"k8s.io.api.admission.v1beta1.AdmissionRequest") - proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry") - proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") -} - -func init() { - proto.RegisterFile("k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_d8f147b43c61e73e) -} - -var fileDescriptor_d8f147b43c61e73e = []byte{ - // 911 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xb4, 0xb2, 0xaa, 0xb5, 0xc9, 0x01, - 0x19, 0xa9, 0x9d, 0x25, 0x11, 0x54, 0x51, 0xc5, 0x25, 0x4b, 0x22, 0x14, 0x90, 0x9a, 0x68, 0x5a, - 0x43, 0xe1, 0x80, 0x34, 0xb6, 0xa7, 0xf6, 0x60, 0x7b, 0x66, 0xd9, 0x99, 0x4d, 0xf0, 0x8d, 0x3b, - 0x17, 0xbe, 0x01, 0x5f, 0x80, 0x6f, 0xc1, 0x25, 0xc7, 0x1e, 0x7b, 0xb2, 0x88, 0xf9, 0x16, 0x39, - 0xa1, 0x99, 0x9d, 0xf5, 0x3a, 0x4e, 0x52, 0xfa, 0xef, 0x94, 0x7d, 0x7f, 0x7e, 0xbf, 0xf7, 0xf2, - 0x7b, 0xfb, 0xde, 0x1a, 0xdc, 0x1f, 0xef, 0x4a, 0xc4, 0x44, 0x40, 0x22, 0x16, 0x90, 0xc1, 0x94, - 0x49, 0xc9, 0x04, 0x0f, 0x4e, 0xb6, 0x7b, 0x54, 0x91, 0xed, 0x60, 0x48, 0x39, 0x8d, 0x89, 0xa2, - 0x03, 0x14, 0xc5, 0x42, 0x09, 0x78, 0x2f, 0xcd, 0x46, 0x24, 0x62, 0x68, 0x99, 0x8d, 0x6c, 0x76, - 0xf3, 0xc1, 0x90, 0xa9, 0x51, 0xd2, 0x43, 0x7d, 0x31, 0x0d, 0x86, 0x62, 0x28, 0x02, 0x03, 0xea, - 0x25, 0xcf, 0x8d, 0x65, 0x0c, 0xf3, 0x94, 0x92, 0x35, 0x2f, 0x95, 0x4e, 0xd4, 0x88, 0x72, 0xc5, - 0xfa, 0x44, 0xa5, 0xf5, 0xd7, 0x4b, 0x37, 0x3f, 0xcf, 0xb3, 0xa7, 0xa4, 0x3f, 0x62, 0x9c, 0xc6, - 0xb3, 0x20, 0x1a, 0x0f, 0xb5, 0x43, 0x06, 0x53, 0xaa, 0xc8, 0x75, 0xa8, 0xe0, 0x26, 0x54, 0x9c, - 0x70, 0xc5, 0xa6, 0xf4, 0x0a, 0xe0, 0xe1, 0xff, 0x01, 0x64, 0x7f, 0x44, 0xa7, 0x64, 0x1d, 0xb7, - 0xf5, 0xa7, 0x0b, 0x1a, 0x7b, 0x99, 0x22, 0x98, 0xfe, 0x92, 0x50, 0xa9, 0x60, 0x08, 0x8a, 0x09, - 0x1b, 0x78, 0x4e, 0xdb, 0xe9, 0xb8, 0xe1, 0x67, 0x67, 0xf3, 0x56, 0x61, 0x31, 0x6f, 0x15, 0xbb, - 0x87, 0xfb, 0x17, 0xf3, 0xd6, 0xc7, 0x37, 0x15, 0x52, 0xb3, 0x88, 0x4a, 0xd4, 0x3d, 0xdc, 0xc7, - 0x1a, 0x0c, 0x9f, 0x81, 0xd2, 0x98, 0xf1, 0x81, 0x77, 0xab, 0xed, 0x74, 0x6a, 0x3b, 0x0f, 0x51, - 0x3e, 0x81, 0x25, 0x0c, 0x45, 0xe3, 0xa1, 0x76, 0x48, 0xa4, 0x65, 0x40, 0x27, 0xdb, 0xe8, 0xeb, - 0x58, 0x24, 0xd1, 0x77, 0x34, 0xd6, 0xcd, 0x7c, 0xcb, 0xf8, 0x20, 0xdc, 0xb4, 0xc5, 0x4b, 0xda, - 0xc2, 0x86, 0x11, 0x8e, 0x40, 0x35, 0xa6, 0x52, 0x24, 0x71, 0x9f, 0x7a, 0x45, 0xc3, 0xfe, 0xe8, - 0xcd, 0xd9, 0xb1, 0x65, 0x08, 0x1b, 0xb6, 0x42, 0x35, 0xf3, 0xe0, 0x25, 0x3b, 0xfc, 0x02, 0xd4, - 0x64, 0xd2, 0xcb, 0x02, 0x5e, 0xc9, 0xe8, 0x71, 0xd7, 0x02, 0x6a, 0x4f, 0xf2, 0x10, 0x5e, 0xcd, - 0x83, 0x0c, 0xd4, 0xe2, 0x54, 0x49, 0xdd, 0xb5, 0xf7, 0xc1, 0x3b, 0x29, 0x50, 0xd7, 0xa5, 0x70, - 0x4e, 0x87, 0x57, 0xb9, 0xe1, 0x0c, 0xd4, 0xad, 0xb9, 0xec, 0xf2, 0xf6, 0x3b, 0x4b, 0x72, 0x77, - 0x31, 0x6f, 0xd5, 0xf1, 0x65, 0x5a, 0xbc, 0x5e, 0x07, 0x7e, 0x03, 0xa0, 0x75, 0xad, 0x08, 0xe1, - 0xd5, 0x8d, 0x46, 0x4d, 0xab, 0x11, 0xc4, 0x57, 0x32, 0xf0, 0x35, 0x28, 0xd8, 0x06, 0x25, 0x4e, - 0xa6, 0xd4, 0xdb, 0x30, 0xe8, 0xe5, 0xd0, 0x1f, 0x93, 0x29, 0xc5, 0x26, 0x02, 0x03, 0xe0, 0xea, - 0xbf, 0x32, 0x22, 0x7d, 0xea, 0x95, 0x4d, 0xda, 0x1d, 0x9b, 0xe6, 0x3e, 0xce, 0x02, 0x38, 0xcf, - 0x81, 0x5f, 0x02, 0x57, 0x44, 0xfa, 0x55, 0x67, 0x82, 0x7b, 0x15, 0x03, 0xf0, 0x33, 0xc0, 0x51, - 0x16, 0xb8, 0x58, 0x35, 
0x70, 0x0e, 0x80, 0x4f, 0x41, 0x35, 0x91, 0x34, 0x3e, 0xe4, 0xcf, 0x85, - 0x57, 0x35, 0x82, 0x7e, 0x82, 0x56, 0x6f, 0xc8, 0xa5, 0xb5, 0xd7, 0x42, 0x76, 0x6d, 0x76, 0xfe, - 0x3e, 0x65, 0x1e, 0xbc, 0x64, 0x82, 0x5d, 0x50, 0x16, 0xbd, 0x9f, 0x69, 0x5f, 0x79, 0xae, 0xe1, - 0x7c, 0x70, 0xe3, 0x90, 0xec, 0xd6, 0x22, 0x4c, 0x4e, 0x0f, 0x7e, 0x55, 0x94, 0xeb, 0xf9, 0x84, - 0xb7, 0x2d, 0x75, 0xf9, 0xc8, 0x90, 0x60, 0x4b, 0x06, 0x7f, 0x02, 0xae, 0x98, 0x0c, 0x52, 0xa7, - 0x07, 0xde, 0x86, 0x79, 0x29, 0xe5, 0x51, 0xc6, 0x83, 0x73, 0x4a, 0xb8, 0x05, 0xca, 0x83, 0x78, - 0x86, 0x13, 0xee, 0xd5, 0xda, 0x4e, 0xa7, 0x1a, 0x02, 0xdd, 0xc3, 0xbe, 0xf1, 0x60, 0x1b, 0x81, - 0xcf, 0x40, 0x45, 0x44, 0x5a, 0x0c, 0xe9, 0x6d, 0xbe, 0x4d, 0x07, 0x75, 0xdb, 0x41, 0xe5, 0x28, - 0x65, 0xc1, 0x19, 0xdd, 0xd6, 0x5f, 0x25, 0x70, 0x67, 0xe5, 0x42, 0xc9, 0x48, 0x70, 0x49, 0xdf, - 0xcb, 0x89, 0xfa, 0x14, 0x54, 0xc8, 0x64, 0x22, 0x4e, 0x69, 0x7a, 0xa5, 0xaa, 0x79, 0x13, 0x7b, - 0xa9, 0x1b, 0x67, 0x71, 0x78, 0x0c, 0xca, 0x52, 0x11, 0x95, 0x48, 0x7b, 0x71, 0xee, 0xbf, 0xde, - 0x7a, 0x3d, 0x31, 0x98, 0x54, 0x30, 0x4c, 0x65, 0x32, 0x51, 0xd8, 0xf2, 0xc0, 0x16, 0xd8, 0x88, - 0x88, 0xea, 0x8f, 0xcc, 0x55, 0xd9, 0x0c, 0xdd, 0xc5, 0xbc, 0xb5, 0x71, 0xac, 0x1d, 0x38, 0xf5, - 0xc3, 0x5d, 0xe0, 0x9a, 0x87, 0xa7, 0xb3, 0x28, 0x5b, 0x8c, 0xa6, 0x1e, 0xd1, 0x71, 0xe6, 0xbc, - 0x58, 0x35, 0x70, 0x9e, 0x0c, 0x7f, 0x77, 0x40, 0x83, 0x24, 0x03, 0xa6, 0xf6, 0x38, 0x17, 0x8a, - 0xa4, 0x53, 0x29, 0xb7, 0x8b, 0x9d, 0xda, 0xce, 0x01, 0x7a, 0xd5, 0x97, 0x10, 0x5d, 0xd1, 0x19, - 0xed, 0xad, 0xf1, 0x1c, 0x70, 0x15, 0xcf, 0x42, 0xcf, 0x0a, 0xd5, 0x58, 0x0f, 0xe3, 0x2b, 0x85, - 0x61, 0x07, 0x54, 0x4f, 0x49, 0xcc, 0x19, 0x1f, 0x4a, 0xaf, 0xd2, 0x2e, 0xea, 0xfd, 0xd6, 0xeb, - 0xf1, 0xbd, 0xf5, 0xe1, 0x65, 0xb4, 0xf9, 0x15, 0xf8, 0xe8, 0xda, 0x72, 0xb0, 0x01, 0x8a, 0x63, - 0x3a, 0x4b, 0x87, 0x8d, 0xf5, 0x23, 0xfc, 0x10, 0x6c, 0x9c, 0x90, 0x49, 0x42, 0xcd, 0xe0, 0x5c, - 0x9c, 0x1a, 0x8f, 0x6e, 0xed, 0x3a, 0x5b, 0x7f, 0x3b, 0xa0, 0xbe, 0xf2, 0x6f, 0x9c, 0x30, 0x7a, - 0x0a, 0xbb, 0xa0, 0x62, 0x8f, 0x8e, 0xe1, 0xa8, 0xed, 0xa0, 0xd7, 0x96, 0xc1, 0xa0, 0xc2, 0x9a, - 0x7e, 0x29, 0xb2, 0x8b, 0x98, 0x71, 0xc1, 0x1f, 0xcc, 0x87, 0xc8, 0xe8, 0x64, 0x3f, 0x73, 0xc1, - 0x1b, 0xca, 0x9b, 0x4a, 0x91, 0x59, 0x78, 0x49, 0x17, 0x86, 0x67, 0xe7, 0x7e, 0xe1, 0xc5, 0xb9, - 0x5f, 0x78, 0x79, 0xee, 0x17, 0x7e, 0x5b, 0xf8, 0xce, 0xd9, 0xc2, 0x77, 0x5e, 0x2c, 0x7c, 0xe7, - 0xe5, 0xc2, 0x77, 0xfe, 0x59, 0xf8, 0xce, 0x1f, 0xff, 0xfa, 0x85, 0x1f, 0xef, 0xbd, 0xea, 0x47, - 0xd0, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x6e, 0x31, 0x41, 0x23, 0x09, 0x00, 0x00, -} - -func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.RequestSubResource) - copy(dAtA[i:], m.RequestSubResource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) - i-- - dAtA[i] = 0x7a - if m.RequestResource != nil { - { - size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.RequestKind != nil { - { - size, err := 
m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - if m.DryRun != nil { - i-- - if *m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - { - size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - { - size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - i -= len(m.Operation) - copy(dAtA[i:], m.Operation) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i-- - dAtA[i] = 0x3a - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x32 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x2a - i -= len(m.SubResource) - copy(dAtA[i:], m.SubResource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) - i-- - dAtA[i] = 0x22 - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Warnings) > 0 { - for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Warnings[iNdEx]) - copy(dAtA[i:], m.Warnings[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) - i-- - dAtA[i] = 0x3a - } - } - if len(m.AuditAnnotations) > 0 { - keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) - for k := range m.AuditAnnotations { - keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAuditAnnotations[iNdEx]) - copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(keysForAuditAnnotations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if m.PatchType != nil { - i -= len(*m.PatchType) - copy(dAtA[i:], *m.PatchType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) - i-- - dAtA[i] = 0x2a - } - if m.Patch != nil { - i -= len(m.Patch) - copy(dAtA[i:], m.Patch) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) - i-- - dAtA[i] = 0x22 - } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i-- - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Request != nil { - { - size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AdmissionRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Kind.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubResource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - l = m.UserInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.OldObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DryRun != nil { - n += 2 - } - l = m.Options.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.RequestKind != nil { - l = m.RequestKind.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RequestResource != nil { - l = m.RequestResource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.RequestSubResource) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AdmissionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Patch != nil { - l = len(m.Patch) - n += 1 + l + sovGenerated(uint64(l)) - } - 
if m.PatchType != nil { - l = len(*m.PatchType) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.AuditAnnotations) > 0 { - for k, v := range m.AuditAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Warnings) > 0 { - for _, s := range m.Warnings { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AdmissionReview) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Request != nil { - l = m.Request.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AdmissionRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionRequest{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, - `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, - `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, - `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, - `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, - `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionResponse) String() string { - if this == nil { - return "nil" - } - keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) - for k := range this.AuditAnnotations { - keysForAuditAnnotations = append(keysForAuditAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - mapStringForAuditAnnotations := "map[string]string{" - for _, k := range keysForAuditAnnotations { - mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) - } - mapStringForAuditAnnotations += "}" - s := strings.Join([]string{`&AdmissionResponse{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, - `Patch:` + valueToStringGenerated(this.Patch) + `,`, - 
`PatchType:` + valueToStringGenerated(this.PatchType) + `,`, - `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, - `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionReview{`, - `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, - `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubResource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operation = Operation(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DryRun = &b - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestKind == nil { - m.RequestKind = &v1.GroupVersionKind{} - } - if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 
0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestResource == nil { - m.RequestResource = &v1.GroupVersionResource{} - } - if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequestSubResource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &v1.Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) - if m.Patch == nil { - m.Patch = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PatchType(dAtA[iNdEx:postIndex]) - m.PatchType = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuditAnnotations == nil { - m.AuditAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.AuditAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Request == nil { - m.Request = &AdmissionRequest{} - } - if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Response == nil { - m.Response = &AdmissionResponse{} - } - if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/generated.proto b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/generated.proto deleted file mode 100644 index d27c05b727..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/generated.proto +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
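
Every field in the generated unmarshalers deleted above is decoded with the same hand-rolled shift-by-7 varint loop, followed by a bounds check before the payload is sliced. A minimal standalone sketch of that pattern, using only the standard library (the names here are illustrative, not part of the vendored code):

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint mirrors the loop gogo/protobuf emits: each byte
// contributes its low 7 bits, the high bit signals continuation,
// and a shift past 64 bits means the input is corrupt.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte
			return value, n, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 300 on the wire
	fmt.Println(v, n, err)                        // 300 2 <nil>
}

The skipGenerated helper above applies the same loop to unknown fields, so forward-compatible messages can be stepped over instead of rejected.
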
-*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.admission.v1beta1; - -import "k8s.io/api/authentication/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/admission/v1beta1"; - -// AdmissionRequest describes the admission.Attributes for the admission request. -message AdmissionRequest { - // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - optional string uid = 1; - - // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; - - // Resource is the fully-qualified resource being requested (for example, v1.pods) - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; - - // SubResource is the subresource being requested, if any (for example, "status" or "scale") - // +optional - optional string subResource = 4; - - // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). - // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), - // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type for more details. - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; - - // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). - // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), - // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type. 
- // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; - - // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") - // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - optional string requestSubResource = 15; - - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this field will contain an empty string. - // +optional - optional string name = 5; - - // Namespace is the namespace associated with the request (if any). - // +optional - optional string namespace = 6; - - // Operation is the operation being performed. This may be different than the operation - // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. - optional string operation = 7; - - // UserInfo is information about the requesting user - optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8; - - // Object is the object from the incoming request. - // +optional - optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; - - // OldObject is the existing object. Only populated for DELETE and UPDATE requests. - // +optional - optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; - - // DryRun indicates that modifications will definitely not be persisted for this request. - // Defaults to false. - // +optional - optional bool dryRun = 11; - - // Options is the operation option structure of the operation being performed. - // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be - // different than the options the caller provided. e.g. for a patch request the performed - // Operation might be a CREATE, in which case the Options will a - // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. - // +optional - optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; -} - -// AdmissionResponse describes an admission response. -message AdmissionResponse { - // UID is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. - optional string uid = 1; - - // Allowed indicates whether or not the admission request was permitted. - optional bool allowed = 2; - - // Result contains extra details into why an admission request was denied. - // This field IS NOT consulted in any way if "Allowed" is "true". - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; - - // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. - // +optional - optional bytes patch = 4; - - // The type of Patch. Currently we only allow "JSONPatch". - // +optional - optional string patchType = 5; - - // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). - // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with - // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by - // the admission webhook to add additional context to the audit log for this request. 
- // +optional - map auditAnnotations = 6; - - // warnings is a list of warning messages to return to the requesting API client. - // Warning messages describe a problem the client making the API request should correct or be aware of. - // Limit warnings to 120 characters if possible. - // Warnings over 256 characters and large numbers of warnings may be truncated. - // +optional - repeated string warnings = 7; -} - -// AdmissionReview describes an admission review request/response. -message AdmissionReview { - // Request describes the attributes for the admission request. - // +optional - optional AdmissionRequest request = 1; - - // Response describes the attributes for the admission response. - // +optional - optional AdmissionResponse response = 2; -} - diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/register.go b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/register.go deleted file mode 100644 index 1c53e755dd..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name for this API. -const GroupName = "admission.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. -// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. -var ( - // SchemeBuilder points to a list of functions added to Scheme. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &AdmissionReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/types.go b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/types.go deleted file mode 100644 index 00c619d998..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/types.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
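
The register.go removed above wires AdmissionReview into a runtime.Scheme through the usual SchemeBuilder pattern. For reference, a consumer typically registers the group like this (a sketch against the upstream module, since this change removes the vendored copy):

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers AdmissionReview under admission.k8s.io/v1beta1
	// so codecs built on this scheme can decode review payloads.
	if err := admissionv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := admissionv1beta1.SchemeGroupVersion.WithKind("AdmissionReview")
	fmt.Println(scheme.Recognizes(gvk)) // true
}
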
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - authenticationv1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.9 -// +k8s:prerelease-lifecycle-gen:deprecated=1.19 -// This API is never server served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally -// and having the generator against this group will protect future APIs which may be served. -// +k8s:prerelease-lifecycle-gen:replacement=admission.k8s.io,v1,AdmissionReview - -// AdmissionReview describes an admission review request/response. -type AdmissionReview struct { - metav1.TypeMeta `json:",inline"` - // Request describes the attributes for the admission request. - // +optional - Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // Response describes the attributes for the admission response. - // +optional - Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` -} - -// AdmissionRequest describes the admission.Attributes for the admission request. -type AdmissionRequest struct { - // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` - // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Resource is the fully-qualified resource being requested (for example, v1.pods) - Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` - // SubResource is the subresource being requested, if any (for example, "status" or "scale") - // +optional - SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` - - // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). - // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. 
- // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), - // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type for more details. - // +optional - RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` - // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). - // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), - // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` - // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") - // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` - - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this field will contain an empty string. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` - // Namespace is the namespace associated with the request (if any). - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` - // Operation is the operation being performed. This may be different than the operation - // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. - Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` - // UserInfo is information about the requesting user - UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` - // Object is the object from the incoming request. - // +optional - Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` - // OldObject is the existing object. Only populated for DELETE and UPDATE requests. 
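
Object and OldObject arrive as runtime.RawExtension, i.e. undifferentiated JSON bytes, and a webhook decodes them into the concrete type itself. A sketch of that step (the corev1.Pod target is just an illustrative choice, not something this diff prescribes):

package main

import (
	"encoding/json"
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	corev1 "k8s.io/api/core/v1"
)

// decodePod extracts the typed object from the raw payload. OldObject
// is only populated for UPDATE and DELETE requests, so check
// len(req.OldObject.Raw) before attempting any old/new comparison.
func decodePod(req *admissionv1beta1.AdmissionRequest) (*corev1.Pod, error) {
	pod := &corev1.Pod{}
	if err := json.Unmarshal(req.Object.Raw, pod); err != nil {
		return nil, err
	}
	return pod, nil
}

func main() {
	req := &admissionv1beta1.AdmissionRequest{}
	req.Object.Raw = []byte(`{"metadata":{"name":"demo"}}`)
	pod, err := decodePod(req)
	fmt.Println(pod.Name, err) // demo <nil>
}
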
- // +optional - OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` - // DryRun indicates that modifications will definitely not be persisted for this request. - // Defaults to false. - // +optional - DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` - // Options is the operation option structure of the operation being performed. - // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be - // different than the options the caller provided. e.g. for a patch request the performed - // Operation might be a CREATE, in which case the Options will a - // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. - // +optional - Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` -} - -// AdmissionResponse describes an admission response. -type AdmissionResponse struct { - // UID is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. - UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` - - // Allowed indicates whether or not the admission request was permitted. - Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` - - // Result contains extra details into why an admission request was denied. - // This field IS NOT consulted in any way if "Allowed" is "true". - // +optional - Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` - - // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. - // +optional - Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` - - // The type of Patch. Currently we only allow "JSONPatch". - // +optional - PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` - - // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). - // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with - // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by - // the admission webhook to add additional context to the audit log for this request. - // +optional - AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` - - // warnings is a list of warning messages to return to the requesting API client. - // Warning messages describe a problem the client making the API request should correct or be aware of. - // Limit warnings to 120 characters if possible. - // Warnings over 256 characters and large numbers of warnings may be truncated. - // +optional - Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` -} - -// PatchType is the type of patch being used to represent the mutated object -type PatchType string - -// PatchType constants. 
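
Tying the response fields together: a mutating webhook echoes the request UID, sets Allowed, and ships its change as an RFC 6902 JSON patch, optionally attaching audit annotations and warnings. A sketch using the types in this file (the PatchTypeJSONPatch constant is declared just below):

package main

import (
	"encoding/json"
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

// allowWithPatch admits the object and injects a label via JSONPatch,
// the only patch type this API supports.
func allowWithPatch(req *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
	patch, _ := json.Marshal([]map[string]interface{}{
		{"op": "add", "path": "/metadata/labels/reviewed", "value": "true"},
	})
	pt := admissionv1beta1.PatchTypeJSONPatch
	return &admissionv1beta1.AdmissionResponse{
		UID:       req.UID, // must be copied from the request
		Allowed:   true,
		Patch:     patch,
		PatchType: &pt,
		AuditAnnotations: map[string]string{
			"example.com/mutated": "reviewed-label-added",
		},
		Warnings: []string{"the webhook injected the 'reviewed' label"},
	}
}

func main() {
	out, _ := json.Marshal(allowWithPatch(&admissionv1beta1.AdmissionRequest{UID: "uid-123"}))
	fmt.Println(string(out))
}
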
-const ( - PatchTypeJSONPatch PatchType = "JSONPatch" -) - -// Operation is the type of resource operation being checked for admission control -type Operation string - -// Operation constants -const ( - Create Operation = "CREATE" - Update Operation = "UPDATE" - Delete Operation = "DELETE" - Connect Operation = "CONNECT" -) diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 82598ed573..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AdmissionRequest = map[string]string{ - "": "AdmissionRequest describes the admission.Attributes for the admission request.", - "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", - "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", - "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", - "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", - "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). 
If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", - "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", - "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", - "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.", - "namespace": "Namespace is the namespace associated with the request (if any).", - "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", - "userInfo": "UserInfo is information about the requesting user", - "object": "Object is the object from the incoming request.", - "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", - "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", - "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", -} - -func (AdmissionRequest) SwaggerDoc() map[string]string { - return map_AdmissionRequest -} - -var map_AdmissionResponse = map[string]string{ - "": "AdmissionResponse describes an admission response.", - "uid": "UID is an identifier for the individual request/response. 
This should be copied over from the corresponding AdmissionRequest.", - "allowed": "Allowed indicates whether or not the admission request was permitted.", - "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", - "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", - "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", - "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", - "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", -} - -func (AdmissionResponse) SwaggerDoc() map[string]string { - return map_AdmissionResponse -} - -var map_AdmissionReview = map[string]string{ - "": "AdmissionReview describes an admission review request/response.", - "request": "Request describes the attributes for the admission request.", - "response": "Response describes the attributes for the admission response.", -} - -func (AdmissionReview) SwaggerDoc() map[string]string { - return map_AdmissionReview -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 8234b322f9..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,142 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { - *out = *in - out.Kind = in.Kind - out.Resource = in.Resource - if in.RequestKind != nil { - in, out := &in.RequestKind, &out.RequestKind - *out = new(v1.GroupVersionKind) - **out = **in - } - if in.RequestResource != nil { - in, out := &in.RequestResource, &out.RequestResource - *out = new(v1.GroupVersionResource) - **out = **in - } - in.UserInfo.DeepCopyInto(&out.UserInfo) - in.Object.DeepCopyInto(&out.Object) - in.OldObject.DeepCopyInto(&out.OldObject) - if in.DryRun != nil { - in, out := &in.DryRun, &out.DryRun - *out = new(bool) - **out = **in - } - in.Options.DeepCopyInto(&out.Options) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. -func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { - if in == nil { - return nil - } - out := new(AdmissionRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { - *out = *in - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(v1.Status) - (*in).DeepCopyInto(*out) - } - if in.Patch != nil { - in, out := &in.Patch, &out.Patch - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.PatchType != nil { - in, out := &in.PatchType, &out.PatchType - *out = new(PatchType) - **out = **in - } - if in.AuditAnnotations != nil { - in, out := &in.AuditAnnotations, &out.AuditAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Warnings != nil { - in, out := &in.Warnings, &out.Warnings - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. -func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { - if in == nil { - return nil - } - out := new(AdmissionResponse) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = new(AdmissionRequest) - (*in).DeepCopyInto(*out) - } - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = new(AdmissionResponse) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. -func (in *AdmissionReview) DeepCopy() *AdmissionReview { - if in == nil { - return nil - } - out := new(AdmissionReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
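
These generated DeepCopy helpers exist so callers can hand out copies without aliasing the underlying maps and slices. A quick illustration of that guarantee (a sketch, again assuming the upstream module):

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

func main() {
	orig := &admissionv1beta1.AdmissionResponse{
		Allowed:          true,
		AuditAnnotations: map[string]string{"phase": "initial"},
	}
	cp := orig.DeepCopy()
	cp.AuditAnnotations["phase"] = "mutated"
	// DeepCopy allocated a fresh map, so the original is unchanged.
	fmt.Println(orig.AuditAnnotations["phase"]) // initial
}
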
-func (in *AdmissionReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go b/openshift/tools/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index f96e8a4433..0000000000 --- a/openshift/tools/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1beta1 - -import ( - schema "k8s.io/apimachinery/pkg/runtime/schema" -) - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { - return 1, 9 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *AdmissionReview) APILifecycleDeprecated() (major, minor int) { - return 1, 19 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *AdmissionReview) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "admission.k8s.io", Version: "v1", Kind: "AdmissionReview"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
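
The prerelease-lifecycle methods in this deleted file let tooling discover programmatically, rather than from release notes, that admission/v1beta1 was introduced in 1.9, deprecated in 1.19, removed in 1.22, and what replaces it. For example (a sketch against the upstream module):

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

func main() {
	var ar admissionv1beta1.AdmissionReview
	major, minor := ar.APILifecycleDeprecated()
	fmt.Printf("deprecated since v%d.%d\n", major, minor) // v1.19
	repl := ar.APILifecycleReplacement()
	fmt.Printf("replacement: %s/%s, Kind=%s\n", repl.Group, repl.Version, repl.Kind)
}
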
-func (in *AdmissionReview) APILifecycleRemoved() (major, minor int) { - return 1, 22 -} diff --git a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 261ae41bd0..bf1ae59488 100644 --- a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -25,6 +25,7 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1" v11 "k8s.io/api/admissionregistration/v1" k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,10 +47,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} } +func (*ApplyConfiguration) ProtoMessage() {} +func (*ApplyConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{0} +} +func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ApplyConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyConfiguration.Merge(m, src) +} +func (m *ApplyConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ApplyConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo + func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } func (*AuditAnnotation) ProtoMessage() {} func (*AuditAnnotation) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{0} + return fileDescriptor_7f7c65a4f012fb19, []int{1} } func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +106,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } func (*ExpressionWarning) ProtoMessage() {} func (*ExpressionWarning) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{1} + return fileDescriptor_7f7c65a4f012fb19, []int{2} } func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,10 +131,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() { var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo +func (m *JSONPatch) Reset() { *m = JSONPatch{} } +func (*JSONPatch) ProtoMessage() {} +func (*JSONPatch) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{3} +} +func (m *JSONPatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONPatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONPatch.Merge(m, src) +} +func (m *JSONPatch) XXX_Size() int { + return m.Size() +} +func (m *JSONPatch) XXX_DiscardUnknown() { + xxx_messageInfo_JSONPatch.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONPatch proto.InternalMessageInfo + func (m *MatchCondition) Reset() { *m 
= MatchCondition{} } func (*MatchCondition) ProtoMessage() {} func (*MatchCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{2} + return fileDescriptor_7f7c65a4f012fb19, []int{4} } func (m *MatchCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +190,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{3} + return fileDescriptor_7f7c65a4f012fb19, []int{5} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,10 +215,178 @@ func (m *MatchResources) XXX_DiscardUnknown() { var xxx_messageInfo_MatchResources proto.InternalMessageInfo +func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} } +func (*MutatingAdmissionPolicy) ProtoMessage() {} +func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{6} +} +func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src) +} +func (m *MutatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} } +func (*MutatingAdmissionPolicyBinding) ProtoMessage() {} +func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{7} +} +func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} } +func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{8} +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} } +func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{9} +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} } +func (*MutatingAdmissionPolicyList) ProtoMessage() {} +func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{10} +} +func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src) +} +func (m *MutatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} } +func (*MutatingAdmissionPolicySpec) ProtoMessage() {} +func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{11} +} +func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src) +} +func (m *MutatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() 
{} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{4} + return fileDescriptor_7f7c65a4f012fb19, []int{12} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +414,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{5} + return fileDescriptor_7f7c65a4f012fb19, []int{13} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +442,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{6} + return fileDescriptor_7f7c65a4f012fb19, []int{14} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,10 +467,38 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo +func (m *Mutation) Reset() { *m = Mutation{} } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{15} +} +func (m *Mutation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Mutation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mutation.Merge(m, src) +} +func (m *Mutation) XXX_Size() int { + return m.Size() +} +func (m *Mutation) XXX_DiscardUnknown() { + xxx_messageInfo_Mutation.DiscardUnknown(m) +} + +var xxx_messageInfo_Mutation proto.InternalMessageInfo + func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{7} + return fileDescriptor_7f7c65a4f012fb19, []int{16} } func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +526,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m *ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{8} + return fileDescriptor_7f7c65a4f012fb19, []int{17} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +554,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{9} + return fileDescriptor_7f7c65a4f012fb19, []int{18} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +582,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func 
(*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{10} + return fileDescriptor_7f7c65a4f012fb19, []int{19} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +610,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *TypeChecking) Reset() { *m = TypeChecking{} } func (*TypeChecking) ProtoMessage() {} func (*TypeChecking) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{11} + return fileDescriptor_7f7c65a4f012fb19, []int{20} } func (m *TypeChecking) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +638,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{12} + return fileDescriptor_7f7c65a4f012fb19, []int{21} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +666,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{13} + return fileDescriptor_7f7c65a4f012fb19, []int{22} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +694,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{14} + return fileDescriptor_7f7c65a4f012fb19, []int{23} } func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +722,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{15} + return fileDescriptor_7f7c65a4f012fb19, []int{24} } func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +750,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func (*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{16} + return fileDescriptor_7f7c65a4f012fb19, []int{25} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -525,7 +778,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() 
([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{17} + return fileDescriptor_7f7c65a4f012fb19, []int{26} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -553,7 +806,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{18} + return fileDescriptor_7f7c65a4f012fb19, []int{27} } func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,7 +834,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{19} + return fileDescriptor_7f7c65a4f012fb19, []int{28} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -609,7 +862,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{20} + return fileDescriptor_7f7c65a4f012fb19, []int{29} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -637,7 +890,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{21} + return fileDescriptor_7f7c65a4f012fb19, []int{30} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -665,7 +918,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{22} + return fileDescriptor_7f7c65a4f012fb19, []int{31} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -693,7 +946,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo func (m *Variable) Reset() { *m = Variable{} } func (*Variable) ProtoMessage() {} func (*Variable) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{23} + return fileDescriptor_7f7c65a4f012fb19, []int{32} } func (m *Variable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -721,7 +974,7 @@ var xxx_messageInfo_Variable proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7f7c65a4f012fb19, []int{24} + return fileDescriptor_7f7c65a4f012fb19, []int{33} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -747,13 +1000,22 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var 
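// Reviewer note (added commentary, not part of the generated file): init()
// below registers each new message under its fully-qualified proto name,
// "k8s.io.api.admissionregistration.v1beta1.<Kind>", the name the proto
// runtime uses to look types up at runtime. The fileDescriptor_7f7c65a4f012fb19
// literal that follows is the compiled generated.proto, gzip-compressed and
// embedded as bytes; the added messages grow it from 1957 to 2215 bytes, so
// the diff necessarily rewrites the whole hex blob rather than patching it in
// place. A hedged sketch of recovering the descriptor with only the standard
// library (illustrative; not code from this file):
//
//	zr, err := gzip.NewReader(bytes.NewReader(fileDescriptor_7f7c65a4f012fb19))
//	if err != nil {
//		panic(err)
//	}
//	raw, err := io.ReadAll(zr) // raw is a serialized FileDescriptorProto
//	_ = raw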
xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ApplyConfiguration") proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation") proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning") + proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1beta1.JSONPatch") proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition") proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources") + proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicy") + proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding") + proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingList") + proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingSpec") + proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyList") + proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicySpec") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") + proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1beta1.Mutation") proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations") proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind") proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef") @@ -779,130 +1041,174 @@ func init() { } var fileDescriptor_7f7c65a4f012fb19 = []byte{ - // 1957 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7, - 0xd5, 0x2b, 0x52, 0x12, 0xf9, 0xa8, 0x2f, 0x4e, 0x9c, 0x8a, 0x76, 0x1c, 0x52, 0x58, 0x04, 0x85, - 0x0c, 0xb4, 0x64, 0xac, 0x04, 0x89, 0xeb, 0xa0, 0x28, 0x44, 0xc5, 0x76, 0xed, 0x58, 0xb2, 0x30, - 0x4a, 0x24, 0xa0, 0x4d, 0x00, 0x8f, 0x76, 0x87, 0xe4, 0x96, 0xe4, 0xee, 0x76, 0x67, 0x49, 0x5b, - 0x2d, 0xd0, 0x16, 0xe8, 0x21, 0xd7, 0x02, 0xbd, 0x14, 0xe8, 0xa9, 0x7f, 0xa1, 0xf7, 0x02, 0xed, - 0xcd, 0xc7, 0xdc, 0x6a, 0xa0, 0x28, 0x51, 0xb1, 0x87, 0x9e, 0x7a, 0xe8, 0xa1, 0x3d, 0xe8, 0xd2, - 0x62, 0x66, 0x67, 0x3f, 0xb9, 0xb4, 0x56, 0xaa, 0xac, 0x5c, 0x7c, 0xd3, 0xbe, 0xcf, 0x79, 0x6f, - 0xde, 0xd7, 0x3c, 0x0a, 0x6e, 0x77, 0x6f, 0xb3, 0xba, 0x61, 0x35, 0x88, 0x6d, 0x34, 0x88, 0xde, - 0x37, 0x18, 0x33, 0x2c, 0xd3, 0xa1, 0x6d, 0x83, 0xb9, 0x0e, 0x71, 0x0d, 0xcb, 0x6c, 0x0c, 0x6f, - 0x1d, 0x52, 0x97, 0xdc, 0x6a, 0xb4, 0xa9, 0x49, 0x1d, 0xe2, 0x52, 0xbd, 0x6e, 0x3b, 0x96, 0x6b, - 0xa1, 0x75, 0x8f, 0xb3, 0x4e, 0x6c, 0xa3, 0x9e, 0xca, 0x59, 0x97, 0x9c, 0xd7, 0xbf, 0xdd, 0x36, - 0xdc, 0xce, 0xe0, 0xb0, 0xae, 0x59, 0xfd, 0x46, 0xdb, 0x6a, 0x5b, 
0x0d, 0x21, 0xe0, 0x70, 0xd0, - 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x5e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd, - 0xfd, 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xa8, 0x61, 0x77, 0xdb, 0x1c, 0xc0, 0x1a, - 0x7d, 0xea, 0x92, 0x34, 0xae, 0xc6, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0x7c, - 0x70, 0x1a, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xe5, 0xcd, 0x81, 0x6e, 0xb8, - 0x9b, 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x36, 0xe4, 0xba, 0xf4, 0xa8, 0xa2, 0xac, 0x29, 0xeb, - 0xc5, 0x66, 0xe9, 0xf9, 0xa8, 0x76, 0x65, 0x3c, 0xaa, 0xe5, 0x3e, 0xa1, 0x47, 0x98, 0xc3, 0xd1, - 0x26, 0x2c, 0x0f, 0x49, 0x6f, 0x40, 0xef, 0x3e, 0xb3, 0x1d, 0x2a, 0x5c, 0x50, 0x99, 0x11, 0xa4, - 0xab, 0x92, 0x74, 0x79, 0x3f, 0x8e, 0xc6, 0x49, 0x7a, 0xb5, 0x07, 0xe5, 0xf0, 0xeb, 0x80, 0x38, - 0xa6, 0x61, 0xb6, 0xd1, 0xb7, 0xa0, 0xd0, 0x32, 0x68, 0x4f, 0xc7, 0xb4, 0x25, 0x05, 0xae, 0x48, - 0x81, 0x85, 0x7b, 0x12, 0x8e, 0x03, 0x0a, 0x74, 0x13, 0xe6, 0x9f, 0x7a, 0x8c, 0x95, 0x9c, 0x20, - 0x5e, 0x96, 0xc4, 0xf3, 0x52, 0x1e, 0xf6, 0xf1, 0x6a, 0x0b, 0x96, 0xb6, 0x89, 0xab, 0x75, 0xb6, - 0x2c, 0x53, 0x37, 0x84, 0x85, 0x6b, 0x90, 0x37, 0x49, 0x9f, 0x4a, 0x13, 0x17, 0x24, 0x67, 0x7e, - 0x87, 0xf4, 0x29, 0x16, 0x18, 0xb4, 0x01, 0x40, 0x93, 0xf6, 0x21, 0x49, 0x07, 0x11, 0xd3, 0x22, - 0x54, 0xea, 0x9f, 0xf3, 0x52, 0x11, 0xa6, 0xcc, 0x1a, 0x38, 0x1a, 0x65, 0xe8, 0x19, 0x94, 0xb9, - 0x38, 0x66, 0x13, 0x8d, 0xee, 0xd1, 0x1e, 0xd5, 0x5c, 0xcb, 0x11, 0x5a, 0x4b, 0x1b, 0xef, 0xd5, - 0xc3, 0x30, 0x0d, 0x6e, 0xac, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x3c, 0x30, 0xea, 0xc3, 0x5b, - 0xf5, 0x47, 0xe4, 0x90, 0xf6, 0x7c, 0xd6, 0xe6, 0x9b, 0xe3, 0x51, 0xad, 0xbc, 0x93, 0x94, 0x88, - 0x27, 0x95, 0x20, 0x0b, 0x96, 0xac, 0xc3, 0x1f, 0x51, 0xcd, 0x0d, 0xd4, 0xce, 0x9c, 0x5f, 0x2d, - 0x1a, 0x8f, 0x6a, 0x4b, 0x8f, 0x63, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x19, 0x2c, 0x3a, 0xd2, 0x6e, - 0x3c, 0xe8, 0x51, 0x56, 0xc9, 0xad, 0xe5, 0xd6, 0x4b, 0x1b, 0x9b, 0xf5, 0xac, 0xd9, 0x58, 0xe7, - 0x76, 0xe9, 0x9c, 0xf7, 0xc0, 0x70, 0x3b, 0x8f, 0x6d, 0xea, 0xa1, 0x59, 0xf3, 0x4d, 0xe9, 0xf7, - 0x45, 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x15, 0xb8, 0x4a, 0x9f, 0x69, 0xbd, 0x81, 0x4e, - 0x63, 0x74, 0x95, 0xfc, 0x45, 0x9d, 0xe3, 0x86, 0x3c, 0xc7, 0xd5, 0xbb, 0x29, 0x6a, 0x70, 0xaa, - 0x72, 0xf4, 0x31, 0x94, 0xfa, 0x3c, 0x24, 0x76, 0xad, 0x9e, 0xa1, 0x1d, 0x55, 0xe6, 0x45, 0x20, - 0xa9, 0xe3, 0x51, 0xad, 0xb4, 0x1d, 0x82, 0x4f, 0x46, 0xb5, 0xe5, 0xc8, 0xe7, 0xa7, 0x47, 0x36, - 0xc5, 0x51, 0x36, 0xf5, 0x4f, 0x05, 0x58, 0xde, 0x1e, 0xf0, 0xf4, 0x34, 0xdb, 0x07, 0xf4, 0xb0, - 0x63, 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x14, 0x16, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xcb, 0x32, 0x5b, - 0x46, 0x5b, 0x06, 0xc0, 0x77, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x8a, 0x08, 0x69, 0x5e, 0x95, 0x8a, - 0x16, 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x59, 0x27, 0x12, 0x02, 0x1f, 0x66, 0xd1, 0x58, - 0x4f, 0x71, 0xf8, 0xa2, 0xd4, 0x35, 0xeb, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc1, 0x62, 0x8b, 0x18, - 0xbd, 0x81, 0x43, 0xa5, 0x53, 0xf3, 0xc2, 0x03, 0xdf, 0xe4, 0x11, 0x72, 0x2f, 0x8a, 0x38, 0x19, - 0xd5, 0xca, 0x31, 0x80, 0x70, 0x6c, 0x9c, 0x39, 0x79, 0x41, 0xc5, 0x73, 0x5d, 0x50, 0x7a, 0x9e, - 0xcf, 0x7e, 0x3d, 0x79, 0x5e, 0x7a, 0xb5, 0x79, 0xfe, 0x31, 0x94, 0x98, 0xa1, 0xd3, 0xbb, 0xad, - 0x16, 0xd5, 0x5c, 0x56, 0x99, 0x0b, 0x1d, 0xb6, 0x17, 0x82, 0xb9, 0xc3, 0xc2, 0xcf, 0xad, 0x1e, - 0x61, 0x0c, 0x47, 0xd9, 0xd0, 0x1d, 0x58, 0xe2, 0x5d, 0xc9, 0x1a, 0xb8, 0x7b, 0x54, 0xb3, 0x4c, - 0x9d, 0x89, 0xd4, 0x98, 0xf5, 0x4e, 0xf0, 0x69, 0x0c, 0x83, 0x13, 0x94, 0xe8, 0x33, 0x58, 
0x0d, - 0xa2, 0x08, 0xd3, 0xa1, 0x41, 0x9f, 0xee, 0x53, 0x87, 0x7f, 0xb0, 0x4a, 0x61, 0x2d, 0xb7, 0x5e, - 0x6c, 0xbe, 0x35, 0x1e, 0xd5, 0x56, 0x37, 0xd3, 0x49, 0xf0, 0x34, 0x5e, 0xf4, 0x04, 0x90, 0x43, - 0x0d, 0x73, 0x68, 0x69, 0x22, 0xfc, 0x64, 0x40, 0x80, 0xb0, 0xef, 0xdd, 0xf1, 0xa8, 0x86, 0xf0, - 0x04, 0xf6, 0x64, 0x54, 0xfb, 0xc6, 0x24, 0x54, 0x84, 0x47, 0x8a, 0x2c, 0xf4, 0x53, 0x58, 0xee, - 0xc7, 0x1a, 0x11, 0xab, 0x2c, 0x88, 0x0c, 0xb9, 0x9d, 0x3d, 0x27, 0xe3, 0x9d, 0x2c, 0xec, 0xb9, - 0x71, 0x38, 0xc3, 0x49, 0x4d, 0xea, 0x5f, 0x15, 0xb8, 0x91, 0xa8, 0x21, 0x5e, 0xba, 0x0e, 0x3c, - 0x0d, 0xe8, 0x09, 0x14, 0x78, 0x54, 0xe8, 0xc4, 0x25, 0xb2, 0x45, 0xbd, 0x9b, 0x2d, 0x86, 0xbc, - 0x80, 0xd9, 0xa6, 0x2e, 0x09, 0x5b, 0x64, 0x08, 0xc3, 0x81, 0x54, 0xf4, 0x43, 0x28, 0x48, 0xcd, - 0xac, 0x32, 0x23, 0x0c, 0xff, 0xce, 0x19, 0x0c, 0x8f, 0x9f, 0xbd, 0x99, 0xe7, 0xaa, 0x70, 0x20, - 0x50, 0xfd, 0xa7, 0x02, 0x6b, 0x2f, 0xb3, 0xef, 0x91, 0xc1, 0x5c, 0xf4, 0xf9, 0x84, 0x8d, 0xf5, - 0x8c, 0x79, 0x62, 0x30, 0xcf, 0xc2, 0x60, 0x26, 0xf1, 0x21, 0x11, 0xfb, 0xba, 0x30, 0x6b, 0xb8, - 0xb4, 0xef, 0x1b, 0x77, 0xef, 0xdc, 0xc6, 0xc5, 0x0e, 0x1e, 0x96, 0xc1, 0x07, 0x5c, 0x38, 0xf6, - 0x74, 0xa8, 0x2f, 0x14, 0x58, 0x9d, 0xd2, 0xa9, 0xd0, 0x87, 0x61, 0x2f, 0x16, 0x45, 0xa4, 0xa2, - 0x88, 0xbc, 0x28, 0x47, 0x9b, 0xa8, 0x40, 0xe0, 0x38, 0x1d, 0xfa, 0xa5, 0x02, 0xc8, 0x99, 0x90, - 0x27, 0x3b, 0xc7, 0xb9, 0xeb, 0xf8, 0x75, 0x69, 0x00, 0x9a, 0xc4, 0xe1, 0x14, 0x75, 0x2a, 0x81, - 0xe2, 0x2e, 0x71, 0x48, 0xff, 0x13, 0xc3, 0xd4, 0xf9, 0x24, 0x46, 0x6c, 0x43, 0x66, 0xa9, 0xec, - 0x76, 0x41, 0x98, 0x6d, 0xee, 0x3e, 0x90, 0x18, 0x1c, 0xa1, 0xe2, 0xbd, 0xb1, 0x6b, 0x98, 0xba, - 0x9c, 0xdb, 0x82, 0xde, 0xc8, 0xe5, 0x61, 0x81, 0x51, 0x7f, 0x3f, 0x03, 0x05, 0xa1, 0x83, 0xcf, - 0x92, 0xa7, 0xb7, 0xd2, 0x06, 0x14, 0x83, 0xd2, 0x2b, 0xa5, 0x96, 0x25, 0x59, 0x31, 0x28, 0xd3, - 0x38, 0xa4, 0x41, 0x5f, 0x40, 0x81, 0xf9, 0x05, 0x39, 0x77, 0xfe, 0x82, 0xbc, 0xc0, 0x23, 0x2d, - 0x28, 0xc5, 0x81, 0x48, 0xe4, 0xc2, 0xaa, 0xcd, 0x4f, 0x4f, 0x5d, 0xea, 0xec, 0x58, 0xee, 0x3d, - 0x6b, 0x60, 0xea, 0x9b, 0x1a, 0xf7, 0x9e, 0xec, 0x86, 0x77, 0x78, 0x09, 0xdc, 0x4d, 0x27, 0x39, - 0x19, 0xd5, 0xde, 0x9a, 0x82, 0x12, 0xa5, 0x6b, 0x9a, 0x68, 0xf5, 0x77, 0x0a, 0xac, 0xec, 0x51, - 0x67, 0x68, 0x68, 0x14, 0xd3, 0x16, 0x75, 0xa8, 0xa9, 0x25, 0x5c, 0xa3, 0x64, 0x70, 0x8d, 0xef, - 0xed, 0x99, 0xa9, 0xde, 0xbe, 0x01, 0x79, 0x9b, 0xb8, 0x1d, 0x39, 0xd8, 0x17, 0x38, 0x76, 0x97, - 0xb8, 0x1d, 0x2c, 0xa0, 0x02, 0x6b, 0x39, 0xae, 0x30, 0x74, 0x56, 0x62, 0x2d, 0xc7, 0xc5, 0x02, - 0xaa, 0xfe, 0x46, 0x81, 0x05, 0x6e, 0xc5, 0x56, 0x87, 0x6a, 0x5d, 0xfe, 0xac, 0xf8, 0x52, 0x01, - 0x44, 0x93, 0x8f, 0x0d, 0x2f, 0x23, 0x4a, 0x1b, 0x1f, 0x65, 0x4f, 0xd1, 0x89, 0x07, 0x4b, 0x18, - 0xd6, 0x13, 0x28, 0x86, 0x53, 0x54, 0xaa, 0x7f, 0x99, 0x81, 0x6b, 0xfb, 0xa4, 0x67, 0xe8, 0x22, - 0xd5, 0x83, 0xfe, 0x24, 0x9b, 0xc3, 0xab, 0x2f, 0xbf, 0x06, 0xe4, 0x99, 0x4d, 0x35, 0x99, 0xcd, - 0xf7, 0xb3, 0x9b, 0x3e, 0xf5, 0xd0, 0x7b, 0x36, 0xd5, 0xc2, 0x1b, 0xe4, 0x5f, 0x58, 0xa8, 0x40, - 0x3f, 0x86, 0x39, 0xe6, 0x12, 0x77, 0xc0, 0x64, 0xf0, 0x3f, 0xb8, 0x08, 0x65, 0x42, 0x60, 0x73, - 0x49, 0xaa, 0x9b, 0xf3, 0xbe, 0xb1, 0x54, 0xa4, 0xfe, 0x47, 0x81, 0xb5, 0xa9, 0xbc, 0x4d, 0xc3, - 0xd4, 0x79, 0x30, 0xbc, 0x7a, 0x27, 0xdb, 0x31, 0x27, 0xef, 0x5c, 0x80, 0xdd, 0xf2, 0xec, 0xd3, - 0x7c, 0xad, 0xfe, 0x5b, 0x81, 0x77, 0x4e, 0x63, 0xbe, 0x84, 0xe6, 0x67, 0xc5, 0x9b, 0xdf, 0xc3, - 0x8b, 0xb3, 0x7c, 0x4a, 0x03, 0xfc, 0x32, 0x77, 0xba, 0xdd, 0xdc, 0x4d, 0xbc, 0x83, 0xd8, 0x02, - 0xb8, 0x13, 0x16, 
0xf9, 0xe0, 0x12, 0x77, 0x03, 0x0c, 0x8e, 0x50, 0x71, 0x5f, 0xd9, 0xb2, 0x3d, - 0xc8, 0xab, 0xdc, 0xc8, 0x6e, 0x90, 0xdf, 0x58, 0xbc, 0xf2, 0xed, 0x7f, 0xe1, 0x40, 0x22, 0x72, - 0x61, 0xa9, 0x1f, 0x5b, 0x14, 0xc8, 0x34, 0x39, 0xeb, 0x1c, 0x18, 0xf0, 0x7b, 0x73, 0x73, 0x1c, - 0x86, 0x13, 0x3a, 0xd0, 0x01, 0x94, 0x87, 0xd2, 0x5f, 0x96, 0xe9, 0x95, 0x74, 0xef, 0x75, 0x5c, - 0x6c, 0xde, 0xe4, 0xef, 0x8d, 0xfd, 0x24, 0xf2, 0x64, 0x54, 0x5b, 0x49, 0x02, 0xf1, 0xa4, 0x0c, - 0xf5, 0x1f, 0x0a, 0xbc, 0x3d, 0xf5, 0x26, 0x2e, 0x21, 0xf4, 0x3a, 0xf1, 0xd0, 0xdb, 0xba, 0x88, - 0xd0, 0x4b, 0x8f, 0xb9, 0xdf, 0xce, 0xbd, 0xc4, 0x52, 0x11, 0x6c, 0x4f, 0xa0, 0x68, 0xfb, 0xb3, - 0x4b, 0xca, 0xa6, 0x27, 0x4b, 0xe4, 0x70, 0xd6, 0xe6, 0x22, 0xef, 0x9f, 0xc1, 0x27, 0x0e, 0x85, - 0xa2, 0x9f, 0xc0, 0x8a, 0x3f, 0xdb, 0x73, 0x7e, 0xc3, 0x74, 0xfd, 0x01, 0xed, 0xfc, 0xe1, 0x73, - 0x75, 0x3c, 0xaa, 0xad, 0x6c, 0x27, 0xa4, 0xe2, 0x09, 0x3d, 0xa8, 0x0b, 0xa5, 0xf0, 0xfa, 0xfd, - 0xf7, 0xfd, 0xfb, 0x67, 0xf7, 0xb7, 0x65, 0x36, 0xdf, 0x90, 0x0e, 0x2e, 0x85, 0x30, 0x86, 0xa3, - 0xd2, 0x2f, 0xf8, 0xa1, 0xff, 0x73, 0x58, 0x21, 0xf1, 0x45, 0x27, 0xab, 0xcc, 0x9e, 0xf5, 0x11, - 0x92, 0x58, 0x95, 0x36, 0x2b, 0xd2, 0x88, 0x95, 0x04, 0x82, 0xe1, 0x09, 0x65, 0x69, 0xaf, 0xbf, - 0xb9, 0xcb, 0x7a, 0xfd, 0x21, 0x0d, 0x8a, 0x43, 0xe2, 0x18, 0xe4, 0xb0, 0x47, 0xf9, 0x53, 0x3b, - 0x77, 0xb6, 0x82, 0xb6, 0x2f, 0x59, 0xc3, 0xc9, 0xce, 0x87, 0x30, 0x1c, 0xca, 0x55, 0xff, 0x38, - 0x03, 0xb5, 0x53, 0xda, 0x37, 0x7a, 0x08, 0xc8, 0x3a, 0x64, 0xd4, 0x19, 0x52, 0xfd, 0xbe, 0xb7, - 0x8a, 0xf6, 0xc7, 0xfa, 0x5c, 0x38, 0x50, 0x3d, 0x9e, 0xa0, 0xc0, 0x29, 0x5c, 0xa8, 0x07, 0x0b, - 0x6e, 0x64, 0xd4, 0x93, 0x59, 0xf0, 0x41, 0x76, 0xbb, 0xa2, 0x83, 0x62, 0x73, 0x65, 0x3c, 0xaa, - 0xc5, 0x46, 0x47, 0x1c, 0x93, 0x8e, 0x34, 0x00, 0x2d, 0xbc, 0x3a, 0x2f, 0xf4, 0x1b, 0xd9, 0xaa, - 0x58, 0x78, 0x63, 0x41, 0xdf, 0x89, 0x5c, 0x56, 0x44, 0xac, 0x7a, 0x3c, 0x0f, 0xe5, 0xd0, 0x85, - 0xaf, 0x77, 0x7d, 0xaf, 0x77, 0x7d, 0x2f, 0xdd, 0xf5, 0xc1, 0xeb, 0x5d, 0xdf, 0xb9, 0x76, 0x7d, - 0x29, 0xb5, 0xb8, 0x74, 0x69, 0x9b, 0xb8, 0x63, 0x05, 0xaa, 0x13, 0x39, 0x7e, 0xd9, 0xbb, 0xb8, - 0x2f, 0x26, 0x76, 0x71, 0x1f, 0x9d, 0x67, 0x6c, 0x9a, 0xb6, 0x8d, 0xfb, 0x97, 0x02, 0xea, 0xcb, - 0x6d, 0xbc, 0x84, 0xb9, 0xb0, 0x1f, 0x9f, 0x0b, 0xbf, 0xff, 0x7f, 0x18, 0x98, 0x65, 0x23, 0xf7, - 0x5f, 0x05, 0x20, 0x1c, 0x66, 0xd0, 0x3b, 0x10, 0xf9, 0xa1, 0x50, 0x96, 0x6e, 0xcf, 0x4d, 0x11, - 0x38, 0xba, 0x09, 0xf3, 0x7d, 0xca, 0x18, 0x69, 0xfb, 0x0b, 0x91, 0xe0, 0x77, 0xcc, 0x6d, 0x0f, - 0x8c, 0x7d, 0x3c, 0x3a, 0x80, 0x39, 0x87, 0x12, 0x66, 0x99, 0x72, 0x31, 0xf2, 0x3d, 0xfe, 0x0a, - 0xc6, 0x02, 0x72, 0x32, 0xaa, 0xdd, 0xca, 0xf2, 0x3b, 0x73, 0x5d, 0x3e, 0x9a, 0x05, 0x13, 0x96, - 0xe2, 0xd0, 0x7d, 0x28, 0x4b, 0x1d, 0x91, 0x03, 0x7b, 0x95, 0xf6, 0x9a, 0x3c, 0x4d, 0x79, 0x3b, - 0x49, 0x80, 0x27, 0x79, 0xd4, 0x87, 0x50, 0xf0, 0x07, 0x03, 0x54, 0x81, 0x7c, 0xe4, 0xbd, 0xe5, - 0x19, 0x2e, 0x20, 0x09, 0xc7, 0xcc, 0xa4, 0x3b, 0x46, 0xfd, 0x83, 0x02, 0x6f, 0xa4, 0x34, 0x25, - 0x74, 0x0d, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0x30, 0x3f, 0x1e, 0xd5, 0x72, 0x9f, 0xe1, 0x47, 0x98, - 0xc3, 0x10, 0x81, 0x79, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xc9, 0x7e, 0xe3, 0xc9, 0xbd, 0x56, - 0xb3, 0xc4, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x3a, 0x14, 0x34, 0xd2, 0x1c, 0x98, 0x7a, 0xcf, - 0xbb, 0xaf, 0x05, 0xef, 0x8d, 0xb7, 0xb5, 0xe9, 0xc1, 0x70, 0x80, 0x6d, 0xee, 0x3c, 0x3f, 0xae, - 0x5e, 0xf9, 0xea, 0xb8, 0x7a, 0xe5, 0xc5, 0x71, 0xf5, 0xca, 0x2f, 0xc6, 0x55, 0xe5, 0xf9, 0xb8, - 0xaa, 0x7c, 0x35, 0xae, 0x2a, 0x2f, 0xc6, 
0x55, 0xe5, 0x6f, 0xe3, 0xaa, 0xf2, 0xab, 0xbf, 0x57, - 0xaf, 0xfc, 0x60, 0x3d, 0xeb, 0x7f, 0x39, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf2, 0xe8, - 0x4a, 0x10, 0x21, 0x00, 0x00, + // 2215 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0x15, 0xf6, 0x92, 0x92, 0x45, 0x3e, 0xca, 0x92, 0x38, 0x71, 0x2a, 0xfa, 0x8f, 0x14, 0x16, 0x41, + 0x21, 0x03, 0x2d, 0x59, 0x2b, 0x41, 0xe2, 0x3a, 0x29, 0x02, 0xae, 0x62, 0x3b, 0x76, 0x24, 0x59, + 0x18, 0x39, 0x52, 0xd1, 0x26, 0x40, 0x56, 0xcb, 0x21, 0xb9, 0x11, 0xb9, 0xcb, 0xee, 0x2c, 0x65, + 0xab, 0x05, 0xda, 0x02, 0x2d, 0x90, 0x1e, 0x0b, 0xf4, 0x52, 0xa0, 0xa7, 0xde, 0x7b, 0x69, 0xef, + 0x05, 0x7a, 0xf4, 0x31, 0xb7, 0x1a, 0x28, 0x4a, 0x54, 0x4c, 0xd1, 0x9e, 0x7a, 0x48, 0x81, 0xf6, + 0xa0, 0x4b, 0x8b, 0x99, 0x9d, 0xfd, 0xdf, 0x95, 0x56, 0xb2, 0x2c, 0x17, 0x85, 0x6f, 0xda, 0xf7, + 0xe6, 0xbd, 0x37, 0xef, 0xcd, 0x9b, 0xf7, 0xbe, 0x79, 0x22, 0xdc, 0xdc, 0xb9, 0x49, 0xeb, 0xba, + 0xd9, 0x50, 0x07, 0x7a, 0x43, 0x6d, 0xf5, 0x75, 0x4a, 0x75, 0xd3, 0xb0, 0x48, 0x47, 0xa7, 0xb6, + 0xa5, 0xda, 0xba, 0x69, 0x34, 0x76, 0x6f, 0x6c, 0x13, 0x5b, 0xbd, 0xd1, 0xe8, 0x10, 0x83, 0x58, + 0xaa, 0x4d, 0x5a, 0xf5, 0x81, 0x65, 0xda, 0x26, 0x5a, 0x74, 0x24, 0xeb, 0xea, 0x40, 0xaf, 0x27, + 0x4a, 0xd6, 0x85, 0xe4, 0xe5, 0xaf, 0x77, 0x74, 0xbb, 0x3b, 0xdc, 0xae, 0x6b, 0x66, 0xbf, 0xd1, + 0x31, 0x3b, 0x66, 0x83, 0x2b, 0xd8, 0x1e, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x39, 0x8a, 0x2f, + 0xbf, 0x9e, 0x61, 0x4b, 0xd1, 0xdd, 0x5c, 0x7e, 0xc3, 0x17, 0xea, 0xab, 0x5a, 0x57, 0x37, 0x88, + 0xb5, 0xd7, 0x18, 0xec, 0x74, 0x18, 0x81, 0x36, 0xfa, 0xc4, 0x56, 0x93, 0xa4, 0x1a, 0x69, 0x52, + 0xd6, 0xd0, 0xb0, 0xf5, 0x3e, 0x89, 0x09, 0xbc, 0x79, 0x94, 0x00, 0xd5, 0xba, 0xa4, 0xaf, 0x46, + 0xe5, 0xe4, 0xf7, 0x01, 0x35, 0x07, 0x83, 0xde, 0xde, 0xb2, 0x69, 0xb4, 0xf5, 0xce, 0xd0, 0xf1, + 0x03, 0x2d, 0x01, 0x90, 0xc7, 0x03, 0x8b, 0x70, 0x0f, 0x2b, 0xd2, 0x82, 0xb4, 0x58, 0x54, 0xd0, + 0x93, 0x51, 0xed, 0xdc, 0x78, 0x54, 0x83, 0xdb, 0x1e, 0x07, 0x07, 0x56, 0xc9, 0x14, 0x66, 0x9b, + 0xc3, 0x96, 0x6e, 0x37, 0x0d, 0xc3, 0xb4, 0x1d, 0x35, 0xd7, 0x20, 0xbf, 0x43, 0xf6, 0x84, 0x7c, + 0x49, 0xc8, 0xe7, 0x3f, 0x20, 0x7b, 0x98, 0xd1, 0x51, 0x13, 0x66, 0x77, 0xd5, 0xde, 0x90, 0xf8, + 0x0a, 0x2b, 0x39, 0xbe, 0x74, 0x5e, 0x2c, 0x9d, 0xdd, 0x0c, 0xb3, 0x71, 0x74, 0xbd, 0xdc, 0x83, + 0xb2, 0xff, 0xb5, 0xa5, 0x5a, 0x86, 0x6e, 0x74, 0xd0, 0xd7, 0xa0, 0xd0, 0xd6, 0x49, 0xaf, 0x85, + 0x49, 0x5b, 0x28, 0x9c, 0x13, 0x0a, 0x0b, 0x77, 0x04, 0x1d, 0x7b, 0x2b, 0xd0, 0x75, 0x98, 0x7a, + 0xe4, 0x08, 0x56, 0xf2, 0x7c, 0xf1, 0xac, 0x58, 0x3c, 0x25, 0xf4, 0x61, 0x97, 0x2f, 0xbf, 0x0b, + 0xc5, 0xfb, 0x1b, 0x0f, 0xd6, 0xd6, 0x55, 0x5b, 0xeb, 0x9e, 0x28, 0x46, 0x6d, 0x98, 0x59, 0x65, + 0xc2, 0xcb, 0xa6, 0xd1, 0xd2, 0x79, 0x88, 0x16, 0x60, 0xc2, 0x50, 0xfb, 0x44, 0xc8, 0x4f, 0x0b, + 0xf9, 0x89, 0x35, 0xb5, 0x4f, 0x30, 0xe7, 0x44, 0xec, 0xe4, 0x32, 0xd9, 0xf9, 0xe3, 0x84, 0x30, + 0x84, 0x09, 0x35, 0x87, 0x96, 0x46, 0x28, 0x7a, 0x0c, 0x65, 0xa6, 0x8e, 0x0e, 0x54, 0x8d, 0x6c, + 0x90, 0x1e, 0xd1, 0x6c, 0xd3, 0xe2, 0x56, 0x4b, 0x4b, 0xaf, 0xd7, 0xfd, 0x1b, 0xe3, 0x25, 0x4f, + 0x7d, 0xb0, 0xd3, 0x61, 0x04, 0x5a, 0x67, 0x39, 0x5a, 0xdf, 0xbd, 0x51, 0x5f, 0x51, 0xb7, 0x49, + 0xcf, 0x15, 0x55, 0x5e, 0x1d, 0x8f, 0x6a, 0xe5, 0xb5, 0xa8, 0x46, 0x1c, 0x37, 0x82, 0x4c, 0x98, + 0x31, 0xb7, 0x3f, 0x25, 0x9a, 0xed, 0x99, 0xcd, 0x9d, 0xdc, 0x2c, 0x1a, 0x8f, 0x6a, 0x33, 0x0f, + 0x42, 0xea, 0x70, 0x44, 0x3d, 0xfa, 0x21, 0x5c, 0xb0, 0x84, 0xdf, 0x78, 0xd8, 0x23, 
0xb4, 0x92, + 0x5f, 0xc8, 0x2f, 0x96, 0x96, 0x9a, 0xf5, 0xac, 0x85, 0xa1, 0xce, 0xfc, 0x6a, 0x31, 0xd9, 0x2d, + 0xdd, 0xee, 0x3e, 0x18, 0x10, 0x87, 0x4d, 0x95, 0x57, 0x45, 0xdc, 0x2f, 0xe0, 0xa0, 0x7e, 0x1c, + 0x36, 0x87, 0x7e, 0x21, 0xc1, 0x45, 0xf2, 0x58, 0xeb, 0x0d, 0x5b, 0x24, 0xb4, 0xae, 0x32, 0x71, + 0x5a, 0xfb, 0xb8, 0x2a, 0xf6, 0x71, 0xf1, 0x76, 0x82, 0x19, 0x9c, 0x68, 0x1c, 0xbd, 0x07, 0xa5, + 0x3e, 0x4b, 0x89, 0x75, 0xb3, 0xa7, 0x6b, 0x7b, 0x95, 0x29, 0x9e, 0x48, 0xf2, 0x78, 0x54, 0x2b, + 0xad, 0xfa, 0xe4, 0x83, 0x51, 0x6d, 0x36, 0xf0, 0xf9, 0x70, 0x6f, 0x40, 0x70, 0x50, 0x4c, 0xfe, + 0xab, 0x04, 0xf3, 0xab, 0x43, 0x76, 0xbf, 0x8d, 0x4e, 0xd3, 0xdd, 0xbb, 0xc3, 0x43, 0x9f, 0x40, + 0x81, 0x1d, 0x5a, 0x4b, 0xb5, 0x55, 0x91, 0x59, 0xdf, 0xc8, 0x76, 0xc4, 0xce, 0x79, 0xae, 0x12, + 0x5b, 0xf5, 0x33, 0xdb, 0xa7, 0x61, 0x4f, 0x2b, 0xea, 0xc0, 0x04, 0x1d, 0x10, 0x4d, 0x24, 0xd0, + 0xed, 0xec, 0x81, 0x4c, 0xd9, 0xf2, 0xc6, 0x80, 0x68, 0xfe, 0xa5, 0x63, 0x5f, 0x98, 0x1b, 0x90, + 0xff, 0x29, 0x41, 0x35, 0x45, 0x46, 0xd1, 0x8d, 0x16, 0xab, 0x32, 0xcf, 0xdf, 0x5b, 0x23, 0xe4, + 0xed, 0xca, 0x33, 0x7b, 0x2b, 0x76, 0x9e, 0xea, 0xf4, 0x97, 0x12, 0xc8, 0x87, 0x8b, 0xae, 0xe8, + 0xd4, 0x46, 0x1f, 0xc5, 0x1c, 0xaf, 0x67, 0xbc, 0xc9, 0x3a, 0x75, 0xdc, 0xf6, 0xca, 0xb1, 0x4b, + 0x09, 0x38, 0xdd, 0x87, 0x49, 0xdd, 0x26, 0x7d, 0x5a, 0xc9, 0xf1, 0xcb, 0xf2, 0xfe, 0x69, 0x79, + 0xad, 0x5c, 0x10, 0x46, 0x27, 0xef, 0x31, 0xf5, 0xd8, 0xb1, 0x22, 0xff, 0x26, 0x77, 0x94, 0xcf, + 0x2c, 0x40, 0xac, 0x08, 0x0f, 0x38, 0x71, 0xcd, 0x2f, 0xd6, 0xde, 0xe1, 0xad, 0x7b, 0x1c, 0x1c, + 0x58, 0xc5, 0xe2, 0x34, 0x50, 0x2d, 0xb5, 0xef, 0xb6, 0xa1, 0xd2, 0xd2, 0x52, 0x76, 0x67, 0xd6, + 0x85, 0xa4, 0x32, 0xcd, 0xe2, 0xe4, 0x7e, 0x61, 0x4f, 0x23, 0xb2, 0x61, 0xa6, 0x1f, 0xaa, 0xf0, + 0xbc, 0x7b, 0x95, 0x96, 0x6e, 0x1e, 0x23, 0x60, 0x21, 0x79, 0xa7, 0xb4, 0x86, 0x69, 0x38, 0x62, + 0x43, 0xfe, 0x42, 0x82, 0x2b, 0x29, 0xe1, 0x3a, 0x83, 0xdc, 0x68, 0x87, 0x73, 0xa3, 0xf9, 0xec, + 0xb9, 0x91, 0x9c, 0x14, 0xbf, 0x3a, 0x9f, 0xea, 0x25, 0xcf, 0x86, 0x4f, 0xa0, 0xc8, 0xcf, 0xe1, + 0x03, 0xdd, 0x68, 0x25, 0xf4, 0xd0, 0x2c, 0x47, 0xcb, 0x44, 0x95, 0x0b, 0xe3, 0x51, 0xad, 0xe8, + 0x7d, 0x62, 0x5f, 0x29, 0xfa, 0x3e, 0xcc, 0xf5, 0x05, 0x50, 0x60, 0xf2, 0xba, 0x61, 0x53, 0x91, + 0x43, 0x27, 0x3f, 0xdf, 0x8b, 0xe3, 0x51, 0x6d, 0x6e, 0x35, 0xa2, 0x15, 0xc7, 0xec, 0x20, 0x0d, + 0x8a, 0xbb, 0xaa, 0xa5, 0xab, 0xdb, 0x7e, 0xeb, 0x3c, 0x46, 0xe2, 0x6e, 0x0a, 0x51, 0xa5, 0x2c, + 0x42, 0x5b, 0x74, 0x29, 0x14, 0xfb, 0x7a, 0x99, 0x91, 0xfe, 0xd0, 0x81, 0x89, 0x6e, 0x5f, 0x5c, + 0x3a, 0xee, 0x71, 0x9a, 0x86, 0x6f, 0xc4, 0xa5, 0x50, 0xec, 0xeb, 0x45, 0x2b, 0x70, 0xa1, 0xad, + 0xea, 0xbd, 0xa1, 0x45, 0x44, 0xd3, 0x9b, 0xe4, 0x17, 0xf7, 0xab, 0xac, 0x83, 0xdf, 0x09, 0x32, + 0x0e, 0x46, 0xb5, 0x72, 0x88, 0xc0, 0x1b, 0x5f, 0x58, 0x18, 0xfd, 0x00, 0x66, 0xfb, 0x21, 0xf0, + 0x46, 0x2b, 0xe7, 0xf9, 0xc6, 0x8f, 0x7b, 0x24, 0x9e, 0x02, 0x1f, 0xe8, 0x86, 0xe9, 0x14, 0x47, + 0x2d, 0xa1, 0x9f, 0x49, 0x80, 0x2c, 0xa2, 0x1b, 0xbb, 0xa6, 0xc6, 0x35, 0x86, 0xba, 0xf8, 0xb7, + 0x85, 0x1a, 0x84, 0x63, 0x2b, 0x0e, 0x46, 0xb5, 0x5b, 0x19, 0x9e, 0x2d, 0xf5, 0xb8, 0x24, 0x0f, + 0x41, 0x82, 0x4d, 0xf9, 0x6f, 0x05, 0x98, 0x75, 0x6f, 0xc7, 0x16, 0xd9, 0xee, 0x9a, 0xe6, 0x4e, + 0x06, 0x18, 0xfb, 0x08, 0xa6, 0xb5, 0x9e, 0x4e, 0x0c, 0xdb, 0x79, 0x69, 0x88, 0x6c, 0xfe, 0x56, + 0xf6, 0xd0, 0x09, 0x53, 0xcb, 0x01, 0x25, 0xca, 0x45, 0x61, 0x68, 0x3a, 0x48, 0xc5, 0x21, 0x43, + 0xe8, 0x23, 0x98, 0xb4, 0x02, 0x28, 0xf0, 0xad, 0x2c, 0x16, 0xeb, 0x09, 0x98, 0xcb, 0x2b, 0x15, + 0x0e, 0xc8, 
0x72, 0x94, 0xc6, 0x53, 0x6c, 0xe2, 0x59, 0x52, 0x2c, 0x82, 0xd1, 0x8a, 0x27, 0xc2, + 0x68, 0xc9, 0x50, 0x7f, 0xf2, 0xc5, 0x40, 0xfd, 0xd2, 0xf3, 0x85, 0xfa, 0xef, 0x41, 0x89, 0xea, + 0x2d, 0x72, 0xbb, 0xdd, 0x26, 0x9a, 0xcd, 0xee, 0xa3, 0x17, 0xb0, 0x0d, 0x9f, 0xcc, 0x02, 0xe6, + 0x7f, 0x2e, 0xf7, 0x54, 0x4a, 0x71, 0x50, 0x0c, 0xdd, 0x82, 0x19, 0xf6, 0x46, 0x36, 0x87, 0xf6, + 0x06, 0xd1, 0x4c, 0xa3, 0x45, 0xf9, 0xbd, 0x9a, 0x74, 0x76, 0xf0, 0x30, 0xc4, 0xc1, 0x91, 0x95, + 0xe8, 0x43, 0x98, 0xf7, 0xb2, 0x08, 0x93, 0x5d, 0x9d, 0x3c, 0xda, 0x24, 0x16, 0xe5, 0xd5, 0xa1, + 0xb0, 0x90, 0x5f, 0x2c, 0x2a, 0x57, 0xc6, 0xa3, 0xda, 0x7c, 0x33, 0x79, 0x09, 0x4e, 0x93, 0x45, + 0x3f, 0x4d, 0xbe, 0xef, 0xc0, 0x1d, 0x7c, 0x78, 0x56, 0x77, 0x3d, 0xa9, 0xe6, 0x4d, 0x9f, 0x55, + 0xcd, 0x93, 0xff, 0x2c, 0xc1, 0xd5, 0x48, 0xa1, 0x09, 0x8f, 0x29, 0x9e, 0x3f, 0x04, 0xff, 0x2e, + 0x14, 0x84, 0x65, 0x17, 0x74, 0x7c, 0xf3, 0xf8, 0xa0, 0x43, 0x68, 0x50, 0x26, 0x98, 0x29, 0xec, + 0x29, 0x94, 0xff, 0x21, 0xc1, 0xc2, 0x61, 0xfe, 0x9d, 0x01, 0xa2, 0xda, 0x09, 0x23, 0xaa, 0x3b, + 0x27, 0x76, 0x2e, 0xb4, 0xf1, 0x14, 0x58, 0xf5, 0xdb, 0x1c, 0x14, 0xdc, 0x3e, 0x8d, 0xde, 0x61, + 0x18, 0xca, 0xd6, 0xba, 0x2c, 0xf5, 0xc4, 0x54, 0xa3, 0xea, 0x36, 0xf3, 0x75, 0x97, 0x71, 0x10, + 0xfc, 0xc0, 0xbe, 0x00, 0xbf, 0x1e, 0x6a, 0x6c, 0x6e, 0x25, 0x20, 0xf0, 0x3b, 0xd9, 0xbd, 0x88, + 0xcf, 0xbe, 0x94, 0xaf, 0xb0, 0xcb, 0x15, 0xa7, 0xe3, 0x04, 0x7b, 0x0c, 0x08, 0x7e, 0x4a, 0x4d, + 0x83, 0x6f, 0x91, 0x57, 0xfe, 0x63, 0x01, 0x41, 0x6f, 0x96, 0xe4, 0x00, 0x41, 0xef, 0x13, 0xfb, + 0x4a, 0xe5, 0xa7, 0x12, 0xcc, 0xa7, 0x4c, 0x01, 0xd0, 0x5b, 0xfe, 0x9c, 0x83, 0x57, 0xe7, 0x8a, + 0xc4, 0x0b, 0x4e, 0x39, 0x38, 0xa0, 0xe0, 0x0c, 0x1c, 0x5e, 0x87, 0x7e, 0xc2, 0x8a, 0x4b, 0x4c, + 0x9f, 0x68, 0xc9, 0x27, 0x6e, 0x90, 0x97, 0x3d, 0x14, 0x12, 0xe3, 0xe1, 0x04, 0x73, 0xb2, 0x0a, + 0x3e, 0xf6, 0x65, 0x0f, 0x2c, 0x75, 0xa0, 0x8b, 0xf2, 0x17, 0x7d, 0x60, 0x35, 0xd7, 0xef, 0x09, + 0x0e, 0x0e, 0xac, 0x62, 0xa0, 0x63, 0x87, 0x21, 0xf0, 0x5c, 0x18, 0x74, 0x70, 0x2c, 0xcd, 0x39, + 0xf2, 0xef, 0x72, 0xe0, 0xbd, 0x9d, 0x32, 0x60, 0x94, 0x06, 0x14, 0xbd, 0x9e, 0x26, 0xb4, 0x7a, + 0x00, 0xd3, 0xeb, 0x7f, 0xd8, 0x5f, 0x83, 0x3e, 0x86, 0x02, 0x75, 0x3b, 0x5d, 0xfe, 0xe4, 0x9d, + 0x8e, 0xbf, 0xf1, 0xbc, 0x1e, 0xe7, 0xa9, 0x44, 0x36, 0xcc, 0xf3, 0x27, 0x01, 0xb1, 0x89, 0xb5, + 0x66, 0xda, 0x77, 0xcc, 0xa1, 0xd1, 0x6a, 0x6a, 0x3c, 0xd3, 0x1d, 0x98, 0x71, 0x8b, 0xf5, 0x96, + 0xf5, 0xe4, 0x25, 0x07, 0xa3, 0xda, 0x95, 0x14, 0x16, 0xbf, 0x4d, 0x69, 0xaa, 0xe5, 0x5f, 0x4b, + 0x30, 0xb7, 0x41, 0xac, 0x5d, 0x5d, 0x23, 0x98, 0xb4, 0x89, 0x45, 0x0c, 0x2d, 0x12, 0x1a, 0x29, + 0x43, 0x68, 0xdc, 0x68, 0xe7, 0x52, 0xa3, 0x7d, 0x15, 0x26, 0x06, 0xaa, 0xdd, 0x15, 0x53, 0xd7, + 0x02, 0xe3, 0xae, 0xab, 0x76, 0x17, 0x73, 0x2a, 0xe7, 0x9a, 0x96, 0xcd, 0x1d, 0x9d, 0x14, 0x5c, + 0xd3, 0xb2, 0x31, 0xa7, 0xca, 0xbf, 0x94, 0x60, 0x9a, 0x79, 0xb1, 0xdc, 0x25, 0xda, 0x8e, 0x6e, + 0x74, 0xd0, 0x67, 0x12, 0x20, 0x12, 0x9d, 0x04, 0x3b, 0x37, 0xa2, 0xb4, 0xf4, 0x76, 0xf6, 0x3b, + 0x19, 0x9b, 0x26, 0xfb, 0x69, 0x1d, 0x63, 0x51, 0x9c, 0x60, 0x52, 0xfe, 0x53, 0x0e, 0x2e, 0x6d, + 0xaa, 0x3d, 0xbd, 0xf5, 0x82, 0x66, 0x64, 0x7a, 0x68, 0x6a, 0x74, 0xf7, 0x38, 0x2f, 0xb7, 0x94, + 0x4d, 0xa7, 0x0d, 0x8c, 0xd0, 0xf7, 0xe0, 0x3c, 0xb5, 0x55, 0x7b, 0xe8, 0xce, 0x1e, 0xee, 0x9d, + 0x86, 0x31, 0xae, 0x50, 0x99, 0x11, 0xe6, 0xce, 0x3b, 0xdf, 0x58, 0x18, 0x92, 0xff, 0x2d, 0xc1, + 0x42, 0xaa, 0xec, 0xd9, 0x8d, 0xe6, 0x06, 0xa1, 0x20, 0xaf, 0x9d, 0x82, 0xdf, 0x47, 0x0d, 0xe7, + 0xfe, 0x25, 0xc1, 0x6b, 0x47, 0x09, 
0x9f, 0x01, 0x60, 0x30, 0xc3, 0x80, 0xe1, 0xfe, 0xe9, 0x79, + 0x9e, 0x02, 0x1a, 0x3e, 0xcb, 0x1f, 0xed, 0xf7, 0xcb, 0x11, 0x5d, 0xe0, 0x1f, 0x3d, 0x5b, 0x50, + 0xde, 0x15, 0xf1, 0x32, 0x0d, 0xa7, 0xa4, 0x3b, 0x13, 0x96, 0xa2, 0x72, 0x9d, 0x3d, 0xe4, 0x36, + 0xa3, 0xcc, 0x83, 0x51, 0x6d, 0x2e, 0x4a, 0xc4, 0x71, 0x1d, 0xf2, 0xdf, 0x25, 0xb8, 0x96, 0x7a, + 0x12, 0x67, 0x90, 0x7a, 0xdd, 0x70, 0xea, 0x2d, 0x9f, 0x46, 0xea, 0xa5, 0xce, 0xff, 0xae, 0x1d, + 0x5a, 0x0d, 0xff, 0xcf, 0x27, 0x80, 0x3b, 0x50, 0xf2, 0x8f, 0xdf, 0x1d, 0x9c, 0xbc, 0x71, 0xfc, + 0x78, 0x9b, 0x86, 0xf2, 0x8a, 0x08, 0x70, 0xc9, 0xa7, 0x51, 0x1c, 0xd4, 0x7e, 0xca, 0x13, 0x94, + 0x1f, 0xc1, 0x9c, 0x1a, 0xfe, 0x2f, 0x34, 0xad, 0x4c, 0x1e, 0xf7, 0xe1, 0x16, 0xf9, 0x3f, 0xb6, + 0x52, 0x11, 0x4e, 0xcc, 0x45, 0x18, 0x14, 0xc7, 0x8c, 0xbd, 0xd8, 0x29, 0x61, 0x68, 0x74, 0x3b, + 0xf5, 0x7c, 0x46, 0xb7, 0xf2, 0x1f, 0x72, 0x50, 0x3b, 0xa2, 0x7d, 0xa3, 0xfb, 0x80, 0xcc, 0x6d, + 0x4a, 0xac, 0x5d, 0xd2, 0xba, 0xeb, 0xfc, 0xe2, 0xc0, 0x85, 0xf5, 0x79, 0x1f, 0x50, 0x3d, 0x88, + 0xad, 0xc0, 0x09, 0x52, 0xa8, 0x07, 0xd3, 0x76, 0x00, 0xea, 0x89, 0x5b, 0xf0, 0x66, 0x76, 0xbf, + 0x82, 0x40, 0x51, 0x99, 0x1b, 0x8f, 0x6a, 0x21, 0xe8, 0x88, 0x43, 0xda, 0x91, 0x06, 0xa0, 0xf9, + 0x47, 0xe7, 0xa4, 0x7e, 0x23, 0x5b, 0x15, 0xf3, 0x4f, 0xcc, 0xeb, 0x3b, 0x81, 0xc3, 0x0a, 0xa8, + 0x95, 0xf7, 0xa7, 0xa0, 0xec, 0x87, 0xf0, 0xe5, 0x10, 0xf5, 0xe5, 0x10, 0xf5, 0xd0, 0x21, 0x2a, + 0xbc, 0x1c, 0xa2, 0x9e, 0x68, 0x88, 0x9a, 0x50, 0x8b, 0x4b, 0x67, 0x36, 0xbd, 0xdc, 0x97, 0xa0, + 0x1a, 0xbb, 0xe3, 0x67, 0x3d, 0xbf, 0xfc, 0x38, 0x36, 0xbf, 0x7c, 0xfb, 0x24, 0xb0, 0x29, 0x6d, + 0x82, 0xf9, 0xa5, 0x04, 0xf2, 0xe1, 0x3e, 0xfe, 0x4f, 0xff, 0x62, 0xe0, 0xf0, 0xad, 0xa7, 0x80, + 0xc3, 0xff, 0x48, 0x00, 0x3e, 0x98, 0x41, 0xaf, 0x41, 0xe0, 0x47, 0x58, 0xa2, 0x74, 0x3b, 0x61, + 0x0a, 0xd0, 0xd1, 0x75, 0x98, 0xea, 0x13, 0x4a, 0xd5, 0x8e, 0x3b, 0x10, 0xf1, 0x7e, 0x64, 0xb6, + 0xea, 0x90, 0xb1, 0xcb, 0x47, 0x5b, 0x70, 0xde, 0x22, 0x2a, 0x15, 0xd3, 0xcc, 0xa2, 0xf2, 0x2e, + 0x7b, 0x05, 0x63, 0x4e, 0x39, 0x18, 0xd5, 0x6e, 0x64, 0xf9, 0x39, 0x61, 0x5d, 0x3c, 0x9a, 0xb9, + 0x10, 0x16, 0xea, 0xd0, 0x5d, 0x28, 0x0b, 0x1b, 0x81, 0x0d, 0x3b, 0x95, 0xf6, 0x92, 0xd8, 0x4d, + 0x79, 0x35, 0xba, 0x00, 0xc7, 0x65, 0xe4, 0xfb, 0x50, 0x70, 0x81, 0x01, 0xaa, 0xc0, 0x44, 0xe0, + 0xbd, 0xe5, 0x38, 0xce, 0x29, 0x91, 0xc0, 0xe4, 0x92, 0x03, 0x23, 0xff, 0x5e, 0x82, 0x57, 0x12, + 0x9a, 0x12, 0xba, 0x04, 0xf9, 0xa1, 0xd5, 0x13, 0x21, 0x98, 0x1a, 0x8f, 0x6a, 0xf9, 0x0f, 0xf1, + 0x0a, 0x66, 0x34, 0xa4, 0xc2, 0x14, 0x75, 0xc6, 0x53, 0x22, 0x99, 0x6e, 0x65, 0x3f, 0xf1, 0xe8, + 0x5c, 0x4b, 0x29, 0xb1, 0x33, 0x70, 0xa9, 0xae, 0x5e, 0xb4, 0x08, 0x05, 0x4d, 0x55, 0x86, 0x46, + 0xab, 0xe7, 0x9c, 0xd7, 0xb4, 0xf3, 0xc6, 0x5b, 0x6e, 0x3a, 0x34, 0xec, 0x71, 0x95, 0xb5, 0x27, + 0xfb, 0xd5, 0x73, 0x9f, 0xef, 0x57, 0xcf, 0x3d, 0xdd, 0xaf, 0x9e, 0xfb, 0xf1, 0xb8, 0x2a, 0x3d, + 0x19, 0x57, 0xa5, 0xcf, 0xc7, 0x55, 0xe9, 0xe9, 0xb8, 0x2a, 0xfd, 0x65, 0x5c, 0x95, 0x7e, 0xfe, + 0x45, 0xf5, 0xdc, 0x77, 0x16, 0xb3, 0xfe, 0x98, 0xf5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13, + 0x7c, 0x49, 0xa4, 0xf7, 0x2a, 0x00, 0x00, +} + +func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplyConfiguration) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { @@ -971,6 +1277,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *JSONPatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MatchCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1086,7 +1420,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1096,112 +1430,18 @@ func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.MatchConditions) > 0 { - for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - } - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.ReinvocationPolicy != nil { - i -= len(*m.ReinvocationPolicy) - copy(dAtA[i:], *m.ReinvocationPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) - i-- - dAtA[i] = 0x52 - } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i-- - dAtA[i] = 0x4a - } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 - } - } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = 
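// Reviewer note (added commentary, not part of the generated file): every
// Marshal/MarshalTo/MarshalToSizedBuffer triple in this file uses the gogo
// back-to-front scheme: the buffer is allocated at Size() bytes, i starts at
// the end, and each field is written bytes first, then its varint length,
// then its one-byte tag, iterating from the highest field number down so the
// final wire output comes out in ascending field order. The hard-coded tag
// bytes are (field_number << 3) | wire_type, with wire type 2 meaning
// length-delimited:
//
//	0x0a == 1<<3 | 2 // field 1
//	0x12 == 2<<3 | 2 // field 2
//	0x1a == 3<<3 | 2 // field 3
//	0x22 == 4<<3 | 2 // field 4
//
// So the new ApplyConfiguration and JSONPatch above are single-field messages
// whose Expression string goes out as field 1 (tag 0xa), and
// encodeVarintGenerated (defined further down in this file) emits the
// standard base-128 varint for each length.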
encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1210,15 +1450,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1228,30 +1473,26 @@ func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1265,7 +1506,7 @@ func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1275,12 +1516,12 @@ func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
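// Reviewer note (added commentary, not part of the generated file): the heavy
// -/+ churn through this stretch is diff-alignment noise, not a behavioural
// rewrite. The generator emits the marshal functions in message order, so
// inserting the MutatingAdmissionPolicy* types makes the diff pair each old
// function with whichever new one now occupies its slot: here
// MutatingWebhookConfigurationList's body appears "rewritten" into
// MutatingAdmissionPolicyBindingList, while the identical webhook marshaling
// code reappears verbatim further down. Reviewing the new types in isolation
// is easier than reading the interleaved hunks.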
+func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1312,7 +1553,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1322,39 +1563,49 @@ func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - i-- - dAtA[i] = 0x12 - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ParamKind) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1364,30 +1615,44 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa 
return len(dAtA) - i, nil } -func (m *ParamRef) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1397,26 +1662,73 @@ func (m *ParamRef) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ParameterNotFoundAction != nil { - i -= len(*m.ParameterNotFoundAction) - copy(dAtA[i:], *m.ParameterNotFoundAction) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i -= len(m.ReinvocationPolicy) + copy(dAtA[i:], m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x3a + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a } - if m.Selector != nil { + if len(m.Mutations) > 0 { + for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1424,67 +1736,24 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Port != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - i-- - dAtA[i] = 0x20 + dAtA[i] = 0x12 } - if m.Path != nil { - i -= len(*m.Path) - copy(dAtA[i:], 
*m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *TypeChecking) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1494,20 +1763,20 @@ func (m *TypeChecking) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ExpressionWarnings) > 0 { - for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1515,54 +1784,91 @@ func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x62 } } - return len(dAtA) - i, nil -} - -func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - return dAtA[:n], nil -} - -func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a - { - size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1570,11 +1876,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1584,26 +1895,30 @@ func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1617,7 +1932,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1627,12 +1942,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m 
*ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1664,7 +1979,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { +func (m *Mutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1674,28 +1989,19 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *Mutation) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ValidationActions) > 0 { - for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ValidationActions[iNdEx]) - copy(dAtA[i:], m.ValidationActions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.MatchResources != nil { + if m.JSONPatch != nil { { - size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1703,11 +2009,11 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } - if m.ParamRef != nil { + if m.ApplyConfiguration != nil { { - size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1715,17 +2021,17 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - i -= len(m.PolicyName) - copy(dAtA[i:], m.PolicyName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i -= len(m.PatchType) + copy(dAtA[i:], m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1735,32 +2041,18 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if 
len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1768,11 +2060,20 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ParamKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1782,94 +2083,59 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Variables) > 0 { - for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.MatchConditions) > 0 { - for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.AuditAnnotations) > 0 { - for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ParamRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + return dAtA[:n], nil +} + +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParameterNotFoundAction != nil { + i -= 
len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) i-- dAtA[i] = 0x22 } - if len(m.Validations) > 0 { - for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.MatchConstraints != nil { - { - size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ParamKind != nil { + if m.Selector != nil { { - size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1877,12 +2143,22 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1892,49 +2168,42 @@ func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 } - if m.TypeChecking != nil { - { - size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1944,20 +2213,20 @@ func (m 
*ValidatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.MatchConditions) > 0 { - for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1965,84 +2234,44 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x5a - } - } - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x52 } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i-- - dAtA[i] = 0x4a + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 - } - } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a { - size, err := 
m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2051,15 +2280,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2069,30 +2303,26 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2106,7 +2336,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2116,12 +2346,12 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2153,7 +2383,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *Validation) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2163,42 +2393,58 @@ func (m *Validation) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validation) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.MessageExpression) - copy(dAtA[i:], m.MessageExpression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) - i-- - dAtA[i] = 0x22 - if m.Reason != nil { - i -= len(*m.Reason) - copy(dAtA[i:], *m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x1a } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Variable) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2208,30 +2454,44 @@ func (m *Variable) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Variable) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2241,335 +2501,636 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WebhookClientConfig) MarshalTo(dAtA 
[]byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0x1a - } - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i-- - dAtA[i] = 0x12 - } - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ } - dAtA[offset] = uint8(v) - return base -} -func (m *AuditAnnotation) Size() (n int) { - if m == nil { - return 0 + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ValueExpression) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ExpressionWarning) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var 
l int - _ = l - l = len(m.FieldRef) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Warning) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *MatchCondition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MatchResources) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ResourceRules) > 0 { - for _, e := range m.ResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.ExcludeResourceRules) > 0 { - for _, e := range m.ExcludeResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *MutatingWebhook) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x52 } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 } } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 } - if len(m.MatchConditions) > 0 { - for _, e := range m.MatchConditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - return n -} - -func (m *MutatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - return n -} - -func (m *MutatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = 
m.RuleWithOperations.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ParamKind) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamRef) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.ParameterNotFoundAction != nil { - l = len(*m.ParameterNotFoundAction) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *TypeChecking) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.ExpressionWarnings) > 0 { - for _, e := range m.ExpressionWarnings { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicy) Size() (n int) { - if m == nil { - return 0 +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = 
m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ApplyConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JSONPatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { +func (m *MutatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MutatingAdmissionPolicyBindingList) Size() (n int) { if m == nil { return 0 } @@ -2586,7 +3147,7 @@ func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { return n } -func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { +func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) { if m == nil { return 0 } @@ -2602,16 +3163,10 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { l = m.MatchResources.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.ValidationActions) > 0 { - for _, s := range m.ValidationActions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *ValidatingAdmissionPolicyList) Size() (n int) { +func (m *MutatingAdmissionPolicyList) Size() (n int) { if m == nil { return 0 } @@ -2628,7 +3183,7 @@ func (m *ValidatingAdmissionPolicyList) Size() (n int) { return n } -func (m *ValidatingAdmissionPolicySpec) Size() (n int) { +func (m *MutatingAdmissionPolicySpec) Size() (n int) { if m == nil { return 0 } @@ -2642,8 +3197,14 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) { l = m.MatchConstraints.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Validations) > 0 { - for _, e := range m.Validations { + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Mutations) > 0 { + for _, e := range m.Mutations { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -2652,48 +3213,18 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) { l = len(*m.FailurePolicy) n += 1 + l + sovGenerated(uint64(l)) } - if 
len(m.AuditAnnotations) > 0 { - for _, e := range m.AuditAnnotations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } if len(m.MatchConditions) > 0 { for _, e := range m.MatchConditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Variables) > 0 { - for _, e := range m.Variables { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if m.TypeChecking != nil { - l = m.TypeChecking.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + l = len(m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ValidatingWebhook) Size() (n int) { +func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 } @@ -2734,6 +3265,10 @@ func (m *ValidatingWebhook) Size() (n int) { l = len(*m.MatchPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } if m.ObjectSelector != nil { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -2747,7 +3282,7 @@ func (m *ValidatingWebhook) Size() (n int) { return n } -func (m *ValidatingWebhookConfiguration) Size() (n int) { +func (m *MutatingWebhookConfiguration) Size() (n int) { if m == nil { return 0 } @@ -2764,7 +3299,7 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) { return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { +func (m *MutatingWebhookConfigurationList) Size() (n int) { if m == nil { return 0 } @@ -2781,476 +3316,1911 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) { return n } -func (m *Validation) Size() (n int) { +func (m *Mutation) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = len(m.PatchType) n += 1 + l + sovGenerated(uint64(l)) - if m.Reason != nil { - l = len(*m.Reason) + if m.ApplyConfiguration != nil { + l = m.ApplyConfiguration.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.MessageExpression) + if m.JSONPatch != nil { + l = m.JSONPatch.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *Variable) Size() (n int) { +func (m *ParamKind) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = len(m.APIVersion) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) + l = len(m.Kind) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *WebhookClientConfig) Size() (n int) { +func (m *ParamRef) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Service != nil { - l = m.Service.Size() + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.CABundle != nil { - l = len(m.CABundle) + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) n += 1 + l + 
sovGenerated(uint64(l)) } - if m.URL != nil { - l = len(*m.URL) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *AuditAnnotation) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AuditAnnotation{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ExpressionWarning) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ExpressionWarning{`, - `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, - `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *MatchCondition) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&MatchCondition{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *MatchResources) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ResourceRules { - repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForResourceRules += "}" - repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ExcludeResourceRules { - repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + if m.MatchResources != nil { + l = m.MatchResources.Size() 
+ n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExcludeResourceRules += "}" - s := strings.Join([]string{`&MatchResources{`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ResourceRules:` + repeatedStringForResourceRules + `,`, - `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `}`, - }, "") - return s + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForRules += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForMatchConditions += "}" - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `}`, - }, "") - return s + return n } -func (this *MutatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForWebhooks := "[]MutatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s -} -func (this 
*MutatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]MutatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *NamedRuleWithOperations) String() string { - if this == nil { - return "nil" + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NamedRuleWithOperations{`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ParamKind) String() string { - if this == nil { - return "nil" + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ParamKind{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `}`, - }, "") - return s -} -func (this *ParamRef) String() string { - if this == nil { - return "nil" + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ParamRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s + return n } -func (this *TypeChecking) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForExpressionWarnings := "[]ExpressionWarning{" - for _, f := range this.ExpressionWarnings { - repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExpressionWarnings += "}" - s := 
strings.Join([]string{`&TypeChecking{`, - `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicy) String() string { - if this == nil { - return "nil" + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingAdmissionPolicyBinding) String() string { - if this == nil { - return "nil" + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicyBindingList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicyBindingSpec) String() string { - if this == nil { - return "nil" + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, - `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, - `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, - `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, - `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicyList) String() string { - if this == nil { - return "nil" + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]ValidatingAdmissionPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicySpec) String() string { - if this == nil { - return "nil" + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForValidations := "[]Validation{" - for _, f := range this.Validations { - repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForValidations += "}" - repeatedStringForAuditAnnotations := "[]AuditAnnotation{" - for _, f := range this.AuditAnnotations { - repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForAuditAnnotations += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForMatchConditions += "}" - repeatedStringForVariables := "[]Variable{" - for _, f := range this.Variables { - repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForVariables += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, - `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, - `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, - `Validations:` + repeatedStringForValidations + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `Variables:` + repeatedStringForVariables + `,`, + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason 
!= nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ApplyConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ApplyConfiguration{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, `}`, }, "") return s } -func (this *ValidatingAdmissionPolicyStatus) String() string { +func (this *AuditAnnotation) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhook) String() string { +func (this *ExpressionWarning) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += fmt.Sprintf("%v", f) + "," + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *JSONPatch) String() string { + if this == nil { + return "nil" } - repeatedStringForRules += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + s := strings.Join([]string{`&JSONPatch{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" } - repeatedStringForMatchConditions += "}" - s := strings.Join([]string{`&ValidatingWebhook{`, + s := strings.Join([]string{`&MatchCondition{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, 
"") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfiguration) String() string { +func (this *MutatingAdmissionPolicy) String() string { if this == nil { return "nil" } - repeatedStringForWebhooks := "[]ValidatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + s := strings.Join([]string{`&MutatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfigurationList) String() string { +func (this *MutatingAdmissionPolicyBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *Validation) String() string { +func (this *MutatingAdmissionPolicyBindingSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Validation{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + valueToStringGenerated(this.Reason) + `,`, - `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, `}`, }, "") return s } -func (this *Variable) String() string { +func (this *MutatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicySpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Variable{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + repeatedStringForMutations := "[]Mutation{" + for _, f := range this.Mutations { + repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + "," + } + repeatedStringForMutations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `Mutations:` + repeatedStringForMutations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + 
`MatchConditions:` + repeatedStringForMatchConditions + `,`, + `ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Mutation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mutation{`, + `PatchType:` + fmt.Sprintf("%v", this.PatchType) + `,`, + `ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`, + `JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), 
`&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += 
fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + 
valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONPatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s + return nil } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" +func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3273,17 +5243,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3293,29 +5263,30 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3325,23 +5296,25 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ValueExpression = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, MutatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3364,7 +5337,7 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExpressionWarning) Unmarshal(dAtA 
[]byte) error { +func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3387,15 +5360,15 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3423,13 +5396,49 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldRef = string(dAtA[iNdEx:postIndex]) + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3439,23 +5448,27 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Warning = string(dAtA[iNdEx:postIndex]) + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3478,7 +5491,7 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { } return nil } -func (m *MatchCondition) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3501,17 +5514,17 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + return fmt.Errorf("proto: 
MutatingAdmissionPolicyList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3521,29 +5534,30 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3553,23 +5567,25 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Expression = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, MutatingAdmissionPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3592,7 +5608,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *MatchResources) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3615,15 +5631,15 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3650,16 +5666,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = 
&v1.LabelSelector{} + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3686,16 +5702,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3722,14 +5738,14 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) - if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3756,14 +5772,81 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) - if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Mutations = append(m.Mutations, Mutation{}) + if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3791,8 +5874,7 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + m.ReinvocationPolicy = k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4160,7 +6242,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + s := k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex]) m.ReinvocationPolicy = &s iNdEx = postIndex case 11: @@ -4488,6 +6570,160 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } +func (m *Mutation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mutation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PatchType = PatchType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ApplyConfiguration == nil { + m.ApplyConfiguration = &ApplyConfiguration{} + } + if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
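Every Unmarshal method in this generated file repeats the same hand-rolled protobuf decoding loop. A minimal Go sketch of that pattern, for illustration only (the field handling is hypothetical and covers just the length-delimited case these messages use; it is not part of the generated code):

package example

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift-accumulate loops in the generated code:
// each byte contributes 7 bits, and the high bit marks continuation.
func decodeVarint(b []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if i >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, i, nil
		}
	}
}

// decodeFields walks tag varints the way the generated Unmarshal methods do:
// field number in the high bits, wire type in the low three, then a
// length-prefixed payload for wire type 2.
func decodeFields(b []byte) error {
	for i := 0; i < len(b); {
		wire, next, err := decodeVarint(b, i)
		if err != nil {
			return err
		}
		i = next
		fieldNum, wireType := wire>>3, wire&0x7
		if wireType != 2 {
			return fmt.Errorf("sketch handles only length-delimited fields, got wire type %d", wireType)
		}
		msglen, next, err := decodeVarint(b, i)
		if err != nil {
			return err
		}
		i = next
		if uint64(len(b)-i) < msglen {
			return errors.New("unexpected EOF")
		}
		fmt.Printf("field %d: %d payload bytes\n", fieldNum, msglen)
		i += int(msglen)
	}
	return nil
}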
+ } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JSONPatch == nil { + m.JSONPatch = &JSONPatch{} + } + if err := m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 30f99f64d0..fb47a20056 100644 --- a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1beta1"; +// ApplyConfiguration defines the desired configuration values of an object. +message ApplyConfiguration { + // expression will be evaluated by CEL to create an apply configuration. + // ref: https://github.com/google/cel-spec + // + // Apply configurations are declared in CEL using object initialization. For example, this CEL expression + // returns an apply configuration to set a single field: + // + // Object{ + // spec: Object.spec{ + // serviceAccountName: "example" + // } + // } + // + // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of + // values not included in the apply configuration. + // + // CEL expressions have access to the object types needed to create apply configurations: + // + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. 
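The ApplyConfiguration contract documented here can be exercised through the Go types this change adds. A hedged sketch, reusing the serviceAccountName expression from the comment above; the function name is illustrative:

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// newApplyConfigurationMutation builds a Mutation whose CEL expression
// returns an apply configuration, matching the Object{...} initialization
// form shown in the ApplyConfiguration documentation.
func newApplyConfigurationMutation() admissionregistrationv1beta1.Mutation {
	return admissionregistrationv1beta1.Mutation{
		PatchType: admissionregistrationv1beta1.PatchTypeApplyConfiguration,
		ApplyConfiguration: &admissionregistrationv1beta1.ApplyConfiguration{
			Expression: `Object{spec: Object.spec{serviceAccountName: "example"}}`,
		},
	}
}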
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + optional string expression = 1; +} + // AuditAnnotation describes how to produce an audit annotation for an API request. message AuditAnnotation { // key specifies the audit annotation key. The audit annotation keys of @@ -79,6 +124,75 @@ message ExpressionWarning { optional string warning = 3; } +// JSONPatch defines a JSON Patch. +message JSONPatch { + // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). + // ref: https://github.com/google/cel-spec + // + // expression must return an array of JSONPatch values. + // + // For example, this CEL expression returns a JSON patch to conditionally modify a value: + // + // [ + // JSONPatch{op: "test", path: "/spec/example", value: "Red"}, + // JSONPatch{op: "replace", path: "/spec/example", value: "Green"} + // ] + // + // To define an object for the patch value, use Object types. For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/spec/selector", + // value: Object.spec.selector{matchLabels: {"environment": "test"}} + // } + // ] + // + // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), + // value: "test" + // }, + // ] + // + // CEL expressions have access to the types needed to create JSON patches and objects: + // + // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. + // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, + // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a + // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL + // function may be used to escape path keys containing '/' and '~'. + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. 
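The JSONPatch counterpart looks the same from Go. This sketch reuses the label-addition expression from the documentation above, and the function name is again illustrative:

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// newJSONPatchMutation builds a Mutation whose CEL expression returns a list
// of JSONPatch operations; jsonpatch.escapeKey handles the '/' in the
// label key, per the documentation above.
func newJSONPatchMutation() admissionregistrationv1beta1.Mutation {
	return admissionregistrationv1beta1.Mutation{
		PatchType: admissionregistrationv1beta1.PatchTypeJSONPatch,
		JSONPatch: &admissionregistrationv1beta1.JSONPatch{
			Expression: `[JSONPatch{op: "add", path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), value: "test"}]`,
		},
	}
}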
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) + // as well as: + // + // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + optional string expression = 1; +} + // MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. message MatchCondition { // Name is an identifier for this match condition, used for strategic merging of MatchConditions, @@ -203,6 +317,173 @@ message MatchResources { optional string matchPolicy = 7; } +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain. +message MutatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the MutatingAdmissionPolicy. + optional MutatingAdmissionPolicySpec spec = 2; +} + +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators +// configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget). +// +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message MutatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the MutatingAdmissionPolicyBinding. + optional MutatingAdmissionPolicyBindingSpec spec = 2; +} + +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding. +message MutatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated MutatingAdmissionPolicyBinding items = 2; +} + +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding. 
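With the binding messages defined, the simplest useful object is a binding that names a policy and nothing else. A hedged Go sketch with hypothetical names; omitting paramRef and matchResources defers matching entirely to the policy's matchConstraints and matchConditions:

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// A minimal binding: with no paramRef the policy is evaluated once per
// request, and with no matchResources the binding does not narrow matching.
var exampleBinding = admissionregistrationv1beta1.MutatingAdmissionPolicyBinding{
	ObjectMeta: metav1.ObjectMeta{Name: "example-binding"},
	Spec: admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec{
		PolicyName: "example-policy",
	},
}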
+message MutatingAdmissionPolicyBindingSpec { + // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // matchResources limits what resources match this binding and may be mutated by it. + // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and + // matchConditions before the resource may be mutated. + // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints + // and matchConditions must match for the resource to be mutated. + // Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. + // Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // +optional + optional MatchResources matchResources = 3; +} + +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy. +message MutatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated MutatingAdmissionPolicy items = 2; +} + +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy. +message MutatingAdmissionPolicySpec { + // paramKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // matchConstraints specifies what resources this policy is designed to validate. + // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // Required. + optional MatchResources matchConstraints = 2; + + // variables contain definitions of variables that can be used in composition of other expressions. 
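The paramKind/paramRef pairing described here is easiest to see side by side. A hedged sketch with hypothetical names: the policy declares the parameter Kind, the binding selects a concrete resource of that Kind, and the policy is evaluated once per selected parameter (a fuller policy example follows below):

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// The policy side declares what Kind parameterizes it.
var paramSpec = admissionregistrationv1beta1.MutatingAdmissionPolicySpec{
	ParamKind: &admissionregistrationv1beta1.ParamKind{
		APIVersion: "v1",
		Kind:       "ConfigMap",
	},
	// matchConstraints, mutations, and the other required fields are elided
	// here; see the end-to-end sketch below.
}

// The binding side points at a concrete parameter resource of that Kind.
var paramBindingSpec = admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec{
	PolicyName: "example-policy",
	ParamRef: &admissionregistrationv1beta1.ParamRef{
		Name:      "example-params",
		Namespace: "default",
	},
}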
+ // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except matchConditions because matchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, variables must be sorted by the order of first appearance and acyclic. + // +listType=atomic + // +optional + repeated Variable variables = 3; + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + repeated Mutation mutations = 4; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 5; + + // matchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + optional string reinvocationPolicy = 7; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. 
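Tying the spec fields documented above together, a hedged end-to-end sketch; every name is hypothetical and the match constraints are left minimal rather than production-ready:

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var ignorePolicy = admissionregistrationv1beta1.Ignore

var examplePolicy = admissionregistrationv1beta1.MutatingAdmissionPolicy{
	ObjectMeta: metav1.ObjectMeta{Name: "example-policy"},
	Spec: admissionregistrationv1beta1.MutatingAdmissionPolicySpec{
		// Required; a real policy must also set resourceRules, elided here.
		MatchConstraints: &admissionregistrationv1beta1.MatchResources{},
		// Variables are lazily evaluated and must be declared before use.
		Variables: []admissionregistrationv1beta1.Variable{
			{Name: "targetNamespace", Expression: "object.metadata.namespace"},
		},
		// At least one mutation is required; mutations run in order.
		Mutations: []admissionregistrationv1beta1.Mutation{
			newApplyConfigurationMutation(), // from the ApplyConfiguration sketch earlier
		},
		FailurePolicy: &ignorePolicy,
		// matchConditions further filter requests already matched above.
		MatchConditions: []admissionregistrationv1beta1.MatchCondition{
			{Name: "exclude-leases", Expression: `request.resource.resource != "leases"`},
		},
		ReinvocationPolicy: admissionregistrationv1beta1.NeverReinvocationPolicy,
	},
}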
@@ -401,6 +682,26 @@ message MutatingWebhookConfigurationList { repeated MutatingWebhookConfiguration items = 2; } +// Mutation specifies the CEL expression which is used to apply the Mutation. +message Mutation { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + optional string patchType = 2; + + // applyConfiguration defines the desired configuration values of an object. + // The configuration is applied to the admission object using + // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). + // A CEL expression is used to create apply configuration. + optional ApplyConfiguration applyConfiguration = 3; + + // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. + // A CEL expression is used to create the JSON patch. + optional JSONPatch jsonPatch = 4; +} + // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. // +structType=atomic message NamedRuleWithOperations { diff --git a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/register.go index 363233a2f9..be64c4a5fa 100644 --- a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/register.go +++ b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -54,6 +54,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ValidatingAdmissionPolicyList{}, &ValidatingAdmissionPolicyBinding{}, &ValidatingAdmissionPolicyBindingList{}, + &MutatingAdmissionPolicy{}, + &MutatingAdmissionPolicyList{}, + &MutatingAdmissionPolicyBinding{}, + &MutatingAdmissionPolicyBindingList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 0f59031239..cffdda82c9 100644 --- a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -1073,7 +1073,7 @@ type MutatingWebhook struct { } // ReinvocationPolicyType specifies what type of policy the admission hook uses. -type ReinvocationPolicyType string +type ReinvocationPolicyType = v1.ReinvocationPolicyType const ( // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a @@ -1197,3 +1197,332 @@ type MatchCondition struct { // Required. Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.34 + +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain. +type MutatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the MutatingAdmissionPolicy. 
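The addKnownTypes change above is what lets a runtime.Scheme resolve the four new kinds. A small, assumed-typical usage sketch:

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := admissionregistrationv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// After registration the new kind resolves under
	// admissionregistration.k8s.io/v1beta1.
	gvks, _, err := scheme.ObjectKinds(&admissionregistrationv1beta1.MutatingAdmissionPolicy{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [admissionregistration.k8s.io/v1beta1, Kind=MutatingAdmissionPolicy]
}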
+ Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.34 + +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy. +type MutatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy. +type MutatingAdmissionPolicySpec struct { + // paramKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // matchConstraints specifies what resources this policy is designed to validate. + // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except matchConditions because matchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, variables must be sorted by the order of first appearance and acyclic. + // +listType=atomic + // +optional + Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"` + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"` + + // failurePolicy defines how to handle failures for the admission policy. 
Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// Mutation specifies the CEL expression which is used to apply the Mutation. +type Mutation struct { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"` + + // applyConfiguration defines the desired configuration values of an object. + // The configuration is applied to the admission object using + // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). + // A CEL expression is used to create apply configuration. + ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"` + + // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. + // A CEL expression is used to create the JSON patch. 
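The Mutation type above is a discriminated union: patchType selects which member must be set. A hedged validation sketch that mirrors that rule; the apiserver's real validation lives elsewhere and may differ in detail:

package example

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// validateMutation enforces the union implied by the field docs: exactly the
// member named by patchType is populated, the other stays nil.
func validateMutation(m admissionregistrationv1beta1.Mutation) error {
	switch m.PatchType {
	case admissionregistrationv1beta1.PatchTypeApplyConfiguration:
		if m.ApplyConfiguration == nil || m.JSONPatch != nil {
			return fmt.Errorf("patchType ApplyConfiguration requires applyConfiguration and forbids jsonPatch")
		}
	case admissionregistrationv1beta1.PatchTypeJSONPatch:
		if m.JSONPatch == nil || m.ApplyConfiguration != nil {
			return fmt.Errorf("patchType JSONPatch requires jsonPatch and forbids applyConfiguration")
		}
	default:
		return fmt.Errorf("unsupported patchType %q", m.PatchType)
	}
	return nil
}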
+ JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"` +} + +// PatchType specifies the type of patch operation for a mutation. +// +enum +type PatchType string + +const ( + // ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object. + PatchTypeApplyConfiguration PatchType = "ApplyConfiguration" + // JSONPatch indicates that the object is mutated through JSON Patch. + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// ApplyConfiguration defines the desired configuration values of an object. +type ApplyConfiguration struct { + // expression will be evaluated by CEL to create an apply configuration. + // ref: https://github.com/google/cel-spec + // + // Apply configurations are declared in CEL using object initialization. For example, this CEL expression + // returns an apply configuration to set a single field: + // + // Object{ + // spec: Object.spec{ + // serviceAccountName: "example" + // } + // } + // + // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of + // values not included in the apply configuration. + // + // CEL expressions have access to the object types needed to create apply configurations: + // + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + +// JSONPatch defines a JSON Patch. +type JSONPatch struct { + // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). + // ref: https://github.com/google/cel-spec + // + // expression must return an array of JSONPatch values. 
+ // + // For example, this CEL expression returns a JSON patch to conditionally modify a value: + // + // [ + // JSONPatch{op: "test", path: "/spec/example", value: "Red"}, + // JSONPatch{op: "replace", path: "/spec/example", value: "Green"} + // ] + // + // To define an object for the patch value, use Object types. For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/spec/selector", + // value: Object.spec.selector{matchLabels: {"environment": "test"}} + // } + // ] + // + // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), + // value: "test" + // }, + // ] + // + // CEL expressions have access to the types needed to create JSON patches and objects: + // + // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. + // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, + // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a + // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL + // function may be used to escape path keys containing '/' and '~'. + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) + // as well as: + // + // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). + // + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. 
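The 'jsonpatch.escapeKey' helper referenced throughout these comments is plain RFC 6901 JSON Pointer escaping. An equivalent Go sketch for illustration (the CEL library remains authoritative):

package example

import "strings"

// escapeKey escapes a JSON Pointer path segment per RFC 6901: '~' becomes
// "~0" (first, to avoid double-escaping) and '/' becomes "~1".
func escapeKey(key string) string {
	key = strings.ReplaceAll(key, "~", "~0")
	return strings.ReplaceAll(key, "/", "~1")
}

// escapeKey("example.com/environment") yields "example.com~1environment".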
+ Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.34 + +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators +// configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget). +// +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type MutatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the MutatingAdmissionPolicyBinding. + Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.34 + +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding. +type MutatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding. +type MutatingAdmissionPolicyBindingSpec struct { + // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // matchResources limits what resources match this binding and may be mutated by it. + // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and + // matchConditions before the resource may be mutated. 
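matchResources gives a binding a way to narrow where the policy's mutations apply, on top of whatever the policy's own matchConstraints require. A hedged sketch restricting a hypothetical policy to namespaces labeled environment=test:

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// A narrowed binding: both this selector and the policy's matchConstraints
// and matchConditions must match before a resource is mutated.
var narrowedBindingSpec = admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec{
	PolicyName: "example-policy",
	MatchResources: &admissionregistrationv1beta1.MatchResources{
		NamespaceSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"environment": "test"},
		},
	},
}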
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints + // and matchConditions must match for the resource to be mutated. + // Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. + // Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` +} diff --git a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index cc1509b539..1a97c94729 100644 --- a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,15 @@ package v1beta1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ApplyConfiguration = map[string]string{ + "": "ApplyConfiguration defines the desired configuration values of an object.", + "expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t spec: Object.spec{\n\t serviceAccountName: \"example\"\n\t }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. 
No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.", +} + +func (ApplyConfiguration) SwaggerDoc() map[string]string { + return map_ApplyConfiguration +} + var map_AuditAnnotation = map[string]string{ "": "AuditAnnotation describes how to produce an audit annotation for an API request.", "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", @@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string { return map_ExpressionWarning } +var map_JSONPatch = map[string]string{ + "": "JSONPatch defines a JSON Patch.", + "expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t [\n\t JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/spec/selector\",\n\t value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t }\n\t ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t value: \"test\"\n\t },\n\t ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. 
- 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.", +} + +func (JSONPatch) SwaggerDoc() map[string]string { + return map_JSONPatch +} + var map_MatchCondition = map[string]string{ "": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", @@ -70,6 +88,72 @@ func (MatchResources) SwaggerDoc() map[string]string { return map_MatchResources } +var map_MutatingAdmissionPolicy = map[string]string{ + "": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.", +} + +func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicy +} + +var map_MutatingAdmissionPolicyBinding = map[string]string{ + "": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. 
Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.", +} + +func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBinding +} + +var map_MutatingAdmissionPolicyBindingList = map[string]string{ + "": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBindingList +} + +var map_MutatingAdmissionPolicyBindingSpec = map[string]string{ + "": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.", + "policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.", +} + +func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBindingSpec +} + +var map_MutatingAdmissionPolicyList = map[string]string{ + "": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of MutatingAdmissionPolicy.", +} + +func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyList +} + +var map_MutatingAdmissionPolicySpec = map[string]string{ + "": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.", + "paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API, MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.", + "variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.", + "mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3.
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.", +} + +func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicySpec +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -110,6 +194,17 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_MutatingWebhookConfigurationList } +var map_Mutation = map[string]string{ + "": "Mutation specifies the CEL expression which is used to apply the Mutation.", + "patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.", + "applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.", + "jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.", +} + +func (Mutation) SwaggerDoc() map[string]string { + return map_Mutation +} + var map_NamedRuleWithOperations = map[string]string{ "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index 4c10b1d113..3749a3d141 100644 --- a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration. 
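For orientation, a minimal sketch (not part of this vendor drop) of how the fields documented above compose. Constant and field names such as PatchTypeJSONPatch, JSONPatch.Expression, and the value-typed ReinvocationPolicy are assumed from the upstream admissionregistration/v1beta1 package rather than taken verbatim from this diff, so treat it as a hedged illustration only:

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ExamplePolicy wires the documented pieces together: one JSONPatch mutation,
// constrained to CREATE of pods, reinvoked if later mutations change the object.
func ExamplePolicy() *admissionregistrationv1beta1.MutatingAdmissionPolicy {
	failurePolicy := admissionregistrationv1beta1.Fail
	return &admissionregistrationv1beta1.MutatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "add-example-label"},
		Spec: admissionregistrationv1beta1.MutatingAdmissionPolicySpec{
			// matchConstraints is required; DELETE cannot be matched.
			MatchConstraints: &admissionregistrationv1beta1.MatchResources{
				ResourceRules: []admissionregistrationv1beta1.NamedRuleWithOperations{{
					RuleWithOperations: admissionregistrationv1.RuleWithOperations{
						Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create},
						Rule: admissionregistrationv1.Rule{
							APIGroups:   []string{""},
							APIVersions: []string{"v1"},
							Resources:   []string{"pods"},
						},
					},
				}},
			},
			FailurePolicy:      &failurePolicy,
			ReinvocationPolicy: admissionregistrationv1.IfNeededReinvocationPolicy,
			Mutations: []admissionregistrationv1beta1.Mutation{{
				PatchType: admissionregistrationv1beta1.PatchTypeJSONPatch, // assumed constant name
				JSONPatch: &admissionregistrationv1beta1.JSONPatch{
					// jsonpatch.escapeKey handles the '/' in the label key,
					// per the JSONPatch documentation above.
					Expression: `[JSONPatch{op: "add", path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/injected"), value: "true"}]`,
				},
			}},
		},
	}
}

The Expression string is CEL that returns a list of JSONPatch values; it is evaluated per binding, and reinvoked per binding when reinvocationPolicy is IfNeeded.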
+func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration { + if in == nil { + return nil + } + out := new(ApplyConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { *out = *in @@ -59,6 +75,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONPatch) DeepCopyInto(out *JSONPatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch. +func (in *JSONPatch) DeepCopy() *JSONPatch { + if in == nil { + return nil + } + out := new(JSONPatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { *out = *in @@ -120,6 +152,200 @@ func (in *MatchResources) DeepCopy() *MatchResources { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy. +func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding. +func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList. +func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec. +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList. +func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + if in.Mutations != nil { + in, out := &in.Mutations, &out.Mutations + *out = make([]Mutation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec. +func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -168,7 +394,7 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { } if in.ReinvocationPolicy != nil { in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy - *out = new(ReinvocationPolicyType) + *out = new(admissionregistrationv1.ReinvocationPolicyType) **out = **in } if in.MatchConditions != nil { @@ -255,6 +481,32 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mutation) DeepCopyInto(out *Mutation) { + *out = *in + if in.ApplyConfiguration != nil { + in, out := &in.ApplyConfiguration, &out.ApplyConfiguration + *out = new(ApplyConfiguration) + **out = **in + } + if in.JSONPatch != nil { + in, out := &in.JSONPatch, &out.JSONPatch + *out = new(JSONPatch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation. +func (in *Mutation) DeepCopy() *Mutation { + if in == nil { + return nil + } + out := new(Mutation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { *out = *in diff --git a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go index c1be5122a8..4fc0596b34 100644 --- a/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/openshift/tools/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,78 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
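A quick illustration (not part of the diff) of what the generated DeepCopyInto above guarantees: every pointer and slice field is reallocated, so mutating the copy leaves the original untouched. Assumes only the vendored v1beta1 package:

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	ignore := admissionregistrationv1beta1.Ignore
	orig := &admissionregistrationv1beta1.MutatingAdmissionPolicySpec{
		FailurePolicy: &ignore,
		MatchConditions: []admissionregistrationv1beta1.MatchCondition{
			{Name: "only-pods", Expression: `request.resource.resource == "pods"`},
		},
	}
	cp := orig.DeepCopy()
	*cp.FailurePolicy = admissionregistrationv1beta1.Fail
	cp.MatchConditions[0].Name = "renamed"
	// The original is untouched: DeepCopyInto allocated a fresh FailurePolicy
	// pointer and copied the MatchConditions slice, so no memory is shared.
	fmt.Println(*orig.FailurePolicy, orig.MatchConditions[0].Name) // Ignore only-pods
}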
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/apps/v1/generated.proto index 38c8997e99..5885a62225 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/apps/v1/generated.proto @@ -530,7 +530,7 @@ message RollingUpdateDaemonSet { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1/types.go b/openshift/tools/vendor/k8s.io/api/apps/v1/types.go index 1362d875d8..4cf54cc99b 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1/types.go +++ b/openshift/tools/vendor/k8s.io/api/apps/v1/types.go @@ -635,7 +635,7 @@ type RollingUpdateDaemonSet struct { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. 
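The lifecycle hooks above are plain accessors that a consumer can compare against a target server version; a minimal sketch (not part of the diff), using only the methods shown:

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	var p admissionregistrationv1beta1.MutatingAdmissionPolicy
	introMaj, introMin := p.APILifecycleIntroduced() // 1, 34
	depMaj, depMin := p.APILifecycleDeprecated()     // 1, 37
	remMaj, remMin := p.APILifecycleRemoved()        // 1, 40
	fmt.Printf("introduced %d.%d, deprecated %d.%d, removed %d.%d\n",
		introMaj, introMin, depMaj, depMin, remMaj, remMin)
}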
// Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index f44ba7bc33..ac54033fd6 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. 
Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto b/openshift/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto index 0601efc3c4..b61dc490db 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -316,6 +316,9 @@ message Scale { message ScaleSpec { // replicas is the number of observed instances of the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1beta1/types.go b/openshift/tools/vendor/k8s.io/api/apps/v1beta1/types.go index 5530c990da..cd140be12f 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/openshift/tools/vendor/k8s.io/api/apps/v1beta1/types.go @@ -33,6 +33,9 @@ const ( type ScaleSpec struct { // replicas is the number of observed instances of the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } @@ -60,6 +63,7 @@ type ScaleStatus struct { // +k8s:prerelease-lifecycle-gen:deprecated=1.8 // +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale +// +k8s:isSubresource=/scale // Scale represents a scaling request for a resource. type Scale struct { diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto b/openshift/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto index 68c463e257..37c6d5ae1b 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -536,7 +536,7 @@ message RollingUpdateDaemonSet { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may @@ -614,6 +614,9 @@ message Scale { message ScaleSpec { // desired number of instances for the scaled object. 
// +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types.go b/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types.go index 491afc59f5..e9dc85df05 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types.go @@ -35,6 +35,9 @@ const ( type ScaleSpec struct { // desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } @@ -63,6 +66,7 @@ type ScaleStatus struct { // +k8s:prerelease-lifecycle-gen:deprecated=1.9 // +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale +// +k8s:isSubresource=/scale // Scale represents a scaling request for a resource. type Scale struct { @@ -681,7 +685,7 @@ type RollingUpdateDaemonSet struct { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 4089434151..34d80af58d 100644 --- a/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. 
Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { diff --git a/openshift/tools/vendor/k8s.io/api/authorization/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/authorization/v1/generated.proto index 37b05b8552..ff529c969e 100644 --- a/openshift/tools/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/authorization/v1/generated.proto @@ -167,16 +167,10 @@ message ResourceAttributes { optional string name = 7; // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). // +optional optional FieldSelectorAttributes fieldSelector = 8; // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). 
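As a concrete rendering of the maxSurge/maxUnavailable semantics above (illustrative, not part of the patch): surge onto 30% of nodes while keeping maxUnavailable at 0; the docs only forbid both fields being 0 at once. Uses apps/v1 and intstr from apimachinery:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// SurgeStrategy lets up to 30% of nodes briefly run an old and a new DaemonSet
// pod side by side, while never leaving a node without a ready pod.
func SurgeStrategy() appsv1.DaemonSetUpdateStrategy {
	maxSurge := intstr.FromString("30%")
	maxUnavailable := intstr.FromInt32(0) // valid because MaxSurge is non-zero
	return appsv1.DaemonSetUpdateStrategy{
		Type: appsv1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDaemonSet{
			MaxSurge:       &maxSurge,
			MaxUnavailable: &maxUnavailable,
		},
	}
}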
// +optional optional LabelSelectorAttributes labelSelector = 9; } diff --git a/openshift/tools/vendor/k8s.io/api/authorization/v1/types.go b/openshift/tools/vendor/k8s.io/api/authorization/v1/types.go index 36f5fa4107..251e776b02 100644 --- a/openshift/tools/vendor/k8s.io/api/authorization/v1/types.go +++ b/openshift/tools/vendor/k8s.io/api/authorization/v1/types.go @@ -119,15 +119,9 @@ type ResourceAttributes struct { // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). // +optional FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). // +optional LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` } diff --git a/openshift/tools/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index dc6b8a89ec..29d0aa8463 100644 --- a/openshift/tools/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -87,8 +87,8 @@ var map_ResourceAttributes = map[string]string{ "resource": "Resource is one of the existing resource types. \"*\" means all.", "subresource": "Subresource is one of the existing resource types. \"\" means none.", "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", - "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", - "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/openshift/tools/vendor/k8s.io/api/autoscaling/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/autoscaling/v1/generated.proto index 68c35b6b22..a17d7989db 100644 --- a/openshift/tools/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -472,6 +472,9 @@ message Scale { message ScaleSpec { // replicas is the desired number of instances for the scaled object. 
// +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/openshift/tools/vendor/k8s.io/api/autoscaling/v1/types.go b/openshift/tools/vendor/k8s.io/api/autoscaling/v1/types.go index 85c609e5c7..e1e8809fe9 100644 --- a/openshift/tools/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/openshift/tools/vendor/k8s.io/api/autoscaling/v1/types.go @@ -117,6 +117,7 @@ type HorizontalPodAutoscalerList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 +// +k8s:isSubresource=/scale // Scale represents a scaling request for a resource. type Scale struct { @@ -138,6 +139,9 @@ type Scale struct { type ScaleSpec struct { // replicas is the desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } diff --git a/openshift/tools/vendor/k8s.io/api/batch/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/batch/v1/generated.proto index d3aeae0adb..c0ce8cef26 100644 --- a/openshift/tools/vendor/k8s.io/api/batch/v1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/batch/v1/generated.proto @@ -226,7 +226,8 @@ message JobSpec { optional SuccessPolicy successPolicy = 16; // Specifies the number of retries before marking this job failed. - // Defaults to 6 + // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. + // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647. // +optional optional int32 backoffLimit = 7; @@ -329,8 +330,6 @@ message JobSpec { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. - // This is on by default. // +optional optional string podReplacementPolicy = 14; @@ -570,7 +569,7 @@ message PodFailurePolicyRule { message SuccessPolicy { // rules represents the list of alternative rules for the declaring the Jobs // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, - // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. + // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed. // The terminal state for such a Job has the "Complete" condition. // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, // other rules are ignored. At most 20 elements are allowed. diff --git a/openshift/tools/vendor/k8s.io/api/batch/v1/types.go b/openshift/tools/vendor/k8s.io/api/batch/v1/types.go index 6c0007c21e..9183c073d2 100644 --- a/openshift/tools/vendor/k8s.io/api/batch/v1/types.go +++ b/openshift/tools/vendor/k8s.io/api/batch/v1/types.go @@ -257,7 +257,7 @@ type PodFailurePolicy struct { type SuccessPolicy struct { // rules represents the list of alternative rules for the declaring the Jobs // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, - // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. + // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed. // The terminal state for such a Job has the "Complete" condition. // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, // other rules are ignored. 
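The +k8s:isSubresource=/scale tag above marks Scale as a subresource, which in practice is read and written through dedicated scale verbs. A hedged client-go sketch (not part of the diff), assuming a pre-built clientset and a hypothetical Deployment named "web":

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Resize reads the /scale subresource of a Deployment and writes the desired
// replica count back through the same subresource.
func Resize(ctx context.Context, cs kubernetes.Interface) error {
	scale, err := cs.AppsV1().Deployments("default").GetScale(ctx, "web", metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = 5 // autoscaling/v1 ScaleSpec.replicas: the desired instance count
	_, err = cs.AppsV1().Deployments("default").UpdateScale(ctx, "web", scale, metav1.UpdateOptions{})
	return err
}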
At most 20 elements are allowed. @@ -347,7 +347,8 @@ type JobSpec struct { SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` // Specifies the number of retries before marking this job failed. - // Defaults to 6 + // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. + // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647. // +optional BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` @@ -455,8 +456,6 @@ type JobSpec struct { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. - // This is on by default. // +optional PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` diff --git a/openshift/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index ffd4e4f5fe..451f4609f2 100644 --- a/openshift/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -117,7 +117,7 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.", - "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", + "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.", "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.", "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. 
Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", @@ -126,7 +126,7 @@ var map_JobSpec = map[string]string{ "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", - "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. 
Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.", "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).", } @@ -206,7 +206,7 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string { var map_SuccessPolicy = map[string]string{ "": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.", - "rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.", + "rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SuccessCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.", } func (SuccessPolicy) SwaggerDoc() map[string]string { diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/certificates/v1/generated.proto index dac7c7f5f2..24528fc8bc 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1/generated.proto @@ -39,6 +39,8 @@ option go_package = "k8s.io/api/certificates/v1"; // This API can be used to request client certificates to authenticate to kube-apiserver // (with the "kubernetes.io/kube-apiserver-client" signerName), // or to obtain certificates from custom non-Kubernetes signers. 
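To make the revised backoffLimit default above concrete, a hedged batch/v1 sketch (not part of the diff): an Indexed Job that sets backoffLimitPerIndex and leaves backoffLimit nil, which then defaults to 2147483647 rather than 6:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

func IndexedJobSpec() batchv1.JobSpec {
	indexed := batchv1.IndexedCompletion
	completions := int32(10)
	perIndex := int32(2)
	return batchv1.JobSpec{
		CompletionMode:       &indexed,
		Completions:          &completions,
		BackoffLimitPerIndex: &perIndex, // requires Indexed mode and restartPolicy=Never
		// BackoffLimit is left nil: with backoffLimitPerIndex set it defaults
		// to 2147483647, not 6, per the doc change above.
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyNever,
				Containers:    []corev1.Container{{Name: "work", Image: "busybox"}},
			},
		},
	}
}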
+// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval message CertificateSigningRequest { // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -203,6 +205,11 @@ message CertificateSigningRequestStatus { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember repeated CertificateSigningRequestCondition conditions = 1; // certificate is populated with an issued certificate by the signer after an Approved condition is present. diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1/types.go b/openshift/tools/vendor/k8s.io/api/certificates/v1/types.go index ba8009840d..71203e80d5 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1/types.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1/types.go @@ -39,6 +39,8 @@ import ( // This API can be used to request client certificates to authenticate to kube-apiserver // (with the "kubernetes.io/kube-apiserver-client" signerName), // or to obtain certificates from custom non-Kubernetes signers. +// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval type CertificateSigningRequest struct { metav1.TypeMeta `json:",inline"` // +optional @@ -178,6 +180,11 @@ type CertificateSigningRequestStatus struct { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` // certificate is populated with an issued certificate by the signer after an Approved condition is present. diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go index a62a400596..c260f0436d 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -25,11 +25,14 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. 
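The zeroOrOneOf tags above encode that a CSR may carry an Approved or a Denied condition but not both, and the /approval subresource tag corresponds to a dedicated client verb. A hedged client-go sketch (not part of the diff); reason and message strings are placeholders:

package example

import (
	"context"

	certificatesv1 "k8s.io/api/certificates/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Approve appends an Approved condition (which must not coexist with Denied)
// and persists it via the /approval subresource.
func Approve(ctx context.Context, cs kubernetes.Interface, csr *certificatesv1.CertificateSigningRequest) error {
	csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{
		Type:    certificatesv1.CertificateApproved,
		Status:  corev1.ConditionTrue,
		Reason:  "ExampleApprove",
		Message: "approved by example controller",
	})
	_, err := cs.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, csr.Name, csr, metav1.UpdateOptions{})
	return err
}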
@@ -127,10 +130,126 @@ func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo +func (m *PodCertificateRequest) Reset() { *m = PodCertificateRequest{} } +func (*PodCertificateRequest) ProtoMessage() {} +func (*PodCertificateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{3} +} +func (m *PodCertificateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequest.Merge(m, src) +} +func (m *PodCertificateRequest) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequest proto.InternalMessageInfo + +func (m *PodCertificateRequestList) Reset() { *m = PodCertificateRequestList{} } +func (*PodCertificateRequestList) ProtoMessage() {} +func (*PodCertificateRequestList) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{4} +} +func (m *PodCertificateRequestList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequestList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequestList.Merge(m, src) +} +func (m *PodCertificateRequestList) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequestList) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequestList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequestList proto.InternalMessageInfo + +func (m *PodCertificateRequestSpec) Reset() { *m = PodCertificateRequestSpec{} } +func (*PodCertificateRequestSpec) ProtoMessage() {} +func (*PodCertificateRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{5} +} +func (m *PodCertificateRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequestSpec.Merge(m, src) +} +func (m *PodCertificateRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequestSpec proto.InternalMessageInfo + +func (m *PodCertificateRequestStatus) Reset() { *m = PodCertificateRequestStatus{} } +func (*PodCertificateRequestStatus) ProtoMessage() {} +func (*PodCertificateRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{6} +} +func (m *PodCertificateRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequestStatus.Merge(m, src) +} +func (m *PodCertificateRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequestStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle") proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList") proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec") + proto.RegisterType((*PodCertificateRequest)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequest") + proto.RegisterType((*PodCertificateRequestList)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestList") + proto.RegisterType((*PodCertificateRequestSpec)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestSpec") + proto.RegisterType((*PodCertificateRequestStatus)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestStatus") } func init() { @@ -138,35 +257,65 @@ func init() { } var fileDescriptor_f73d5fe56c015bb8 = []byte{ - // 437 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6a, 0xdb, 0x40, - 0x10, 0xc6, 0xb5, 0x69, 0x02, 0xc9, 0xba, 0x85, 0xa2, 0x42, 0x31, 0x3e, 0x6c, 0x8c, 0x4f, 0xb9, - 0x74, 0x37, 0x36, 0x69, 0xc9, 0x59, 0x85, 0x42, 0xa1, 0x7f, 0x40, 0xe9, 0xa5, 0xa1, 0x87, 0xae, - 0xd7, 0x13, 0x79, 0x6b, 0x4b, 0x5a, 0x76, 0x57, 0x86, 0xde, 0x0a, 0x7d, 0x81, 0x3e, 0x96, 0x8f, - 0x69, 0x4f, 0x39, 0x85, 0x5a, 0x7d, 0x91, 0xb2, 0x6b, 0xd9, 0x12, 0x55, 0x8b, 0x4b, 0x6e, 0x9a, - 0xd1, 0xfc, 0xbe, 0x6f, 0xbe, 0x11, 0xc2, 0xa7, 0xb3, 0x73, 0x43, 0x65, 0xce, 0xb8, 0x92, 0x4c, - 0x80, 0xb6, 0xf2, 0x4a, 0x0a, 0x6e, 0xc1, 0xb0, 0xc5, 0x90, 0xcf, 0xd5, 0x94, 0x0f, 0x59, 0x02, - 0x19, 0x68, 0x6e, 0x61, 0x42, 0x95, 0xce, 0x6d, 0x1e, 0xf6, 0xd7, 0x04, 0xe5, 0x4a, 0xd2, 0x26, - 0x41, 0x37, 0x44, 0xef, 0x49, 0x22, 0xed, 0xb4, 0x18, 0x53, 0x91, 0xa7, 0x2c, 0xc9, 0x93, 0x9c, - 0x79, 0x70, 0x5c, 0x5c, 0xf9, 0xca, 0x17, 0xfe, 0x69, 0x2d, 0xd8, 0x3b, 0xab, 0x57, 0x48, 0xb9, - 0x98, 0xca, 0x0c, 0xf4, 0x67, 0xa6, 0x66, 0x89, 0x6b, 0x18, 0x96, 0x82, 0xe5, 0x6c, 0xd1, 0x5a, - 0xa3, 0xc7, 0xfe, 0x45, 0xe9, 0x22, 0xb3, 0x32, 0x85, 0x16, 0xf0, 0x6c, 0x17, 0x60, 0xc4, 0x14, - 0x52, 0xfe, 0x27, 0x37, 0xf8, 0x81, 0x70, 0xf8, 0x7c, 0x5e, 0x18, 0x0b, 0xfa, 0x9d, 0x2e, 0x8c, - 0x8d, 0x8a, 0x6c, 0x32, 0x87, 0xf0, 0x23, 0x3e, 0x74, 0xab, 0x4d, 0xb8, 0xe5, 0x5d, 0xd4, 0x47, - 0x27, 0x9d, 0xd1, 0x29, 0xad, 0x2f, 0xb3, 0x75, 0xa0, 0x6a, 0x96, 0xb8, 0x86, 0xa1, 0x6e, 0x9a, - 0x2e, 0x86, 0xf4, 0xed, 0xf8, 0x13, 0x08, 0xfb, 0x1a, 0x2c, 0x8f, 0xc2, 0xe5, 0xed, 0x71, 0x50, - 0xde, 0x1e, 0xe3, 0xba, 0x17, 0x6f, 0x55, 0xc3, 0x4b, 0xbc, 0x6f, 0x14, 0x88, 0xee, 0x9e, 0x57, - 0x3f, 0xa7, 0xbb, 0xee, 0x4e, 0xdb, 0x5b, 0x5e, 0x28, 0x10, 0xd1, 0xfd, 0xca, 0x65, 0xdf, 0x55, - 0xb1, 0xd7, 0x1c, 0x7c, 0x47, 0xf8, 0x71, 0x7b, 0xfc, 0x95, 0x34, 0x36, 0xfc, 0xd0, 0x0a, 0x46, - 0xff, 0x2f, 0x98, 0xa3, 0x7d, 0xac, 0x87, 0x95, 0xe1, 0xe1, 0xa6, 0xd3, 0x08, 0xf5, 0x1e, 0x1f, - 0x48, 0x0b, 0xa9, 0xe9, 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0x67, 0x77, 0x49, 0x15, 0x3d, 0xa8, - 0x0c, 0x0e, 0x5e, 0x3a, 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0xd7, 0x4c, 0x2e, 
0x74, 0x38, 0xc2, - 0xd8, 0xc8, 0x24, 0x03, 0xfd, 0x86, 0xa7, 0xe0, 0x53, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0xbe, 0x89, - 0x1b, 0x53, 0xe1, 0x53, 0xdc, 0xb1, 0xb5, 0x8c, 0xff, 0x0a, 0x47, 0xd1, 0xa3, 0x0a, 0xea, 0x34, - 0x1c, 0xe2, 0xe6, 0x5c, 0xf4, 0x62, 0xb9, 0x22, 0xc1, 0xf5, 0x8a, 0x04, 0x37, 0x2b, 0x12, 0x7c, - 0x29, 0x09, 0x5a, 0x96, 0x04, 0x5d, 0x97, 0x04, 0xdd, 0x94, 0x04, 0xfd, 0x2c, 0x09, 0xfa, 0xf6, - 0x8b, 0x04, 0x97, 0xfd, 0x5d, 0xbf, 0xdd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x1c, 0xcb, - 0xdd, 0x99, 0x03, 0x00, 0x00, + // 918 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0xb6, 0x69, 0x9b, 0x49, 0x5b, 0xda, 0x61, 0x17, 0x99, 0x22, 0x39, 0x21, 0x07, + 0x54, 0x90, 0xb0, 0xb7, 0xa5, 0xb0, 0x2b, 0x10, 0x48, 0x75, 0x0a, 0x52, 0xe9, 0x6e, 0x36, 0x9a, + 0x74, 0xf9, 0xb1, 0x5a, 0x24, 0x1c, 0xe7, 0x25, 0x19, 0x1a, 0x7b, 0x8c, 0x67, 0x5c, 0xb5, 0x37, + 0x24, 0xfe, 0x01, 0xfe, 0x23, 0xae, 0x3d, 0x2e, 0x5c, 0xd8, 0x53, 0xa0, 0xe6, 0x6f, 0xe0, 0xb2, + 0x27, 0xe4, 0xb1, 0x9d, 0x5f, 0x4e, 0xb6, 0xd9, 0x1e, 0x7a, 0xcb, 0xbc, 0x79, 0xdf, 0xcf, 0xfb, + 0xbe, 0x99, 0x37, 0x56, 0xd0, 0xbd, 0xd3, 0x07, 0x5c, 0xa7, 0xcc, 0xb0, 0x3c, 0x6a, 0xd8, 0xe0, + 0x0b, 0xda, 0xa6, 0xb6, 0x25, 0x80, 0x1b, 0x67, 0xbb, 0x56, 0xcf, 0xeb, 0x5a, 0xbb, 0x46, 0x07, + 0x5c, 0xf0, 0x2d, 0x01, 0x2d, 0xdd, 0xf3, 0x99, 0x60, 0xb8, 0x1c, 0x2b, 0x74, 0xcb, 0xa3, 0xfa, + 0xa8, 0x42, 0x4f, 0x15, 0xdb, 0x1f, 0x76, 0xa8, 0xe8, 0x06, 0x4d, 0xdd, 0x66, 0x8e, 0xd1, 0x61, + 0x1d, 0x66, 0x48, 0x61, 0x33, 0x68, 0xcb, 0x95, 0x5c, 0xc8, 0x5f, 0x31, 0x70, 0x7b, 0x7f, 0x68, + 0xc1, 0xb1, 0xec, 0x2e, 0x75, 0xc1, 0xbf, 0x30, 0xbc, 0xd3, 0x4e, 0x14, 0xe0, 0x86, 0x03, 0xc2, + 0x32, 0xce, 0x32, 0x36, 0xb6, 0x8d, 0x59, 0x2a, 0x3f, 0x70, 0x05, 0x75, 0x20, 0x23, 0xf8, 0xe4, + 0x3a, 0x01, 0xb7, 0xbb, 0xe0, 0x58, 0x93, 0xba, 0xca, 0x9f, 0x0a, 0xc2, 0xd5, 0x5e, 0xc0, 0x05, + 0xf8, 0x27, 0x7e, 0xc0, 0x85, 0x19, 0xb8, 0xad, 0x1e, 0xe0, 0x1f, 0xd1, 0x6a, 0x64, 0xad, 0x65, + 0x09, 0x4b, 0x55, 0xca, 0xca, 0x4e, 0x71, 0xef, 0x9e, 0x3e, 0x3c, 0x99, 0x41, 0x05, 0xdd, 0x3b, + 0xed, 0x44, 0x01, 0xae, 0x47, 0xd9, 0xfa, 0xd9, 0xae, 0xfe, 0xb8, 0xf9, 0x13, 0xd8, 0xe2, 0x11, + 0x08, 0xcb, 0xc4, 0x97, 0xfd, 0x52, 0x2e, 0xec, 0x97, 0xd0, 0x30, 0x46, 0x06, 0x54, 0xfc, 0x14, + 0x2d, 0x71, 0x0f, 0x6c, 0x75, 0x41, 0xd2, 0x1f, 0xe8, 0xd7, 0x9d, 0xbb, 0x9e, 0x75, 0xd9, 0xf0, + 0xc0, 0x36, 0xd7, 0x92, 0x2a, 0x4b, 0xd1, 0x8a, 0x48, 0x66, 0xe5, 0x0f, 0x05, 0xbd, 0x95, 0x4d, + 0x7f, 0x48, 0xb9, 0xc0, 0xcf, 0x32, 0x8d, 0xe9, 0xf3, 0x35, 0x16, 0xa9, 0x65, 0x5b, 0x9b, 0x49, + 0xc1, 0xd5, 0x34, 0x32, 0xd2, 0xd4, 0xf7, 0x28, 0x4f, 0x05, 0x38, 0x5c, 0x5d, 0x28, 0x2f, 0xee, + 0x14, 0xf7, 0xf6, 0x6f, 0xd2, 0x95, 0xb9, 0x9e, 0x14, 0xc8, 0x1f, 0x45, 0x28, 0x12, 0x13, 0x2b, + 0xbf, 0x4e, 0xed, 0x29, 0x6a, 0x1a, 0xef, 0x21, 0xc4, 0x69, 0xc7, 0x05, 0xbf, 0x66, 0x39, 0x20, + 0xbb, 0x2a, 0x0c, 0x0f, 0xbf, 0x31, 0xd8, 0x21, 0x23, 0x59, 0xf8, 0x63, 0x54, 0x14, 0x43, 0x8c, + 0xbc, 0x85, 0x82, 0xf9, 0x66, 0x22, 0x2a, 0x8e, 0x54, 0x20, 0xa3, 0x79, 0x95, 0xdf, 0x17, 0xd0, + 0xdd, 0x3a, 0x6b, 0x55, 0x87, 0xbd, 0x10, 0xf8, 0x39, 0x00, 0x2e, 0x6e, 0x61, 0x62, 0x7e, 0x18, + 0x9b, 0x98, 0xcf, 0xae, 0x3f, 0xdb, 0xa9, 0x46, 0x67, 0x0d, 0x0d, 0x06, 0xb4, 0xcc, 0x85, 0x25, + 0x02, 0xae, 0x2e, 0xca, 0x02, 0x9f, 0xdf, 0xb4, 0x80, 0x84, 0x98, 0x1b, 0x49, 0x89, 0xe5, 0x78, + 0x4d, 0x12, 0x78, 0xe5, 0x2f, 0x05, 0xbd, 0x3d, 0x55, 0x77, 0x0b, 0xe3, 0xf9, 0x6c, 0x7c, 0x3c, + 0xef, 0xdf, 0xb0, 0xc3, 
0x19, 0x13, 0xfa, 0x5f, 0x7e, 0x46, 0x67, 0x37, 0x1e, 0xd2, 0xf7, 0xd1, + 0x8a, 0xc7, 0x5a, 0x52, 0x10, 0x0f, 0xe8, 0x1b, 0x89, 0x60, 0xa5, 0x1e, 0x87, 0x49, 0xba, 0x8f, + 0x8f, 0xd1, 0xb2, 0xc7, 0x5a, 0x4f, 0x8e, 0x0e, 0xe5, 0xed, 0x15, 0xcc, 0x8f, 0xd2, 0xe3, 0xaf, + 0xcb, 0xe8, 0xcb, 0x7e, 0xe9, 0xdd, 0x59, 0x5f, 0x48, 0x71, 0xe1, 0x01, 0xd7, 0x9f, 0x1c, 0x1d, + 0x92, 0x04, 0x81, 0xbf, 0x46, 0x98, 0x83, 0x7f, 0x46, 0x6d, 0x38, 0xb0, 0x6d, 0x16, 0xb8, 0x42, + 0x5a, 0x58, 0x92, 0xe0, 0xed, 0x04, 0x8c, 0x1b, 0x99, 0x0c, 0x32, 0x45, 0x85, 0x7b, 0x68, 0x6b, + 0x3c, 0x1a, 0x79, 0xcc, 0x4b, 0xd4, 0x17, 0x09, 0x6a, 0xab, 0x31, 0x99, 0x30, 0x9f, 0xdd, 0x2c, + 0x18, 0x7f, 0x83, 0x56, 0x5d, 0xd6, 0x02, 0xe9, 0x77, 0x59, 0x16, 0xf9, 0x34, 0x9d, 0x87, 0x5a, + 0x12, 0x7f, 0xd9, 0x2f, 0xbd, 0xf7, 0x6a, 0x76, 0x9a, 0x49, 0x06, 0x2c, 0x5c, 0x43, 0x2b, 0xd1, + 0xef, 0xc8, 0xfb, 0x8a, 0xc4, 0xee, 0xa7, 0x37, 0x51, 0x8b, 0xc3, 0xf3, 0x39, 0x4e, 0x21, 0xf8, + 0x21, 0xba, 0xe3, 0x58, 0xe7, 0x5f, 0x9e, 0x7b, 0xd4, 0xb7, 0x04, 0x65, 0x6e, 0x03, 0x6c, 0xe6, + 0xb6, 0xb8, 0xba, 0x5a, 0x56, 0x76, 0xf2, 0xa6, 0x1a, 0xf6, 0x4b, 0x77, 0x1e, 0x4d, 0xd9, 0x27, + 0x53, 0x55, 0xf8, 0x3e, 0x5a, 0xf7, 0x4e, 0xe9, 0x79, 0x3d, 0x68, 0xf6, 0xa8, 0x7d, 0x0c, 0x17, + 0x6a, 0xa1, 0xac, 0xec, 0xac, 0x99, 0x5b, 0x61, 0xbf, 0xb4, 0x5e, 0x3f, 0x3e, 0xfa, 0x6e, 0xb0, + 0x41, 0xc6, 0xf3, 0x70, 0x15, 0x6d, 0x79, 0x3e, 0x63, 0xed, 0xc7, 0xed, 0x3a, 0xe3, 0x1c, 0x38, + 0xa7, 0xcc, 0x55, 0x91, 0x14, 0xdf, 0x8d, 0x2e, 0xa6, 0x3e, 0xb9, 0x49, 0xb2, 0xf9, 0x95, 0xbf, + 0x17, 0xd1, 0x3b, 0xaf, 0xf8, 0x12, 0x60, 0x1b, 0xa1, 0xc8, 0x26, 0x8d, 0x1c, 0x73, 0x55, 0x91, + 0x4f, 0xcf, 0x98, 0xef, 0x55, 0x57, 0x53, 0xdd, 0xf0, 0xa9, 0x0c, 0x42, 0x9c, 0x8c, 0x60, 0xf1, + 0x21, 0xda, 0x1c, 0x79, 0xc1, 0xd5, 0xae, 0x45, 0xdd, 0xe4, 0xcd, 0xa8, 0x89, 0x72, 0xb3, 0x3a, + 0xb1, 0x4f, 0x32, 0x0a, 0xfc, 0x2d, 0x2a, 0xb8, 0x4c, 0x98, 0xd0, 0x66, 0x7e, 0x3c, 0xef, 0xc5, + 0xbd, 0x0f, 0xe6, 0x73, 0x7a, 0x42, 0x1d, 0x30, 0xd7, 0xc3, 0x7e, 0xa9, 0x50, 0x4b, 0x01, 0x64, + 0xc8, 0xc2, 0x6d, 0xb4, 0xd1, 0x84, 0x0e, 0x75, 0x09, 0xb4, 0x7d, 0xe0, 0xdd, 0x03, 0x21, 0x9f, + 0xc0, 0xeb, 0xd1, 0x71, 0xd8, 0x2f, 0x6d, 0x98, 0x63, 0x14, 0x32, 0x41, 0xc5, 0x27, 0xd1, 0xfc, + 0x8b, 0x83, 0xb6, 0x00, 0x5f, 0xce, 0xff, 0xeb, 0x55, 0x58, 0x8b, 0xdf, 0x49, 0xac, 0x27, 0x03, + 0x92, 0xf9, 0xd5, 0xe5, 0x95, 0x96, 0x7b, 0x7e, 0xa5, 0xe5, 0x5e, 0x5c, 0x69, 0xb9, 0x5f, 0x42, + 0x4d, 0xb9, 0x0c, 0x35, 0xe5, 0x79, 0xa8, 0x29, 0x2f, 0x42, 0x4d, 0xf9, 0x27, 0xd4, 0x94, 0xdf, + 0xfe, 0xd5, 0x72, 0x4f, 0xcb, 0xd7, 0xfd, 0xd9, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x6c, + 0x5a, 0xc4, 0x8f, 0x0a, 0x00, 0x00, } func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { @@ -292,6 +441,261 @@ func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *PodCertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, 
err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodCertificateRequestList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequestList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodCertificateRequestSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequestSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProofOfPossession != nil { + i -= len(m.ProofOfPossession) + copy(dAtA[i:], m.ProofOfPossession) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProofOfPossession))) + i-- + dAtA[i] = 0x52 + } + if m.PKIXPublicKey != nil { + i -= len(m.PKIXPublicKey) + copy(dAtA[i:], m.PKIXPublicKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PKIXPublicKey))) + i-- + dAtA[i] = 0x4a + } + if m.MaxExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds)) + i-- + dAtA[i] = 0x40 + } + i -= len(m.NodeUID) + copy(dAtA[i:], m.NodeUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeUID))) + i-- + dAtA[i] = 0x3a + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceAccountUID) + copy(dAtA[i:], m.ServiceAccountUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountUID))) + i-- + dAtA[i] = 0x2a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x22 + i -= len(m.PodUID) + copy(dAtA[i:], m.PodUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodUID))) + i-- + dAtA[i] = 0x1a + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodCertificateRequestStatus) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequestStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NotAfter != nil { + { + size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.BeginRefreshAt != nil { + { + size, err := m.BeginRefreshAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.NotBefore != nil { + { + size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.CertificateChain) + copy(dAtA[i:], m.CertificateChain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChain))) + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -346,25 +750,120 @@ func (m *ClusterTrustBundleSpec) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ClusterTrustBundle) String() string { - if this == nil { - return "nil" +func (m *PodCertificateRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ClusterTrustBundle{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterTrustBundleList) String() string { - if this == nil { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCertificateRequestList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodCertificateRequestSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServiceAccountUID) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeUID) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds)) + } + if m.PKIXPublicKey != nil { + l = len(m.PKIXPublicKey) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProofOfPossession != nil { + l = len(m.ProofOfPossession) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodCertificateRequestStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.CertificateChain) + n += 1 + l + sovGenerated(uint64(l)) + if m.NotBefore != nil { + l = m.NotBefore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BeginRefreshAt != nil { + l = m.BeginRefreshAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NotAfter != nil { + l = m.NotAfter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { return "nil" } repeatedStringForItems := "[]ClusterTrustBundle{" @@ -390,6 +889,72 @@ func (this *ClusterTrustBundleSpec) String() string { }, "") return s } +func (this *PodCertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCertificateRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodCertificateRequestSpec", "PodCertificateRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodCertificateRequestStatus", "PodCertificateRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodCertificateRequestList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodCertificateRequest{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodCertificateRequest", "PodCertificateRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodCertificateRequestList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodCertificateRequestSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCertificateRequestSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `PodUID:` + fmt.Sprintf("%v", this.PodUID) + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) 
+ `,`, + `ServiceAccountUID:` + fmt.Sprintf("%v", this.ServiceAccountUID) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeUID:` + fmt.Sprintf("%v", this.NodeUID) + `,`, + `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`, + `PKIXPublicKey:` + valueToStringGenerated(this.PKIXPublicKey) + `,`, + `ProofOfPossession:` + valueToStringGenerated(this.ProofOfPossession) + `,`, + `}`, + }, "") + return s +} +func (this *PodCertificateRequestStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PodCertificateRequestStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `CertificateChain:` + fmt.Sprintf("%v", this.CertificateChain) + `,`, + `NotBefore:` + strings.Replace(fmt.Sprintf("%v", this.NotBefore), "Time", "v1.Time", 1) + `,`, + `BeginRefreshAt:` + strings.Replace(fmt.Sprintf("%v", this.BeginRefreshAt), "Time", "v1.Time", 1) + `,`, + `NotAfter:` + strings.Replace(fmt.Sprintf("%v", this.NotAfter), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -745,6 +1310,858 @@ func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodCertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateRequestList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequestList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequestList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodCertificateRequest{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ServiceAccountUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = k8s_io_apimachinery_pkg_types.NodeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxExpirationSeconds = &v + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PKIXPublicKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PKIXPublicKey = append(m.PKIXPublicKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PKIXPublicKey == nil { + m.PKIXPublicKey = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofOfPossession", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofOfPossession = append(m.ProofOfPossession[:0], dAtA[iNdEx:postIndex]...) + if m.ProofOfPossession == nil { + m.ProofOfPossession = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequestStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateChain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertificateChain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NotBefore == nil { + m.NotBefore = &v1.Time{} + } + if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginRefreshAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeginRefreshAt == nil { + m.BeginRefreshAt = &v1.Time{} + } + if err := m.BeginRefreshAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NotAfter == nil { + m.NotAfter = &v1.Time{} + } + if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.proto index 7155f778cf..194bdbc14f 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -101,3 +101,208 @@ message ClusterTrustBundleSpec { optional string trustBundle = 2; } +// PodCertificateRequest encodes a pod requesting a certificate from a given +// signer. +// +// Kubelets use this API to implement podCertificate projected volumes +message PodCertificateRequest { + // metadata contains the object metadata. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the details about the certificate being requested. + optional PodCertificateRequestSpec spec = 2; + + // status contains the issued certificate, and a standard set of conditions. 
+  // +optional
+  optional PodCertificateRequestStatus status = 3;
+}
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+message PodCertificateRequestList {
+  // metadata contains the list metadata.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a collection of PodCertificateRequest objects
+  repeated PodCertificateRequest items = 2;
+}
+
+// PodCertificateRequestSpec describes the certificate request. All fields are
+// immutable after creation.
+message PodCertificateRequestSpec {
+  // signerName indicates the requested signer.
+  //
+  // All signer names beginning with `kubernetes.io` are reserved for use by
+  // the Kubernetes project. There is currently one well-known signer
+  // documented by the Kubernetes project,
+  // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+  // certificates understood by kube-apiserver. It is currently
+  // unimplemented.
+  //
+  // +required
+  optional string signerName = 1;
+
+  // podName is the name of the pod into which the certificate will be mounted.
+  //
+  // +required
+  optional string podName = 2;
+
+  // podUID is the UID of the pod into which the certificate will be mounted.
+  //
+  // +required
+  optional string podUID = 3;
+
+  // serviceAccountName is the name of the service account the pod is running as.
+  //
+  // +required
+  optional string serviceAccountName = 4;
+
+  // serviceAccountUID is the UID of the service account the pod is running as.
+  //
+  // +required
+  optional string serviceAccountUID = 5;
+
+  // nodeName is the name of the node the pod is assigned to.
+  //
+  // +required
+  optional string nodeName = 6;
+
+  // nodeUID is the UID of the node the pod is assigned to.
+  //
+  // +required
+  optional string nodeUID = 7;
+
+  // maxExpirationSeconds is the maximum lifetime permitted for the
+  // certificate.
+  //
+  // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+  // will reject values shorter than 3600 (1 hour). The maximum allowable
+  // value is 7862400 (91 days).
+  //
+  // The signer implementation is then free to issue a certificate with any
+  // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+  // seconds (1 hour). This constraint is enforced by kube-apiserver.
+  // `kubernetes.io` signers will never issue certificates with a lifetime
+  // longer than 24 hours.
+  //
+  // +optional
+  // +default=86400
+  optional int32 maxExpirationSeconds = 8;
+
+  // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+  // certificate to.
+  //
+  // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+  // or ED25519. Note that this list may be expanded in the future.
+  //
+  // Signer implementations do not need to support all key types supported by
+  // kube-apiserver and kubelet. If a signer does not support the key type
+  // used for a given PodCertificateRequest, it must deny the request by
+  // setting a status.conditions entry with a type of "Denied" and a reason of
+  // "UnsupportedKeyType". It may also suggest a key type that it does support
+  // in the message field.
+  //
+  // +required
+  optional bytes pkixPublicKey = 9;
+
+  // proofOfPossession proves that the requesting kubelet holds the private
+  // key corresponding to pkixPublicKey.
+  //
+  // It is constructed by signing the ASCII bytes of the pod's UID using the
+  // private key corresponding to `pkixPublicKey`.
+  //
+  // kube-apiserver validates the proof of possession during creation of the
+  // PodCertificateRequest.
+  //
+  // If the key is an RSA key, then the signature is over the ASCII bytes of
+  // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+  // function crypto/rsa.SignPSS with nil options).
+  //
+  // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+  // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+  // golang library function crypto/ecdsa.SignASN1).
+  //
+  // If the key is an ED25519 key, then the signature is as described by the
+  // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+  // the golang library crypto/ed25519.Sign).
+  //
+  // +required
+  optional bytes proofOfPossession = 10;
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+message PodCertificateRequestStatus {
+  // conditions applied to the request.
+  //
+  // The types "Issued", "Denied", and "Failed" have special handling. At
+  // most one of these conditions may be present, and they must have status
+  // "True".
+  //
+  // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+  // suggest a key type that will work in the message field.
+  //
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  // +optional
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+
+  // certificateChain is populated with an issued certificate by the signer.
+  // This field is set via the /status subresource. Once populated, this field
+  // is immutable.
+  //
+  // If the certificate signing request is denied, a condition of type
+  // "Denied" is added and this field remains empty. If the signer cannot
+  // issue the certificate, a condition of type "Failed" is added and this
+  // field remains empty.
+  //
+  // Validation requirements:
+  //  1. certificateChain must consist of one or more PEM-formatted certificates.
+  //  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+  //     described in section 4 of RFC5280.
+  //
+  // If more than one block is present, and the definition of the requested
+  // spec.signerName does not indicate otherwise, the first block is the
+  // issued certificate, and subsequent blocks should be treated as
+  // intermediate certificates and presented in TLS handshakes. When
+  // projecting the chain into a pod volume, kubelet will drop any data
+  // in-between the PEM blocks, as well as any PEM block headers.
+  //
+  // +optional
+  optional string certificateChain = 2;
+
+  // notBefore is the time at which the certificate becomes valid. The value
+  // must be the same as the notBefore value in the leaf certificate in
+  // certificateChain. This field is set via the /status subresource. Once
+  // populated, it is immutable. The signer must set this field at the same
+  // time it sets certificateChain.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
+
+  // beginRefreshAt is the time at which the kubelet should begin trying to
+  // refresh the certificate. This field is set via the /status subresource,
+  // and must be set at the same time as certificateChain. Once populated,
+  // this field is immutable.
+  //
+  // This field is only a hint. Kubelet may start refreshing before or after
+  // this time if necessary.
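Because beginRefreshAt is documented above as only a hint, where a signer places it within the validity window is policy. A purely illustrative heuristic (not the behavior of any real signer or of kubelet) is to target the midpoint between notBefore and notAfter:

```go
package main

import (
	"fmt"
	"time"
)

// suggestBeginRefreshAt picks a refresh hint halfway through the
// certificate's validity window. Illustrative only; a signer may choose
// any point, and kubelet may refresh earlier or later regardless.
func suggestBeginRefreshAt(notBefore, notAfter time.Time) time.Time {
	return notBefore.Add(notAfter.Sub(notBefore) / 2)
}

func main() {
	nb := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(suggestBeginRefreshAt(nb, nb.Add(24*time.Hour))) // 2025-01-01 12:00:00 +0000 UTC
}
```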
+ // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5; + + // notAfter is the time at which the certificate expires. The value must be + // the same as the notAfter value in the leaf certificate in + // certificateChain. This field is set via the /status subresource. Once + // populated, it is immutable. The signer must set this field at the same + // time it sets certificateChain. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6; +} + diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/register.go b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/register.go index 7288ed9a3e..ae541e15c1 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/register.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/register.go @@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ClusterTrustBundle{}, &ClusterTrustBundleList{}, + &PodCertificateRequest{}, + &PodCertificateRequestList{}, ) // Add the watch version that applies diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types.go b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types.go index beef02599d..a5cb3809e7 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -18,6 +18,7 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // +genclient @@ -106,3 +107,233 @@ type ClusterTrustBundleList struct { // items is a collection of ClusterTrustBundle objects Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +k8s:prerelease-lifecycle-gen:introduced=1.34 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodCertificateRequest encodes a pod requesting a certificate from a given +// signer. +// +// Kubelets use this API to implement podCertificate projected volumes +type PodCertificateRequest struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the object metadata. + // + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the details about the certificate being requested. + Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // status contains the issued certificate, and a standard set of conditions. + // +optional + Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodCertificateRequestSpec describes the certificate request. All fields are +// immutable after creation. +type PodCertificateRequestSpec struct { + // signerName indicates the requested signer. + // + // All signer names beginning with `kubernetes.io` are reserved for use by + // the Kubernetes project. There is currently one well-known signer + // documented by the Kubernetes project, + // `kubernetes.io/kube-apiserver-client-pod`, which will issue client + // certificates understood by kube-apiserver. It is currently + // unimplemented. + // + // +required + SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"` + + // podName is the name of the pod into which the certificate will be mounted. + // + // +required + PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"` + // podUID is the UID of the pod into which the certificate will be mounted. 
+	//
+	// +required
+	PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
+
+	// serviceAccountName is the name of the service account the pod is running as.
+	//
+	// +required
+	ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
+	// serviceAccountUID is the UID of the service account the pod is running as.
+	//
+	// +required
+	ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
+
+	// nodeName is the name of the node the pod is assigned to.
+	//
+	// +required
+	NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
+	// nodeUID is the UID of the node the pod is assigned to.
+	//
+	// +required
+	NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
+
+	// maxExpirationSeconds is the maximum lifetime permitted for the
+	// certificate.
+	//
+	// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+	// will reject values shorter than 3600 (1 hour). The maximum allowable
+	// value is 7862400 (91 days).
+	//
+	// The signer implementation is then free to issue a certificate with any
+	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+	// seconds (1 hour). This constraint is enforced by kube-apiserver.
+	// `kubernetes.io` signers will never issue certificates with a lifetime
+	// longer than 24 hours.
+	//
+	// +optional
+	// +default=86400
+	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
+
+	// pkixPublicKey is the PKIX-serialized public key the signer will issue the
+	// certificate to.
+	//
+	// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+	// or ED25519. Note that this list may be expanded in the future.
+	//
+	// Signer implementations do not need to support all key types supported by
+	// kube-apiserver and kubelet. If a signer does not support the key type
+	// used for a given PodCertificateRequest, it must deny the request by
+	// setting a status.conditions entry with a type of "Denied" and a reason of
+	// "UnsupportedKeyType". It may also suggest a key type that it does support
+	// in the message field.
+	//
+	// +required
+	PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
+
+	// proofOfPossession proves that the requesting kubelet holds the private
+	// key corresponding to pkixPublicKey.
+	//
+	// It is constructed by signing the ASCII bytes of the pod's UID using the
+	// private key corresponding to `pkixPublicKey`.
+	//
+	// kube-apiserver validates the proof of possession during creation of the
+	// PodCertificateRequest.
+	//
+	// If the key is an RSA key, then the signature is over the ASCII bytes of
+	// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+	// function crypto/rsa.SignPSS with nil options).
+	//
+	// If the key is an ECDSA key, then the signature is as described by [SEC 1,
+	// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+	// golang library function crypto/ecdsa.SignASN1).
+	//
+	// If the key is an ED25519 key, then the signature is as described by the
+	// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+	// the golang library crypto/ed25519.Sign).
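For the ED25519 case in the comment above, the whole proof-of-possession flow fits in a few lines of standard-library Go. The pod UID below is a made-up value; a real kubelet signs the actual UID of the pod with its own freshly generated key:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

func main() {
	// The kubelet generates a keypair for the pod's certificate.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	podUID := "3f6b5fa2-8c4e-4a1b-9e2d-0c7f1a2b3c4d" // hypothetical pod UID

	// spec.pkixPublicKey carries the PKIX (SubjectPublicKeyInfo) encoding.
	pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		panic(err)
	}

	// spec.proofOfPossession is the signature over the ASCII bytes of the UID.
	proof := ed25519.Sign(priv, []byte(podUID))

	// kube-apiserver's creation-time check amounts to verifying the signature
	// against the submitted public key.
	fmt.Println(len(pkixPublicKey) > 0, ed25519.Verify(pub, []byte(podUID), proof)) // true true
}
```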
+ // + // +required + ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"` +} + +// PodCertificateRequestStatus describes the status of the request, and holds +// the certificate data if the request is issued. +type PodCertificateRequestStatus struct { + // conditions applied to the request. + // + // The types "Issued", "Denied", and "Failed" have special handling. At + // most one of these conditions may be present, and they must have status + // "True". + // + // If the request is denied with `Reason=UnsupportedKeyType`, the signer may + // suggest a key type that will work in the message field. + // + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // certificateChain is populated with an issued certificate by the signer. + // This field is set via the /status subresource. Once populated, this field + // is immutable. + // + // If the certificate signing request is denied, a condition of type + // "Denied" is added and this field remains empty. If the signer cannot + // issue the certificate, a condition of type "Failed" is added and this + // field remains empty. + // + // Validation requirements: + // 1. certificateChain must consist of one or more PEM-formatted certificates. + // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as + // described in section 4 of RFC5280. + // + // If more than one block is present, and the definition of the requested + // spec.signerName does not indicate otherwise, the first block is the + // issued certificate, and subsequent blocks should be treated as + // intermediate certificates and presented in TLS handshakes. When + // projecting the chain into a pod volume, kubelet will drop any data + // in-between the PEM blocks, as well as any PEM block headers. + // + // +optional + CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"` + + // notBefore is the time at which the certificate becomes valid. The value + // must be the same as the notBefore value in the leaf certificate in + // certificateChain. This field is set via the /status subresource. Once + // populated, it is immutable. The signer must set this field at the same + // time it sets certificateChain. + // + // +optional + NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"` + + // beginRefreshAt is the time at which the kubelet should begin trying to + // refresh the certificate. This field is set via the /status subresource, + // and must be set at the same time as certificateChain. Once populated, + // this field is immutable. + // + // This field is only a hint. Kubelet may start refreshing before or after + // this time if necessary. + // + // +optional + BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"` + + // notAfter is the time at which the certificate expires. The value must be + // the same as the notAfter value in the leaf certificate in + // certificateChain. This field is set via the /status subresource. Once + // populated, it is immutable. The signer must set this field at the same + // time it sets certificateChain. 
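The rule above, that NotBefore/NotAfter must mirror the leaf certificate (the first PEM block of CertificateChain), can be spot-checked with `encoding/pem` and `crypto/x509`. A sketch of that check, with `checkLeafTimes` as a hypothetical helper:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

// checkLeafTimes verifies that the status timestamps match the leaf
// certificate, which is the first PEM block in certificateChain.
func checkLeafTimes(certificateChain string, notBefore, notAfter time.Time) error {
	block, _ := pem.Decode([]byte(certificateChain))
	if block == nil || block.Type != "CERTIFICATE" {
		return fmt.Errorf("certificateChain does not start with a PEM CERTIFICATE block")
	}
	leaf, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return fmt.Errorf("parsing leaf certificate: %w", err)
	}
	if !leaf.NotBefore.Equal(notBefore) || !leaf.NotAfter.Equal(notAfter) {
		return fmt.Errorf("status times (%v, %v) do not match leaf certificate (%v, %v)",
			notBefore, notAfter, leaf.NotBefore, leaf.NotAfter)
	}
	return nil
}

func main() {
	// An empty chain fails fast; a real caller passes status.certificateChain
	// together with the status timestamps.
	fmt.Println(checkLeafTimes("", time.Now(), time.Now()))
}
```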
+ // + // +optional + NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"` +} + +// Well-known condition types for PodCertificateRequests +const ( + // Denied indicates the request was denied by the signer. + PodCertificateRequestConditionTypeDenied string = "Denied" + // Failed indicates the signer failed to issue the certificate. + PodCertificateRequestConditionTypeFailed string = "Failed" + // Issued indicates the certificate has been issued. + PodCertificateRequestConditionTypeIssued string = "Issued" +) + +// Well-known condition reasons for PodCertificateRequests +const ( + // UnsupportedKeyType should be set on "Denied" conditions when the signer + // doesn't support the key type of publicKey. + PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.34 + +// PodCertificateRequestList is a collection of PodCertificateRequest objects +type PodCertificateRequestList struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the list metadata. + // + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a collection of PodCertificateRequest objects + Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go index bff649e3cb..d29f2d8505 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go @@ -57,4 +57,56 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string { return map_ClusterTrustBundleSpec } +var map_PodCertificateRequest = map[string]string{ + "": "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes", + "metadata": "metadata contains the object metadata.", + "spec": "spec contains the details about the certificate being requested.", + "status": "status contains the issued certificate, and a standard set of conditions.", +} + +func (PodCertificateRequest) SwaggerDoc() map[string]string { + return map_PodCertificateRequest +} + +var map_PodCertificateRequestList = map[string]string{ + "": "PodCertificateRequestList is a collection of PodCertificateRequest objects", + "metadata": "metadata contains the list metadata.", + "items": "items is a collection of PodCertificateRequest objects", +} + +func (PodCertificateRequestList) SwaggerDoc() map[string]string { + return map_PodCertificateRequestList +} + +var map_PodCertificateRequestSpec = map[string]string{ + "": "PodCertificateRequestSpec describes the certificate request. All fields are immutable after creation.", + "signerName": "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. 
It is currently unimplemented.",
+	"podName": "podName is the name of the pod into which the certificate will be mounted.",
+	"podUID": "podUID is the UID of the pod into which the certificate will be mounted.",
+	"serviceAccountName": "serviceAccountName is the name of the service account the pod is running as.",
+	"serviceAccountUID": "serviceAccountUID is the UID of the service account the pod is running as.",
+	"nodeName": "nodeName is the name of the node the pod is assigned to.",
+	"nodeUID": "nodeUID is the UID of the node the pod is assigned to.",
+	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+	"pkixPublicKey": "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
+	"proofOfPossession": "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is constructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1).\n\nIf the key is an ED25519 key, then the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
+}
+
+func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
+	return map_PodCertificateRequestSpec
+}
+
+var map_PodCertificateRequestStatus = map[string]string{
+	"": "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
+	"conditions": "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling. At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
+	"certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource. 
Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.", + "notBefore": "notBefore is the time at which the certificate becomes valid. The value must be the same as the notBefore value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.", + "beginRefreshAt": "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate. This field is set via the /status subresource, and must be set at the same time as certificateChain. Once populated, this field is immutable.\n\nThis field is only a hint. Kubelet may start refreshing before or after this time if necessary.", + "notAfter": "notAfter is the time at which the certificate expires. The value must be the same as the notAfter value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.", +} + +func (PodCertificateRequestStatus) SwaggerDoc() map[string]string { + return map_PodCertificateRequestStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go index 30a4dc1e80..25bc0ed6cb 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -100,3 +101,130 @@ func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest. +func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest { + if in == nil { + return nil + } + out := new(PodCertificateRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodCertificateRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodCertificateRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList. +func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList { + if in == nil { + return nil + } + out := new(PodCertificateRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) { + *out = *in + if in.MaxExpirationSeconds != nil { + in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds + *out = new(int32) + **out = **in + } + if in.PKIXPublicKey != nil { + in, out := &in.PKIXPublicKey, &out.PKIXPublicKey + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ProofOfPossession != nil { + in, out := &in.ProofOfPossession, &out.ProofOfPossession + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec. +func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec { + if in == nil { + return nil + } + out := new(PodCertificateRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotBefore != nil { + in, out := &in.NotBefore, &out.NotBefore + *out = (*in).DeepCopy() + } + if in.BeginRefreshAt != nil { + in, out := &in.BeginRefreshAt, &out.BeginRefreshAt + *out = (*in).DeepCopy() + } + if in.NotAfter != nil { + in, out := &in.NotAfter, &out.NotAfter + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus. 
+func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus { + if in == nil { + return nil + } + out := new(PodCertificateRequestStatus) + in.DeepCopyInto(out) + return out +} diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go index 3121a87d08..edbfce79bc 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -56,3 +56,39 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { return 1, 37 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 7c48270f65..4c9385c196 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -30,6 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "k8s.io/api/certificates/v1beta1"; // Describes a certificate signing request +// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval message CertificateSigningRequest { // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -182,6 +184,11 @@ message CertificateSigningRequestStatus { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember repeated CertificateSigningRequestCondition conditions = 1; // If request was approved, the controller will place the issued certificate here. diff --git a/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/types.go b/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/types.go index 1ce104807d..fadb7e082e 100644 --- a/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/openshift/tools/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -31,6 +31,8 @@ import ( // +k8s:prerelease-lifecycle-gen:replacement=certificates.k8s.io,v1,CertificateSigningRequest // Describes a certificate signing request +// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval type CertificateSigningRequest struct { metav1.TypeMeta `json:",inline"` // +optional @@ -175,6 +177,11 @@ type CertificateSigningRequestStatus struct { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` // If request was approved, the controller will place the issued certificate here. 
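Editor's note: the PodCertificateRequestSpec comments above fully determine how a requester computes proofOfPossession from its keypair and the pod UID. As a minimal illustrative sketch (editorial, not part of this diff; the podUID value is a placeholder), here is how the Ed25519 case could be produced and verified with only the Go standard library:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

func main() {
	// Generate an Ed25519 keypair. The PKIX (DER) serialization of the
	// public half is what would go into spec.pkixPublicKey.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		panic(err)
	}

	// Per the field documentation, the proof of possession is a signature
	// over the ASCII bytes of the pod's UID; for Ed25519 that is a plain
	// ed25519.Sign call.
	podUID := "11111111-2222-3333-4444-555555555555" // placeholder UID
	proofOfPossession := ed25519.Sign(priv, []byte(podUID))

	// A verifier such as kube-apiserver parses the PKIX key and checks the
	// signature against the same pod UID bytes.
	parsed, err := x509.ParsePKIXPublicKey(pkixPublicKey)
	if err != nil {
		panic(err)
	}
	ok := ed25519.Verify(parsed.(ed25519.PublicKey), []byte(podUID), proofOfPossession)
	fmt.Println("proof of possession verifies:", ok)
}

For RSA keys the same message would instead be signed with crypto/rsa.SignPSS (nil options), and for ECDSA keys with crypto/ecdsa.SignASN1, as the field documentation above states.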
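The certificateChain documentation likewise pins down kubelet's projection behavior: only the PEM certificate blocks survive, while bytes between blocks and PEM block headers are dropped. A minimal sketch of that normalization, assuming only encoding/pem (normalizeChain is a hypothetical name, not an API introduced by this diff):

package main

import (
	"encoding/pem"
	"fmt"
)

// normalizeChain keeps only CERTIFICATE PEM blocks from the input, dropping
// any bytes between blocks as well as any PEM block headers, mirroring the
// projection behavior described for status.certificateChain.
func normalizeChain(chain []byte) []byte {
	var out []byte
	rest := chain
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			return out // no more PEM blocks
		}
		if block.Type != "CERTIFICATE" {
			continue // skip non-certificate blocks
		}
		block.Headers = nil // PEM block headers are dropped
		out = append(out, pem.EncodeToMemory(block)...)
	}
}

func main() {
	// Not a real certificate; only demonstrates that inter-block junk and
	// block headers are stripped while the block itself is kept.
	in := []byte("junk between blocks\n" +
		"-----BEGIN CERTIFICATE-----\n" +
		"Comment: dropped header\n" +
		"\n" +
		"aGVsbG8=\n" +
		"-----END CERTIFICATE-----\n" +
		"trailing junk\n")
	fmt.Printf("%s", normalizeChain(in))
}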
diff --git a/openshift/tools/vendor/k8s.io/api/core/v1/generated.pb.go b/openshift/tools/vendor/k8s.io/api/core/v1/generated.pb.go index a4b8f58429..e1a297b985 100644 --- a/openshift/tools/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/openshift/tools/vendor/k8s.io/api/core/v1/generated.pb.go @@ -861,10 +861,38 @@ func (m *Container) XXX_DiscardUnknown() { var xxx_messageInfo_Container proto.InternalMessageInfo +func (m *ContainerExtendedResourceRequest) Reset() { *m = ContainerExtendedResourceRequest{} } +func (*ContainerExtendedResourceRequest) ProtoMessage() {} +func (*ContainerExtendedResourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{29} +} +func (m *ContainerExtendedResourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerExtendedResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerExtendedResourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerExtendedResourceRequest.Merge(m, src) +} +func (m *ContainerExtendedResourceRequest) XXX_Size() int { + return m.Size() +} +func (m *ContainerExtendedResourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerExtendedResourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerExtendedResourceRequest proto.InternalMessageInfo + func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{29} + return fileDescriptor_6c07b07c062484ab, []int{30} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +920,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{30} + return fileDescriptor_6c07b07c062484ab, []int{31} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{31} + return fileDescriptor_6c07b07c062484ab, []int{32} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -945,10 +973,66 @@ func (m *ContainerResizePolicy) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo +func (m *ContainerRestartRule) Reset() { *m = ContainerRestartRule{} } +func (*ContainerRestartRule) ProtoMessage() {} +func (*ContainerRestartRule) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{33} +} +func (m *ContainerRestartRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerRestartRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerRestartRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerRestartRule.Merge(m, src) +} +func (m *ContainerRestartRule) XXX_Size() int { + return m.Size() +} 
+func (m *ContainerRestartRule) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerRestartRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerRestartRule proto.InternalMessageInfo + +func (m *ContainerRestartRuleOnExitCodes) Reset() { *m = ContainerRestartRuleOnExitCodes{} } +func (*ContainerRestartRuleOnExitCodes) ProtoMessage() {} +func (*ContainerRestartRuleOnExitCodes) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{34} +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerRestartRuleOnExitCodes.Merge(m, src) +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Size() int { + return m.Size() +} +func (m *ContainerRestartRuleOnExitCodes) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerRestartRuleOnExitCodes.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerRestartRuleOnExitCodes proto.InternalMessageInfo + func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{32} + return fileDescriptor_6c07b07c062484ab, []int{35} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1060,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{33} + return fileDescriptor_6c07b07c062484ab, []int{36} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1088,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{34} + return fileDescriptor_6c07b07c062484ab, []int{37} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1116,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{35} + return fileDescriptor_6c07b07c062484ab, []int{38} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1144,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{36} + return fileDescriptor_6c07b07c062484ab, []int{39} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1172,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *ContainerUser) Reset() { *m = ContainerUser{} } func 
(*ContainerUser) ProtoMessage() {} func (*ContainerUser) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{37} + return fileDescriptor_6c07b07c062484ab, []int{40} } func (m *ContainerUser) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1200,7 @@ var xxx_messageInfo_ContainerUser proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{38} + return fileDescriptor_6c07b07c062484ab, []int{41} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1228,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{39} + return fileDescriptor_6c07b07c062484ab, []int{42} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1256,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{40} + return fileDescriptor_6c07b07c062484ab, []int{43} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1284,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{41} + return fileDescriptor_6c07b07c062484ab, []int{44} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1312,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{42} + return fileDescriptor_6c07b07c062484ab, []int{45} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1340,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{43} + return fileDescriptor_6c07b07c062484ab, []int{46} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1368,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{44} + return fileDescriptor_6c07b07c062484ab, []int{47} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1396,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func 
(*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{45} + return fileDescriptor_6c07b07c062484ab, []int{48} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1424,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{46} + return fileDescriptor_6c07b07c062484ab, []int{49} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1452,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{47} + return fileDescriptor_6c07b07c062484ab, []int{50} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1480,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{48} + return fileDescriptor_6c07b07c062484ab, []int{51} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1508,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{49} + return fileDescriptor_6c07b07c062484ab, []int{52} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1536,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{50} + return fileDescriptor_6c07b07c062484ab, []int{53} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1564,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{51} + return fileDescriptor_6c07b07c062484ab, []int{54} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1592,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{52} + return fileDescriptor_6c07b07c062484ab, []int{55} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1620,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{53} + return 
fileDescriptor_6c07b07c062484ab, []int{56} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1648,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{54} + return fileDescriptor_6c07b07c062484ab, []int{57} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1676,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{55} + return fileDescriptor_6c07b07c062484ab, []int{58} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1704,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{56} + return fileDescriptor_6c07b07c062484ab, []int{59} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1732,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{57} + return fileDescriptor_6c07b07c062484ab, []int{60} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1760,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{58} + return fileDescriptor_6c07b07c062484ab, []int{61} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1788,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{59} + return fileDescriptor_6c07b07c062484ab, []int{62} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1729,10 +1813,38 @@ func (m *FCVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo +func (m *FileKeySelector) Reset() { *m = FileKeySelector{} } +func (*FileKeySelector) ProtoMessage() {} +func (*FileKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{63} +} +func (m *FileKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileKeySelector.Merge(m, src) +} +func (m *FileKeySelector) XXX_Size() int { + return m.Size() +} +func (m *FileKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_FileKeySelector.DiscardUnknown(m) +} + +var xxx_messageInfo_FileKeySelector proto.InternalMessageInfo + func (m *FlexPersistentVolumeSource) 
Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{60} + return fileDescriptor_6c07b07c062484ab, []int{64} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1872,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{61} + return fileDescriptor_6c07b07c062484ab, []int{65} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1900,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{62} + return fileDescriptor_6c07b07c062484ab, []int{66} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1928,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{63} + return fileDescriptor_6c07b07c062484ab, []int{67} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1956,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{64} + return fileDescriptor_6c07b07c062484ab, []int{68} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1984,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{65} + return fileDescriptor_6c07b07c062484ab, []int{69} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +2012,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{66} + return fileDescriptor_6c07b07c062484ab, []int{70} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +2040,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{67} + return fileDescriptor_6c07b07c062484ab, []int{71} } func (m *GlusterfsVolumeSource) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +2068,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{68} + return fileDescriptor_6c07b07c062484ab, []int{72} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2096,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{69} + return fileDescriptor_6c07b07c062484ab, []int{73} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2124,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{70} + return fileDescriptor_6c07b07c062484ab, []int{74} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2152,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostIP) Reset() { *m = HostIP{} } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{71} + return fileDescriptor_6c07b07c062484ab, []int{75} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2180,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{72} + return fileDescriptor_6c07b07c062484ab, []int{76} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2208,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{73} + return fileDescriptor_6c07b07c062484ab, []int{77} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2236,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{74} + return fileDescriptor_6c07b07c062484ab, []int{78} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2264,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} } func (*ImageVolumeSource) ProtoMessage() {} func (*ImageVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{75} + return fileDescriptor_6c07b07c062484ab, []int{79} } func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2292,7 @@ var xxx_messageInfo_ImageVolumeSource 
proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{76} + return fileDescriptor_6c07b07c062484ab, []int{80} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2320,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{77} + return fileDescriptor_6c07b07c062484ab, []int{81} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2348,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{78} + return fileDescriptor_6c07b07c062484ab, []int{82} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2376,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{79} + return fileDescriptor_6c07b07c062484ab, []int{83} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2404,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{80} + return fileDescriptor_6c07b07c062484ab, []int{84} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2432,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{81} + return fileDescriptor_6c07b07c062484ab, []int{85} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2460,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{82} + return fileDescriptor_6c07b07c062484ab, []int{86} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2488,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} } func (*LinuxContainerUser) ProtoMessage() {} func (*LinuxContainerUser) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{83} + return fileDescriptor_6c07b07c062484ab, []int{87} } func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2516,7 @@ var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{84} + return 
fileDescriptor_6c07b07c062484ab, []int{88} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2544,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{85} + return fileDescriptor_6c07b07c062484ab, []int{89} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2572,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{86} + return fileDescriptor_6c07b07c062484ab, []int{90} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2600,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{87} + return fileDescriptor_6c07b07c062484ab, []int{91} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2628,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{88} + return fileDescriptor_6c07b07c062484ab, []int{92} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2656,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} } func (*ModifyVolumeStatus) ProtoMessage() {} func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{89} + return fileDescriptor_6c07b07c062484ab, []int{93} } func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2684,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{90} + return fileDescriptor_6c07b07c062484ab, []int{94} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2712,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{91} + return fileDescriptor_6c07b07c062484ab, []int{95} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2740,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{92} + return fileDescriptor_6c07b07c062484ab, []int{96} } func (m *NamespaceCondition) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2768,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{93} + return fileDescriptor_6c07b07c062484ab, []int{97} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2796,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{94} + return fileDescriptor_6c07b07c062484ab, []int{98} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2824,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{95} + return fileDescriptor_6c07b07c062484ab, []int{99} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2852,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{96} + return fileDescriptor_6c07b07c062484ab, []int{100} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2880,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{97} + return fileDescriptor_6c07b07c062484ab, []int{101} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2908,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{98} + return fileDescriptor_6c07b07c062484ab, []int{102} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2936,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{99} + return fileDescriptor_6c07b07c062484ab, []int{103} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2964,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{100} + return fileDescriptor_6c07b07c062484ab, []int{104} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2992,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func 
(*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{101} + return fileDescriptor_6c07b07c062484ab, []int{105} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +3020,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{102} + return fileDescriptor_6c07b07c062484ab, []int{106} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +3048,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeFeatures) Reset() { *m = NodeFeatures{} } func (*NodeFeatures) ProtoMessage() {} func (*NodeFeatures) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{103} + return fileDescriptor_6c07b07c062484ab, []int{107} } func (m *NodeFeatures) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3076,7 @@ var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{104} + return fileDescriptor_6c07b07c062484ab, []int{108} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3104,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{105} + return fileDescriptor_6c07b07c062484ab, []int{109} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3132,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} } func (*NodeRuntimeHandler) ProtoMessage() {} func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{106} + return fileDescriptor_6c07b07c062484ab, []int{110} } func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3160,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} } func (*NodeRuntimeHandlerFeatures) ProtoMessage() {} func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{107} + return fileDescriptor_6c07b07c062484ab, []int{111} } func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3188,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{108} + return fileDescriptor_6c07b07c062484ab, []int{112} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3216,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_6c07b07c062484ab, []int{109} + return fileDescriptor_6c07b07c062484ab, []int{113} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3244,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{110} + return fileDescriptor_6c07b07c062484ab, []int{114} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3272,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{111} + return fileDescriptor_6c07b07c062484ab, []int{115} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3300,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{112} + return fileDescriptor_6c07b07c062484ab, []int{116} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3328,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSwapStatus) Reset() { *m = NodeSwapStatus{} } func (*NodeSwapStatus) ProtoMessage() {} func (*NodeSwapStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{113} + return fileDescriptor_6c07b07c062484ab, []int{117} } func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3356,7 @@ var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{114} + return fileDescriptor_6c07b07c062484ab, []int{118} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3384,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{115} + return fileDescriptor_6c07b07c062484ab, []int{119} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3412,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{116} + return fileDescriptor_6c07b07c062484ab, []int{120} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3440,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{117} + return fileDescriptor_6c07b07c062484ab, []int{121} } func (m *PersistentVolume) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3468,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{118} + return fileDescriptor_6c07b07c062484ab, []int{122} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{119} + return fileDescriptor_6c07b07c062484ab, []int{123} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{120} + return fileDescriptor_6c07b07c062484ab, []int{124} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{121} + return fileDescriptor_6c07b07c062484ab, []int{125} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{122} + return fileDescriptor_6c07b07c062484ab, []int{126} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{123} + return fileDescriptor_6c07b07c062484ab, []int{127} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{124} + return fileDescriptor_6c07b07c062484ab, []int{128} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -3552,7 +3664,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{125} + return fileDescriptor_6c07b07c062484ab, []int{129} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3692,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{126} + return fileDescriptor_6c07b07c062484ab, []int{130} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3720,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{127} + return fileDescriptor_6c07b07c062484ab, []int{131} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3748,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{128} + return fileDescriptor_6c07b07c062484ab, []int{132} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3776,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{129} + return fileDescriptor_6c07b07c062484ab, []int{133} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3804,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{130} + return fileDescriptor_6c07b07c062484ab, []int{134} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3832,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{131} + return fileDescriptor_6c07b07c062484ab, []int{135} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3860,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{132} + return fileDescriptor_6c07b07c062484ab, []int{136} } func (m 
*PodAffinityTerm) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3776,7 +3888,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
func (*PodAntiAffinity) ProtoMessage() {}
func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{133}
+ return fileDescriptor_6c07b07c062484ab, []int{137}
}
func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3804,7 +3916,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
func (*PodAttachOptions) ProtoMessage() {}
func (*PodAttachOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{134}
+ return fileDescriptor_6c07b07c062484ab, []int{138}
}
func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3829,10 +3941,38 @@ func (m *PodAttachOptions) XXX_DiscardUnknown() {
var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
+func (m *PodCertificateProjection) Reset() { *m = PodCertificateProjection{} }
+func (*PodCertificateProjection) ProtoMessage() {}
+func (*PodCertificateProjection) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6c07b07c062484ab, []int{139}
+}
+func (m *PodCertificateProjection) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodCertificateProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodCertificateProjection) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodCertificateProjection.Merge(m, src)
+}
+func (m *PodCertificateProjection) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodCertificateProjection) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodCertificateProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateProjection proto.InternalMessageInfo
+
func (m *PodCondition) Reset() { *m = PodCondition{} }
func (*PodCondition) ProtoMessage() {}
func (*PodCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{135}
+ return fileDescriptor_6c07b07c062484ab, []int{140}
}
func (m *PodCondition) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3860,7 +4000,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
func (*PodDNSConfig) ProtoMessage() {}
func (*PodDNSConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{136}
+ return fileDescriptor_6c07b07c062484ab, []int{141}
}
func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3888,7 +4028,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
func (*PodDNSConfigOption) ProtoMessage() {}
func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{137}
+ return fileDescriptor_6c07b07c062484ab, []int{142}
}
func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3916,7 +4056,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
func (*PodExecOptions) ProtoMessage() {}
func (*PodExecOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{138}
+ return fileDescriptor_6c07b07c062484ab, []int{143}
}
func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3941,10 +4081,38 @@ func (m *PodExecOptions) XXX_DiscardUnknown() {
var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
+func (m *PodExtendedResourceClaimStatus) Reset() { *m = PodExtendedResourceClaimStatus{} }
+func (*PodExtendedResourceClaimStatus) ProtoMessage() {}
+func (*PodExtendedResourceClaimStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6c07b07c062484ab, []int{144}
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodExtendedResourceClaimStatus.Merge(m, src)
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodExtendedResourceClaimStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodExtendedResourceClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodExtendedResourceClaimStatus proto.InternalMessageInfo
+
func (m *PodIP) Reset() { *m = PodIP{} }
func (*PodIP) ProtoMessage() {}
func (*PodIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{139}
+ return fileDescriptor_6c07b07c062484ab, []int{145}
}
func (m *PodIP) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -3972,7 +4140,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
func (m *PodList) Reset() { *m = PodList{} }
func (*PodList) ProtoMessage() {}
func (*PodList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{140}
+ return fileDescriptor_6c07b07c062484ab, []int{146}
}
func (m *PodList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -4000,7 +4168,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
func (*PodLogOptions) ProtoMessage() {}
func (*PodLogOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{141}
+ return fileDescriptor_6c07b07c062484ab, []int{147}
}
func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -4028,7 +4196,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
func (m *PodOS) Reset() { *m = PodOS{} }
func (*PodOS) ProtoMessage() {}
func (*PodOS) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{142}
+ return fileDescriptor_6c07b07c062484ab, []int{148}
}
func (m *PodOS) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -4056,7 +4224,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
func (*PodPortForwardOptions) ProtoMessage() {}
func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{143}
+ return fileDescriptor_6c07b07c062484ab, []int{149}
}
func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -4084,7 +4252,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
func (*PodProxyOptions) ProtoMessage() {}
func (*PodProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{144}
+ return fileDescriptor_6c07b07c062484ab,
[]int{150} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4280,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{145} + return fileDescriptor_6c07b07c062484ab, []int{151} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4308,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{146} + return fileDescriptor_6c07b07c062484ab, []int{152} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4336,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{147} + return fileDescriptor_6c07b07c062484ab, []int{153} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4364,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{148} + return fileDescriptor_6c07b07c062484ab, []int{154} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4392,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{149} + return fileDescriptor_6c07b07c062484ab, []int{155} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4420,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{150} + return fileDescriptor_6c07b07c062484ab, []int{156} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4448,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{151} + return fileDescriptor_6c07b07c062484ab, []int{157} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4476,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{152} + return fileDescriptor_6c07b07c062484ab, []int{158} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4504,7 @@ var 
xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{153} + return fileDescriptor_6c07b07c062484ab, []int{159} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4532,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{154} + return fileDescriptor_6c07b07c062484ab, []int{160} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4560,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{155} + return fileDescriptor_6c07b07c062484ab, []int{161} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4588,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{156} + return fileDescriptor_6c07b07c062484ab, []int{162} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4616,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{157} + return fileDescriptor_6c07b07c062484ab, []int{163} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4644,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{158} + return fileDescriptor_6c07b07c062484ab, []int{164} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4672,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{159} + return fileDescriptor_6c07b07c062484ab, []int{165} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4700,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{160} + return fileDescriptor_6c07b07c062484ab, []int{166} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4728,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = 
PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{161} + return fileDescriptor_6c07b07c062484ab, []int{167} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4756,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{162} + return fileDescriptor_6c07b07c062484ab, []int{168} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4784,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{163} + return fileDescriptor_6c07b07c062484ab, []int{169} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4812,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{164} + return fileDescriptor_6c07b07c062484ab, []int{170} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4840,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{165} + return fileDescriptor_6c07b07c062484ab, []int{171} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4868,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{166} + return fileDescriptor_6c07b07c062484ab, []int{172} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4896,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{167} + return fileDescriptor_6c07b07c062484ab, []int{173} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4924,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{168} + return fileDescriptor_6c07b07c062484ab, []int{174} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4952,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } 
func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{169} + return fileDescriptor_6c07b07c062484ab, []int{175} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4980,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{170} + return fileDescriptor_6c07b07c062484ab, []int{176} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +5008,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{171} + return fileDescriptor_6c07b07c062484ab, []int{177} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +5036,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{172} + return fileDescriptor_6c07b07c062484ab, []int{178} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +5064,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{173} + return fileDescriptor_6c07b07c062484ab, []int{179} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +5092,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{174} + return fileDescriptor_6c07b07c062484ab, []int{180} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5120,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{175} + return fileDescriptor_6c07b07c062484ab, []int{181} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5148,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceHealth) Reset() { *m = ResourceHealth{} } func (*ResourceHealth) ProtoMessage() {} func (*ResourceHealth) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{176} + return fileDescriptor_6c07b07c062484ab, []int{182} } func (m 
*ResourceHealth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5176,7 @@ var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{177} + return fileDescriptor_6c07b07c062484ab, []int{183} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5204,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{178} + return fileDescriptor_6c07b07c062484ab, []int{184} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5232,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{179} + return fileDescriptor_6c07b07c062484ab, []int{185} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5260,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{180} + return fileDescriptor_6c07b07c062484ab, []int{186} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5288,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{181} + return fileDescriptor_6c07b07c062484ab, []int{187} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5316,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } func (*ResourceStatus) ProtoMessage() {} func (*ResourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{182} + return fileDescriptor_6c07b07c062484ab, []int{188} } func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5344,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{183} + return fileDescriptor_6c07b07c062484ab, []int{189} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5372,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{184} + return fileDescriptor_6c07b07c062484ab, []int{190} } func (m 
*ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5400,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{185} + return fileDescriptor_6c07b07c062484ab, []int{191} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5428,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{186} + return fileDescriptor_6c07b07c062484ab, []int{192} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5456,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{187} + return fileDescriptor_6c07b07c062484ab, []int{193} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5484,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{188} + return fileDescriptor_6c07b07c062484ab, []int{194} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5512,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{189} + return fileDescriptor_6c07b07c062484ab, []int{195} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5540,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{190} + return fileDescriptor_6c07b07c062484ab, []int{196} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5568,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{191} + return fileDescriptor_6c07b07c062484ab, []int{197} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5596,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{192} + return fileDescriptor_6c07b07c062484ab, []int{198} } func (m *SecretList) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -5456,7 +5624,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{193} + return fileDescriptor_6c07b07c062484ab, []int{199} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5652,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{194} + return fileDescriptor_6c07b07c062484ab, []int{200} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5680,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{195} + return fileDescriptor_6c07b07c062484ab, []int{201} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5708,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{196} + return fileDescriptor_6c07b07c062484ab, []int{202} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5736,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{197} + return fileDescriptor_6c07b07c062484ab, []int{203} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5764,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{198} + return fileDescriptor_6c07b07c062484ab, []int{204} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5792,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{199} + return fileDescriptor_6c07b07c062484ab, []int{205} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5820,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{200} + return fileDescriptor_6c07b07c062484ab, []int{206} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5848,7 @@ var xxx_messageInfo_ServiceAccountList 
proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{201} + return fileDescriptor_6c07b07c062484ab, []int{207} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5876,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{202} + return fileDescriptor_6c07b07c062484ab, []int{208} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5904,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{203} + return fileDescriptor_6c07b07c062484ab, []int{209} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5932,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{204} + return fileDescriptor_6c07b07c062484ab, []int{210} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5960,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{205} + return fileDescriptor_6c07b07c062484ab, []int{211} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5988,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{206} + return fileDescriptor_6c07b07c062484ab, []int{212} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +6016,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{207} + return fileDescriptor_6c07b07c062484ab, []int{213} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +6044,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *SleepAction) Reset() { *m = SleepAction{} } func (*SleepAction) ProtoMessage() {} func (*SleepAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{208} + return fileDescriptor_6c07b07c062484ab, []int{214} } func (m *SleepAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +6072,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { 
*m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{209} + return fileDescriptor_6c07b07c062484ab, []int{215} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +6100,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{210} + return fileDescriptor_6c07b07c062484ab, []int{216} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +6128,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{211} + return fileDescriptor_6c07b07c062484ab, []int{217} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6156,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{212} + return fileDescriptor_6c07b07c062484ab, []int{218} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6184,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{213} + return fileDescriptor_6c07b07c062484ab, []int{219} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6212,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{214} + return fileDescriptor_6c07b07c062484ab, []int{220} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6072,7 +6240,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{215} + return fileDescriptor_6c07b07c062484ab, []int{221} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6268,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{216} + return fileDescriptor_6c07b07c062484ab, []int{222} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6128,7 +6296,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { 
*m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{217} + return fileDescriptor_6c07b07c062484ab, []int{223} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6156,7 +6324,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{218} + return fileDescriptor_6c07b07c062484ab, []int{224} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6184,7 +6352,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{219} + return fileDescriptor_6c07b07c062484ab, []int{225} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6212,7 +6380,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{220} + return fileDescriptor_6c07b07c062484ab, []int{226} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6240,7 +6408,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{221} + return fileDescriptor_6c07b07c062484ab, []int{227} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6268,7 +6436,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{222} + return fileDescriptor_6c07b07c062484ab, []int{228} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6296,7 +6464,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} } func (*VolumeMountStatus) ProtoMessage() {} func (*VolumeMountStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{223} + return fileDescriptor_6c07b07c062484ab, []int{229} } func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6324,7 +6492,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{224} + return fileDescriptor_6c07b07c062484ab, []int{230} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6352,7 +6520,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func 
(*VolumeProjection) ProtoMessage() {}
func (*VolumeProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{225}
+ return fileDescriptor_6c07b07c062484ab, []int{231}
}
func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -6380,7 +6548,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} }
func (*VolumeResourceRequirements) ProtoMessage() {}
func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{226}
+ return fileDescriptor_6c07b07c062484ab, []int{232}
}
func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -6408,7 +6576,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
func (m *VolumeSource) Reset() { *m = VolumeSource{} }
func (*VolumeSource) ProtoMessage() {}
func (*VolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{227}
+ return fileDescriptor_6c07b07c062484ab, []int{233}
}
func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -6436,7 +6604,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{228}
+ return fileDescriptor_6c07b07c062484ab, []int{234}
}
func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -6464,7 +6632,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
func (*WeightedPodAffinityTerm) ProtoMessage() {}
func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{229}
+ return fileDescriptor_6c07b07c062484ab, []int{235}
}
func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -6492,7 +6660,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
func (*WindowsSecurityContextOptions) ProtoMessage() {}
func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{230}
+ return fileDescriptor_6c07b07c062484ab, []int{236}
}
func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
@@ -6551,9 +6719,12 @@ func init() {
	proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection")
	proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource")
	proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container")
+ proto.RegisterType((*ContainerExtendedResourceRequest)(nil), "k8s.io.api.core.v1.ContainerExtendedResourceRequest")
	proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage")
	proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort")
	proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy")
+ proto.RegisterType((*ContainerRestartRule)(nil), "k8s.io.api.core.v1.ContainerRestartRule")
+ proto.RegisterType((*ContainerRestartRuleOnExitCodes)(nil),
"k8s.io.api.core.v1.ContainerRestartRuleOnExitCodes") proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") @@ -6583,6 +6754,7 @@ func init() { proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FileKeySelector)(nil), "k8s.io.api.core.v1.FileKeySelector") proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry") proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") @@ -6671,10 +6843,12 @@ func init() { proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCertificateProjection)(nil), "k8s.io.api.core.v1.PodCertificateProjection") proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodExtendedResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodExtendedResourceClaimStatus") proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") @@ -6787,1020 +6961,1049 @@ func init() { } var fileDescriptor_6c07b07c062484ab = []byte{ - // 16206 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9, - 0x75, 0x30, 0xc6, 0xea, 0x9e, 0xf3, 0xcd, 0x9d, 0xb8, 0x06, 0xb3, 0x00, 0x1a, 0x5b, 0xbb, 0x8b, - 0xc5, 0x5e, 0x03, 0x62, 0x0f, 0x2e, 0xb8, 0xbb, 0x5c, 0xed, 0x9c, 0x40, 0x2f, 0x30, 0x83, 0xde, - 0xec, 0x01, 0x40, 0x2e, 0x97, 0x14, 0x0b, 0xdd, 0x39, 0x33, 0xc5, 0xe9, 0xae, 0xea, 0xad, 0xaa, - 0x1e, 0x60, 0x60, 0x2a, 0x24, 0x51, 0x16, 0x25, 0x52, 0x72, 0x04, 0x43, 0x21, 0x59, 0x0e, 0x4a, - 0xa1, 0x1f, 0xba, 0x65, 0x5a, 0xb2, 0x68, 0xc9, 0x92, 0x2c, 0xea, 0xb2, 0x2d, 0x47, 0xc8, 0xfe, - 0x21, 0x4b, 0x8a, 0x30, 0xa9, 0xb0, 0xc2, 0x23, 0x73, 0x6c, 0x87, 0x42, 0x3f, 0x2c, 0x29, 0x64, - 0xff, 0xb0, 0x27, 0xf4, 0x7d, 0xfc, 0x22, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x07, 0x0b, 0x0c, 0x97, - 0x8c, 0xfd, 0xd7, 0x9d, 0xef, 0xe5, 0xcb, 0xac, 0x3c, 0x5f, 0xbe, 0x13, 0xec, 0xad, 0x4b, 0xe1, - 0xac, 0xeb, 0x5f, 0x70, 0x5a, 0xee, 0x85, 0x9a, 0x1f, 0x90, 0x0b, 0xdb, 0x17, 0x2f, 0x6c, 0x10, - 0x8f, 0x04, 0x4e, 0x44, 0xea, 0xb3, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xce, 0xac, 0xd3, 0x72, - 0x67, 0x29, 0xce, 0xec, 0xf6, 0xc5, 0x99, 0xe7, 0x36, 0xdc, 0x68, 0xb3, 0x7d, 0x7b, 0xb6, 0xe6, - 0x37, 0x2f, 0x6c, 0xf8, 0x1b, 0xfe, 0x05, 0x86, 0x7a, 0xbb, 0xbd, 0xce, 0xfe, 0xb1, 0x3f, 0xec, - 0x17, 0x27, 0x31, 0xf3, 0x62, 0xdc, 0x4c, 0xd3, 0xa9, 0x6d, 0xba, 0x1e, 0x09, 0x76, 0x2e, 0xb4, - 0xb6, 0x36, 0x58, 0xbb, 0x01, 0x09, 
0xfe, 0x6e, 0x69, 0xa4, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x2a, 0xa0, 0x1f, 0xb3, 0x60, - 0x26, 0xfd, 0x26, 0x57, 0x9d, 0x1e, 0xeb, 0xbd, 0xd3, 0xb6, 0x68, 0x74, 0x66, 0x29, 0x97, 0x1c, - 0xee, 0xd0, 0x14, 0xfa, 0x28, 0xdd, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x84, 0xfe, 0x8f, 0xc6, 0xfb, - 0x89, 0x96, 0xee, 0xef, 0x96, 0x26, 0xf8, 0xe1, 0xed, 0xde, 0x53, 0x59, 0x08, 0x78, 0x05, 0xf4, - 0x83, 0x70, 0x2c, 0x48, 0xcb, 0xc8, 0x89, 0x7c, 0x27, 0x3c, 0xdd, 0xcb, 0x45, 0x90, 0x9c, 0x70, - 0x9c, 0x45, 0x10, 0x67, 0xb7, 0x63, 0xff, 0xa1, 0xc5, 0x74, 0x23, 0xa2, 0x5b, 0x24, 0x6c, 0x37, - 0xa2, 0x43, 0x30, 0xe2, 0x5b, 0x32, 0x6c, 0x13, 0xee, 0xdb, 0x0a, 0xef, 0xbf, 0xb3, 0x98, 0x15, - 0xde, 0x21, 0xfa, 0x13, 0xbe, 0x05, 0x43, 0x91, 0x68, 0x4d, 0x74, 0x3d, 0xcf, 0x62, 0x48, 0x76, - 0x8a, 0x59, 0x22, 0xaa, 0x77, 0x98, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0x5f, 0xf3, 0x19, 0x90, 0x90, - 0x43, 0x50, 0x01, 0x2f, 0x9a, 0x2a, 0xe0, 0x52, 0x97, 0x2f, 0xc8, 0x51, 0x05, 0xff, 0x57, 0x66, - 0xbf, 0x99, 0xfc, 0xf1, 0xfd, 0x6e, 0xfe, 0x69, 0x7f, 0xd1, 0x02, 0x88, 0xd3, 0xbe, 0xf4, 0x90, - 0xc0, 0xfb, 0x12, 0x7d, 0x79, 0xf9, 0x91, 0x5f, 0xf3, 0x1b, 0x42, 0x05, 0x75, 0x2a, 0xd6, 0x42, - 0xf3, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0xe3, 0x30, 0x17, 0x63, 0xbb, 0x08, 0x23, - 0x06, 0xf3, 0x57, 0x2c, 0x38, 0x9a, 0xe5, 0x9c, 0x42, 0xdf, 0xf1, 0x5c, 0x12, 0xab, 0x4c, 0x73, - 0xd5, 0x6c, 0xde, 0x14, 0xe5, 0x58, 0x61, 0xf4, 0x9c, 0x19, 0xfd, 0x60, 0x29, 0x49, 0xae, 0xc3, - 0x58, 0x25, 0x20, 0x1a, 0x7f, 0xf1, 0x7a, 0x9c, 0x2d, 0x69, 0x78, 0xfe, 0xd9, 0x03, 0x47, 0x7c, - 0xb2, 0xbf, 0x5a, 0x80, 0xa3, 0xdc, 0xc0, 0x6c, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf, 0x2e, 0x5c, - 0x8a, 0xdf, 0x86, 0xd1, 0x96, 0x26, 0x3e, 0xef, 0x14, 0x5e, 0x5f, 0x17, 0xb3, 0xc7, 0x02, 0x3f, - 0xbd, 0x14, 0x1b, 0xb4, 0x50, 0x1d, 0x46, 0xc9, 0xb6, 0x5b, 0x53, 0x96, 0x45, 0x85, 0x03, 0x5f, - 0xd2, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0xf6, 0x6c, 0x16, 0xae, 0xb1, 0x68, 0x7d, 0x5d, - 0xac, 0x89, 0x7e, 0xce, 0x82, 0x13, 0x39, 0xc1, 0xf8, 0x69, 0x73, 0x77, 0x98, 0x29, 0x9f, 0x58, - 0xb6, 0xaa, 0x39, 0x6e, 0xe0, 0x87, 0x05, 0x14, 0x7d, 0x1c, 0xa0, 0x15, 0xa7, 0x30, 0xed, 0x12, - 0xb5, 0xdc, 0x88, 0x5f, 0xac, 0x85, 0xa2, 0x55, 0x99, 0x4e, 0x35, 0x5a, 0xf6, 0x57, 0xfa, 0xa0, - 0x9f, 0x19, 0x71, 0xa1, 0x0a, 0x0c, 0x6e, 0xf2, 0x48, 0x89, 0x1d, 0xe7, 0x8d, 0xe2, 0xca, 0xd0, - 0x8b, 0xf1, 0xbc, 0x69, 0xa5, 0x58, 0x92, 0x41, 0x2b, 0x70, 0x84, 0xa7, 0x67, 0x6d, 0x2c, 0x92, - 0x86, 0xb3, 0x23, 0x25, 0xd3, 0x05, 0xf6, 0xa9, 0x4a, 0x42, 0x5f, 0x4e, 0xa3, 0xe0, 0xac, 0x7a, - 0xe8, 0x75, 0x18, 0x8f, 0xdc, 0x26, 0xf1, 0xdb, 0x91, 0xa4, 0xc4, 0xf3, 0xa1, 0xaa, 0xc7, 0xd3, - 0x9a, 0x01, 0xc5, 0x09, 0x6c, 0xf4, 0x2a, 0x8c, 0xb5, 0x52, 0x32, 0xf8, 0xfe, 0x58, 0x58, 0x65, - 0xca, 0xdd, 0x4d, 0x5c, 0xe6, 0x9f, 0xd2, 0x66, 0xde, 0x38, 0x6b, 0x9b, 0x01, 0x09, 0x37, 0xfd, - 0x46, 0x9d, 0x71, 0xc0, 0xfd, 0x9a, 0x7f, 0x4a, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, 0x75, 0xc7, - 0x6d, 0xb4, 0x03, 0x12, 0x53, 0x19, 0x30, 0xa9, 0x2c, 0x27, 0xe0, 0x38, 0x55, 0xa3, 0xbb, 0x72, - 0x61, 0xf0, 0xc1, 0x28, 0x17, 0xec, 0x5f, 0x2d, 0x80, 0x31, 0xb5, 0xdf, 0xc3, 0xd9, 0x56, 0x5f, - 0x83, 0xbe, 0x8d, 0xa0, 0x55, 0x13, 0x06, 0x8b, 0x99, 0x5f, 0x76, 0x19, 0x57, 0x16, 0xf4, 0x2f, - 0xa3, 0xff, 0x31, 0xab, 0x45, 0xf7, 0xf8, 0xb1, 0x4a, 0xe0, 0xd3, 0x4b, 0x4e, 0x06, 0x53, 0x55, - 0x6e, 0x60, 0x83, 0xf2, 0xbd, 0xde, 0x21, 0xec, 0xb8, 0xf0, 0x65, 0xe1, 0x14, 0x0c, 0xdb, 0xbe, - 0xaa, 0x78, 0xad, 0x4b, 0x2a, 0xe8, 0x22, 0x8c, 0x88, 0x04, 0x98, 0xcc, 0x5b, 0x89, 0x6f, 0x26, - 0x66, 0x8b, 0xb8, 0x18, 0x17, 0x63, 
0x1d, 0xc7, 0xfe, 0xf1, 0x02, 0x1c, 0xc9, 0x70, 0x37, 0xe5, - 0xd7, 0xc8, 0x86, 0x1b, 0x46, 0xc1, 0x4e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0x7a, 0x56, - 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0x77, 0x2e, 0x01, 0x3d, 0xd8, 0xe5, 0x44, 0xaf, 0xed, 0x76, - 0x48, 0x64, 0x86, 0x03, 0x75, 0x6d, 0x33, 0xc3, 0x05, 0x06, 0xa1, 0x4f, 0xc0, 0x0d, 0xa5, 0x8d, - 0xd7, 0x9e, 0x80, 0x5c, 0x1f, 0xcf, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, 0x12, 0x0f, 0xc5, - 0x38, 0xf2, 0x35, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0x27, 0x73, 0x1d, 0xd0, 0x69, 0xd7, - 0x9b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xf2, 0xe4, 0xd1, 0xae, 0x49, 0x6b, 0x73, 0x45, 0x94, 0x63, - 0x85, 0x81, 0xce, 0x41, 0x3f, 0x93, 0xdb, 0x27, 0x93, 0xdf, 0xe1, 0xf9, 0x45, 0x1e, 0x0b, 0x94, - 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf1, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x23, 0x79, 0xa1, 0xd0, - 0xee, 0xfa, 0x7e, 0x03, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0xaa, 0x11, 0x3b, 0x75, 0x3f, - 0xd4, 0x06, 0xed, 0x29, 0x18, 0xdc, 0x22, 0x3b, 0x81, 0xeb, 0x6d, 0x24, 0xad, 0x5d, 0xaf, 0xf2, - 0x62, 0x2c, 0xe1, 0x66, 0x96, 0xf8, 0xc1, 0x07, 0x91, 0x25, 0x5e, 0x5f, 0x01, 0x43, 0x5d, 0xd9, - 0x93, 0x9f, 0x28, 0xc2, 0x04, 0x9e, 0x5f, 0xfc, 0x60, 0x22, 0x6e, 0xa4, 0x27, 0xe2, 0x41, 0x24, - 0x53, 0x3f, 0xd8, 0x6c, 0xfc, 0x9e, 0x05, 0x13, 0x2c, 0x0d, 0xa7, 0x88, 0x1e, 0xe3, 0xfa, 0xde, - 0x21, 0x3c, 0x05, 0x1e, 0x83, 0xfe, 0x80, 0x36, 0x2a, 0x66, 0x50, 0xed, 0x71, 0xd6, 0x13, 0xcc, - 0x61, 0xe8, 0x14, 0xf4, 0xb1, 0x2e, 0xd0, 0xc9, 0x1b, 0xe5, 0x47, 0xf0, 0xa2, 0x13, 0x39, 0x98, - 0x95, 0xb2, 0x38, 0x96, 0x98, 0xb4, 0x1a, 0x2e, 0xef, 0x74, 0x6c, 0x55, 0xf1, 0xfe, 0x08, 0x4d, - 0x93, 0xd9, 0xb5, 0xf7, 0x16, 0xc7, 0x32, 0x9b, 0x64, 0xe7, 0x67, 0xf6, 0x3f, 0x15, 0xe0, 0x4c, - 0x66, 0xbd, 0x9e, 0xe3, 0x58, 0x76, 0xae, 0xfd, 0x30, 0x93, 0xf6, 0x15, 0x0f, 0xd1, 0x97, 0xa0, - 0xaf, 0x57, 0xee, 0xbf, 0xbf, 0x87, 0xf0, 0x92, 0x99, 0x43, 0xf6, 0x3e, 0x09, 0x2f, 0x99, 0xd9, - 0xb7, 0x1c, 0x31, 0xc1, 0xb7, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0xe7, 0xe9, 0x39, 0xc3, 0x80, - 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0x73, 0x30, 0xd1, 0x74, 0x3d, 0x7a, - 0xf8, 0xec, 0x98, 0xac, 0xb8, 0x52, 0xb7, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, 0xae, 0x16, 0x7a, - 0x92, 0x7f, 0xdd, 0xab, 0x07, 0xda, 0x75, 0xb3, 0xa6, 0xc5, 0x89, 0x1a, 0xc5, 0x8c, 0x30, 0x94, - 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbb, 0x9c, 0x68, 0x34, 0x5b, 0x46, 0x34, 0xf3, 0x2a, 0x8c, 0xdd, - 0xb7, 0x9e, 0xc5, 0xfe, 0x66, 0x11, 0x1e, 0xe9, 0xb0, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76, - 0xd6, 0xa7, 0xe6, 0xa1, 0x02, 0x47, 0xd7, 0xdb, 0x8d, 0xc6, 0x0e, 0x73, 0xc0, 0x23, 0x75, 0x89, - 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xba, 0x9c, 0x81, 0x83, 0x33, 0x6b, 0xd2, 0x27, 0x16, 0xbd, - 0x49, 0x76, 0x14, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xe5, 0x6c, - 0x3b, 0x2e, 0x4f, 0x7a, 0x22, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x25, 0x11, 0x70, 0xba, - 0x4e, 0x8e, 0x4a, 0xa8, 0x78, 0x5f, 0x2a, 0x21, 0x33, 0x08, 0xe2, 0x40, 0x7e, 0x10, 0xc4, 0xce, - 0xe7, 0x62, 0xd7, 0x7c, 0x91, 0xef, 0xc0, 0xd8, 0x41, 0x2d, 0xc7, 0x9f, 0x82, 0xc1, 0x40, 0x64, - 0xe2, 0x4f, 0x78, 0xbb, 0xcb, 0x3c, 0xe5, 0x12, 0x6e, 0xff, 0x6f, 0x16, 0x28, 0x59, 0xb2, 0x19, - 0xef, 0xfc, 0x55, 0x66, 0x06, 0xcf, 0xa5, 0xe0, 0x5a, 0x88, 0xb3, 0x63, 0x9a, 0x19, 0x7c, 0x0c, - 0xc4, 0x26, 0x2e, 0x5f, 0x6e, 0x61, 0x1c, 0x59, 0xc3, 0x78, 0x40, 0x08, 0x0d, 0xa4, 0xc2, 0x40, - 0x9f, 0x80, 0xc1, 0xba, 0xbb, 0xed, 0x86, 0x42, 0x8e, 0x76, 0x60, 0x1d, 0x60, 0xfc, 0x7d, 0x8b, - 0x9c, 0x0c, 0x96, 0xf4, 0xec, 0x9f, 0xb2, 0x40, 0xa9, 0x4e, 
0xaf, 0x10, 0xa7, 0x11, 0x6d, 0xa2, - 0x37, 0x00, 0x24, 0x05, 0x25, 0x7b, 0x93, 0x06, 0x5d, 0x80, 0x15, 0x64, 0xdf, 0xf8, 0x87, 0xb5, - 0x3a, 0xe8, 0x75, 0x18, 0xd8, 0x64, 0xb4, 0xc4, 0xb7, 0x9d, 0x53, 0xaa, 0x2e, 0x56, 0xba, 0xbf, - 0x5b, 0x3a, 0x6a, 0xb6, 0x29, 0x6f, 0x31, 0x5e, 0xcb, 0xfe, 0x89, 0x42, 0x3c, 0xa7, 0x6f, 0xb5, - 0xfd, 0xc8, 0x39, 0x04, 0x4e, 0xe4, 0xb2, 0xc1, 0x89, 0x3c, 0xd1, 0x49, 0x37, 0xcc, 0xba, 0x94, - 0xcb, 0x81, 0x5c, 0x4f, 0x70, 0x20, 0x4f, 0x76, 0x27, 0xd5, 0x99, 0xf3, 0xf8, 0x6f, 0x2c, 0x98, - 0x32, 0xf0, 0x0f, 0xe1, 0x02, 0x5c, 0x36, 0x2f, 0xc0, 0x47, 0xbb, 0x7e, 0x43, 0xce, 0xc5, 0xf7, - 0xa3, 0xc5, 0x44, 0xdf, 0xd9, 0x85, 0xf7, 0x2e, 0xf4, 0x6d, 0x3a, 0x41, 0x5d, 0xbc, 0xeb, 0x2f, - 0xf4, 0x34, 0xd6, 0xb3, 0x57, 0x9c, 0x40, 0x18, 0x83, 0x3c, 0x2b, 0x47, 0x9d, 0x16, 0x75, 0x35, - 0x04, 0x61, 0x4d, 0xa1, 0x4b, 0x30, 0x10, 0xd6, 0xfc, 0x96, 0xf2, 0x29, 0x64, 0x49, 0xd4, 0xab, - 0xac, 0x64, 0x7f, 0xb7, 0x84, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x6d, 0x18, 0x63, 0xbf, - 0x94, 0x65, 0x66, 0x31, 0x5f, 0x02, 0x53, 0xd5, 0x11, 0xb9, 0xd9, 0xb2, 0x51, 0x84, 0x4d, 0x52, - 0x33, 0x1b, 0x30, 0xac, 0x3e, 0xeb, 0xa1, 0x6a, 0xfe, 0xff, 0xba, 0x08, 0x47, 0x32, 0xd6, 0x1c, - 0x0a, 0x8d, 0x99, 0xb8, 0xd8, 0xe3, 0x52, 0x7d, 0x8f, 0x73, 0x11, 0xb2, 0x07, 0x60, 0x5d, 0xac, - 0xad, 0x9e, 0x1b, 0xbd, 0x11, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7b, 0xa3, 0xb4, 0xb1, 0x43, 0x1b, - 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0xe9, 0x83, 0xa3, 0x59, 0xe6, 0x2a, 0xe8, - 0x73, 0x30, 0xc0, 0x9c, 0xde, 0xa4, 0xe0, 0xec, 0xc5, 0x5e, 0x0d, 0x5d, 0x66, 0x99, 0xdf, 0x9c, - 0x08, 0x99, 0x3b, 0x2b, 0x8f, 0x23, 0x5e, 0xd8, 0x75, 0x98, 0x45, 0x9b, 0x2c, 0x94, 0x95, 0xb8, - 0x3d, 0xe5, 0xf1, 0xf1, 0x91, 0x9e, 0x3b, 0x20, 0xee, 0xdf, 0x30, 0x61, 0xf5, 0x25, 0x8b, 0xbb, - 0x5b, 0x7d, 0xc9, 0x96, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0x9c, 0xa8, 0xd8, 0xfd, 0x08, 0xe3, 0xb6, - 0x44, 0xea, 0x00, 0x16, 0x36, 0x44, 0x82, 0xc0, 0x8c, 0x0b, 0x23, 0xda, 0xc0, 0x3c, 0xd4, 0xc5, - 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xea, 0x02, 0xfa, 0x59, 0xed, 0xee, 0x17, 0xe7, 0xc1, - 0x87, 0x0d, 0xde, 0xe9, 0x54, 0xc2, 0x15, 0x31, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x35, 0x63, 0xcd, - 0xe7, 0x26, 0xcc, 0x32, 0x2f, 0xfc, 0xce, 0xf1, 0xe5, 0xed, 0x9f, 0xb3, 0x20, 0xe1, 0x2c, 0xa6, - 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x2c, 0xf4, 0x05, 0x7e, 0x43, 0xf2, 0x53, 0x0a, 0x03, 0xfb, - 0x0d, 0x82, 0x19, 0x84, 0x62, 0x44, 0xb1, 0x10, 0x6b, 0x54, 0x7f, 0xa0, 0x8b, 0xa7, 0xf7, 0x63, - 0xd0, 0xdf, 0x20, 0xdb, 0xa4, 0x91, 0xcc, 0x1b, 0x7b, 0x8d, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0xaf, - 0x0f, 0x4e, 0x77, 0x8c, 0x78, 0x47, 0x19, 0xcc, 0x0d, 0x27, 0x22, 0x77, 0x9c, 0x9d, 0x64, 0xbe, - 0xc4, 0xcb, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0xdc, 0xe6, 0x39, 0x80, 0x12, 0xc2, 0x61, 0x91, 0xfa, - 0x47, 0x40, 0x4d, 0x61, 0x63, 0xf1, 0x41, 0x08, 0x1b, 0x9f, 0x07, 0x08, 0xc3, 0x06, 0xb7, 0x09, - 0xad, 0x0b, 0x8f, 0xf0, 0x38, 0x57, 0x54, 0xf5, 0x9a, 0x80, 0x60, 0x0d, 0x0b, 0x2d, 0xc2, 0x64, - 0x2b, 0xf0, 0x23, 0x2e, 0x6b, 0x5f, 0xe4, 0x66, 0xd3, 0xfd, 0x66, 0xb0, 0xb1, 0x4a, 0x02, 0x8e, - 0x53, 0x35, 0xd0, 0x4b, 0x30, 0x22, 0x02, 0x90, 0x55, 0x7c, 0xbf, 0x21, 0xc4, 0x7b, 0xca, 0x92, - 0xb8, 0x1a, 0x83, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x02, 0xfc, 0xc1, 0xcc, 0x6a, 0x5c, 0x88, 0xaf, - 0xe1, 0x25, 0x92, 0x15, 0x0c, 0xf5, 0x94, 0xac, 0x20, 0x16, 0x78, 0x0e, 0xf7, 0xac, 0x4f, 0x86, - 0xae, 0x22, 0xc2, 0xaf, 0xf5, 0xc1, 0x11, 0xb1, 0x70, 0x1e, 0xf6, 0x72, 0xb9, 0x91, 0x5e, 0x2e, - 0x0f, 0x42, 0x24, 0xfa, 0xc1, 0x9a, 0x39, 0xec, 0x35, 0xf3, 0x93, 0x16, 0x98, 0x3c, 
0x24, 0xfa, - 0x8f, 0x72, 0x13, 0xce, 0xbe, 0x94, 0xcb, 0x93, 0xc6, 0x91, 0xcc, 0xdf, 0x5b, 0xea, 0x59, 0xfb, - 0x7f, 0xb1, 0xe0, 0xd1, 0xae, 0x14, 0xd1, 0x12, 0x0c, 0x33, 0x46, 0x57, 0x7b, 0x17, 0x3f, 0xa9, - 0xdc, 0x2a, 0x24, 0x20, 0x87, 0xef, 0x8e, 0x6b, 0xa2, 0xa5, 0x54, 0x66, 0xdf, 0xa7, 0x32, 0x32, - 0xfb, 0x1e, 0x33, 0x86, 0xe7, 0x3e, 0x53, 0xfb, 0x7e, 0x89, 0xde, 0x38, 0xa6, 0x6f, 0xe6, 0x47, - 0x0c, 0x71, 0xae, 0x9d, 0x10, 0xe7, 0x22, 0x13, 0x5b, 0xbb, 0x43, 0xde, 0x80, 0x49, 0x16, 0x99, - 0x94, 0x39, 0xf9, 0x08, 0xa7, 0xce, 0x42, 0x6c, 0xc8, 0x7f, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb, - 0x1f, 0x8a, 0x30, 0xc0, 0xb7, 0xdf, 0x21, 0x3c, 0x7c, 0x9f, 0x81, 0x61, 0xb7, 0xd9, 0x6c, 0xf3, - 0x64, 0xad, 0xfd, 0xb1, 0x59, 0x78, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0x96, 0x85, 0x26, 0xa1, 0x43, - 0xf0, 0x73, 0xde, 0xf1, 0xd9, 0x45, 0x27, 0x72, 0x38, 0x17, 0xa7, 0xee, 0xd9, 0x58, 0xe7, 0x80, - 0x3e, 0x0d, 0x10, 0x46, 0x81, 0xeb, 0x6d, 0xd0, 0x32, 0x91, 0x21, 0xe3, 0xe9, 0x0e, 0xd4, 0xaa, - 0x0a, 0x99, 0xd3, 0x8c, 0xcf, 0x1c, 0x05, 0xc0, 0x1a, 0x45, 0x34, 0x6b, 0xdc, 0xf4, 0x33, 0x89, - 0xb9, 0x03, 0x4e, 0x35, 0x9e, 0xb3, 0x99, 0x97, 0x61, 0x58, 0x11, 0xef, 0x26, 0x57, 0x1c, 0xd5, - 0x19, 0xb6, 0x8f, 0xc1, 0x44, 0xa2, 0x6f, 0x07, 0x12, 0x4b, 0xfe, 0xbe, 0x05, 0x13, 0xbc, 0x33, - 0x4b, 0xde, 0xb6, 0xb8, 0x0d, 0xee, 0xc1, 0xd1, 0x46, 0xc6, 0xa9, 0x2c, 0xa6, 0xbf, 0xf7, 0x53, - 0x5c, 0x89, 0x21, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x74, 0x9e, 0xee, 0x38, 0x7a, 0xea, 0x3a, 0x0d, - 0x11, 0x99, 0x64, 0x94, 0xef, 0x36, 0x5e, 0x86, 0x15, 0xd4, 0xfe, 0x5b, 0x0b, 0xa6, 0x78, 0xcf, - 0xaf, 0x92, 0x1d, 0x75, 0x36, 0x7d, 0x27, 0xfb, 0x2e, 0xd2, 0x84, 0x17, 0x72, 0xd2, 0x84, 0xeb, - 0x9f, 0x56, 0xec, 0xf8, 0x69, 0x5f, 0xb5, 0x40, 0xac, 0x90, 0x43, 0x90, 0xb4, 0x7c, 0x9f, 0x29, - 0x69, 0x99, 0xc9, 0xdf, 0x04, 0x39, 0x22, 0x96, 0x7f, 0xb3, 0x60, 0x92, 0x23, 0xc4, 0x56, 0x10, - 0xdf, 0xd1, 0x79, 0x98, 0x37, 0xbf, 0x28, 0xd3, 0xac, 0xf5, 0x2a, 0xd9, 0x59, 0xf3, 0x2b, 0x4e, - 0xb4, 0x99, 0xfd, 0x51, 0xc6, 0x64, 0xf5, 0x75, 0x9c, 0xac, 0xba, 0xdc, 0x40, 0x46, 0x42, 0xc8, - 0x2e, 0x02, 0xe0, 0x83, 0x26, 0x84, 0xb4, 0xff, 0xd1, 0x02, 0xc4, 0x9b, 0x31, 0x18, 0x37, 0xca, - 0x0e, 0xb1, 0x52, 0xed, 0xa2, 0x8b, 0x8f, 0x26, 0x05, 0xc1, 0x1a, 0xd6, 0x03, 0x19, 0x9e, 0x84, - 0x29, 0x4b, 0xb1, 0xbb, 0x29, 0xcb, 0x01, 0x46, 0xf4, 0xab, 0x83, 0x90, 0x74, 0xeb, 0x44, 0x37, - 0x61, 0xb4, 0xe6, 0xb4, 0x9c, 0xdb, 0x6e, 0xc3, 0x8d, 0x5c, 0x12, 0x76, 0xb2, 0x73, 0x5b, 0xd0, - 0xf0, 0x84, 0xf1, 0x81, 0x56, 0x82, 0x0d, 0x3a, 0x68, 0x16, 0xa0, 0x15, 0xb8, 0xdb, 0x6e, 0x83, - 0x6c, 0x30, 0x81, 0x10, 0x8b, 0x85, 0xc4, 0x8d, 0xee, 0x64, 0x29, 0xd6, 0x30, 0x32, 0x42, 0x90, - 0x14, 0x1f, 0x72, 0x08, 0x12, 0x38, 0xb4, 0x10, 0x24, 0x7d, 0x07, 0x0a, 0x41, 0x32, 0x74, 0xe0, - 0x10, 0x24, 0xfd, 0x3d, 0x85, 0x20, 0xc1, 0x70, 0x5c, 0xf2, 0x9e, 0xf4, 0xff, 0xb2, 0xdb, 0x20, - 0xe2, 0xc1, 0xc1, 0x03, 0x38, 0xcd, 0xec, 0xed, 0x96, 0x8e, 0xe3, 0x4c, 0x0c, 0x9c, 0x53, 0x13, - 0x7d, 0x1c, 0xa6, 0x9d, 0x46, 0xc3, 0xbf, 0xa3, 0x26, 0x75, 0x29, 0xac, 0x39, 0x8d, 0x38, 0xae, - 0xdf, 0xd0, 0xfc, 0xa9, 0xbd, 0xdd, 0xd2, 0xf4, 0x5c, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x83, - 0xe1, 0x56, 0xe0, 0xd7, 0x56, 0x34, 0xdf, 0xf3, 0x33, 0x74, 0x00, 0x2b, 0xb2, 0x70, 0x7f, 0xb7, - 0x34, 0xa6, 0xfe, 0xb0, 0x0b, 0x3f, 0xae, 0x90, 0x11, 0xdd, 0x63, 0xe4, 0x61, 0x47, 0xf7, 0x18, - 0x7d, 0xc0, 0xd1, 0x3d, 0xec, 0x2d, 0x38, 0x52, 0x25, 0x81, 0xeb, 0x34, 0xdc, 0x7b, 0x94, 0x27, - 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x07, 0x89, 0x53, 0xbf, 0xa7, 0xa0, 0xe7, 0x9a, 0x5c, 0x46, 0x9e, - 0xf2, 0x31, 
0x21, 0xfb, 0xff, 0xb7, 0x60, 0x50, 0xb8, 0x8a, 0x1e, 0x02, 0x67, 0x3a, 0x67, 0xa8, - 0x64, 0x4a, 0xd9, 0x93, 0xc2, 0x3a, 0x93, 0xab, 0x8c, 0x29, 0x27, 0x94, 0x31, 0x8f, 0x76, 0x22, - 0xd2, 0x59, 0x0d, 0xf3, 0x9f, 0x15, 0xe9, 0x0b, 0xc1, 0x08, 0x5a, 0xf0, 0xf0, 0x87, 0x60, 0x15, - 0x06, 0x43, 0xe1, 0x34, 0x5f, 0xc8, 0xf7, 0xe5, 0x49, 0x4e, 0x62, 0x6c, 0x03, 0x29, 0xdc, 0xe4, - 0x25, 0x91, 0x4c, 0x6f, 0xfc, 0xe2, 0x43, 0xf4, 0xc6, 0xef, 0x16, 0xd6, 0xa1, 0xef, 0x41, 0x84, - 0x75, 0xb0, 0xbf, 0xce, 0x6e, 0x67, 0xbd, 0xfc, 0x10, 0x18, 0xb7, 0xcb, 0xe6, 0x3d, 0x6e, 0x77, - 0x58, 0x59, 0xa2, 0x53, 0x39, 0x0c, 0xdc, 0xef, 0x5a, 0x70, 0x3a, 0xe3, 0xab, 0x34, 0x6e, 0xee, - 0x59, 0x18, 0x72, 0xda, 0x75, 0x57, 0xed, 0x65, 0x4d, 0x5b, 0x3c, 0x27, 0xca, 0xb1, 0xc2, 0x40, - 0x0b, 0x30, 0x45, 0xee, 0xb6, 0x5c, 0xae, 0x86, 0xd7, 0x4d, 0xc7, 0x8b, 0xdc, 0xbf, 0x78, 0x29, - 0x09, 0xc4, 0x69, 0x7c, 0x15, 0x1a, 0xae, 0x98, 0x1b, 0x1a, 0xee, 0x37, 0x2d, 0x18, 0x51, 0x6e, - 0xe3, 0x0f, 0x7d, 0xb4, 0xdf, 0x30, 0x47, 0xfb, 0x91, 0x0e, 0xa3, 0x9d, 0x33, 0xcc, 0x7f, 0x53, - 0x50, 0xfd, 0xad, 0xf8, 0x41, 0xd4, 0x03, 0x97, 0x78, 0xff, 0x6e, 0x2f, 0x17, 0x61, 0xc4, 0x69, - 0xb5, 0x24, 0x40, 0xda, 0x2f, 0xb2, 0x14, 0x16, 0x71, 0x31, 0xd6, 0x71, 0x94, 0x17, 0x4e, 0x31, - 0xd7, 0x0b, 0xa7, 0x0e, 0x10, 0x39, 0xc1, 0x06, 0x89, 0x68, 0x99, 0x30, 0xb7, 0xce, 0x3f, 0x6f, - 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0xb2, 0x17, 0x5d, 0x0f, 0xf8, 0x33, - 0x55, 0x0b, 0xc0, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x0c, 0x91, 0xc2, 0xda, 0xe8, 0x37, 0x0d, 0x61, - 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0xcb, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0xc0, 0x82, 0xff, - 0x34, 0xaa, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf5, 0xf0, 0x85, 0x9d, 0x0f, 0x7b, 0xda, 0xb0, 0xee, - 0xcf, 0x1a, 0xc7, 0x38, 0x44, 0x9f, 0x4c, 0x19, 0x37, 0x3d, 0xd7, 0xe5, 0xd6, 0x38, 0x80, 0x39, - 0x13, 0xcb, 0x67, 0xc7, 0xb2, 0x7d, 0x95, 0x2b, 0x62, 0x5f, 0x68, 0xf9, 0xec, 0x04, 0x00, 0xc7, - 0x38, 0x94, 0x61, 0x53, 0x7f, 0xc2, 0x69, 0x14, 0x87, 0x3d, 0x57, 0xd8, 0x21, 0xd6, 0x30, 0xd0, - 0x05, 0x21, 0xb4, 0xe0, 0xba, 0x87, 0x47, 0x12, 0x42, 0x0b, 0x39, 0x5c, 0x9a, 0xa4, 0xe9, 0x22, - 0x8c, 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0x41, 0x5b, 0xe8, 0x8f, 0xa3, 0xeb, 0x2e, 0xc5, 0xc5, - 0x58, 0xc7, 0x41, 0x6b, 0x30, 0x11, 0x72, 0x59, 0x9e, 0x4a, 0xb6, 0xc1, 0x65, 0xa2, 0x4f, 0x2b, - 0x87, 0x7d, 0x13, 0xbc, 0xcf, 0x8a, 0xf8, 0xe9, 0x24, 0xc3, 0x98, 0x24, 0x49, 0xa0, 0xd7, 0x61, - 0xbc, 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0x36, 0x3e, 0x43, 0x46, 0x2c, 0xcb, 0xf1, - 0x6b, 0x06, 0x14, 0x27, 0xb0, 0x29, 0x83, 0xa8, 0x97, 0x88, 0x04, 0x31, 0x8e, 0xb7, 0x41, 0xc2, - 0xe9, 0x61, 0xf6, 0x55, 0x8c, 0x41, 0xbc, 0x96, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x4b, 0x30, 0x2a, - 0x3f, 0x5f, 0x8b, 0xfa, 0x13, 0x3b, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x8e, 0xc9, 0xff, - 0x6b, 0x81, 0xb3, 0xbe, 0xee, 0xd6, 0x44, 0x28, 0x0c, 0xee, 0xfc, 0xfd, 0x31, 0xe9, 0x69, 0xba, - 0x94, 0x85, 0xb4, 0xbf, 0x5b, 0x3a, 0x25, 0x46, 0x2d, 0x13, 0x8e, 0xb3, 0x69, 0xa3, 0x15, 0x38, - 0xc2, 0x6d, 0x60, 0x16, 0x36, 0x49, 0x6d, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x57, - 0xd2, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x07, 0xa6, 0x5b, 0xed, 0xdb, 0x0d, 0x37, 0xdc, 0x5c, 0xf5, - 0x23, 0x66, 0x42, 0x36, 0x57, 0xaf, 0x07, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x91, 0x9a, - 0x2a, 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x07, 0xc7, 0x12, 0x0b, 0x41, 0x84, 0x5c, 0x19, 0xcf, - 0x4f, 0xb5, 0x55, 0xcd, 0xaa, 0x20, 0xa2, 0x17, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x15, 0x00, - 0xb7, 0xb5, 0xec, 0x34, 0xdd, 0x06, 
0x7d, 0x8e, 0x1e, 0x61, 0x6b, 0x84, 0x3e, 0x4d, 0xa0, 0x5c, - 0x91, 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc1, 0xb8, 0xf8, 0xb7, 0x23, - 0xa6, 0x74, 0x4a, 0x65, 0x65, 0x1d, 0x97, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, - 0x80, 0xd3, 0x32, 0x25, 0xac, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xbf, 0xd5, 0x10, 0xf7, 0x29, - 0x9a, 0xeb, 0x84, 0x88, 0x3b, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xb1, 0x38, - 0x22, 0xe8, 0xb5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0xc7, 0x5c, 0x2f, 0x6b, 0x55, 0x1f, 0x67, - 0x84, 0x3e, 0xca, 0x9d, 0xe5, 0x3b, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0xca, 0x70, 0x24, - 0xe2, 0x05, 0x8b, 0x6e, 0xc8, 0xd3, 0xe7, 0xd0, 0x67, 0xdf, 0x09, 0xd6, 0xdc, 0x09, 0xba, 0x9a, - 0xd7, 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0xbd, 0x19, 0x80, 0x7e, 0xc3, 0xa2, 0xb5, 0x35, 0x46, 0x1f, - 0x7d, 0x06, 0x46, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xb9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, - 0x41, 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x2d, 0x23, 0xc8, 0xc5, 0x85, 0xde, 0x98, 0xa2, - 0xde, 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x0d, 0x86, 0x6a, 0x0d, 0x97, 0x78, 0x51, 0xb9, - 0xd2, 0x29, 0x50, 0xeb, 0x82, 0xc0, 0x11, 0x5b, 0x51, 0x64, 0xbd, 0xe2, 0x65, 0x58, 0x51, 0xb0, - 0x2f, 0xc1, 0x48, 0xb5, 0x41, 0x48, 0x8b, 0xfb, 0x71, 0xa1, 0xa7, 0xd8, 0xc3, 0x84, 0xb1, 0x96, - 0x16, 0x63, 0x2d, 0xf5, 0x37, 0x07, 0x63, 0x2a, 0x25, 0xdc, 0xfe, 0xb3, 0x02, 0x94, 0xba, 0x24, - 0x5f, 0x4b, 0xe8, 0xdb, 0xac, 0x9e, 0xf4, 0x6d, 0x73, 0x30, 0x11, 0xff, 0xd3, 0x45, 0x79, 0xca, - 0x18, 0xfa, 0xa6, 0x09, 0xc6, 0x49, 0xfc, 0x9e, 0xfd, 0x5a, 0x74, 0x95, 0x5d, 0x5f, 0x57, 0xcf, - 0x2c, 0x43, 0x55, 0xdf, 0xdf, 0xfb, 0xdb, 0x3b, 0x57, 0xed, 0x6a, 0x7f, 0xbd, 0x00, 0xc7, 0xd4, - 0x10, 0x7e, 0xef, 0x0e, 0xdc, 0x8d, 0xf4, 0xc0, 0x3d, 0x00, 0xa5, 0xb5, 0x7d, 0x1d, 0x06, 0x78, - 0xf4, 0xd8, 0x1e, 0x78, 0xfe, 0xc7, 0xcc, 0x40, 0xfe, 0x8a, 0xcd, 0x34, 0x82, 0xf9, 0xff, 0x98, - 0x05, 0x13, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0xfb, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0xb3, - 0xd0, 0xb7, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xc5, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0xdf, 0x59, - 0xd0, 0xbf, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x97, 0xef, 0x42, 0x2f, - 0xc1, 0x00, 0x59, 0x5f, 0x27, 0xb5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0x31, 0xb0, 0xc4, 0x4a, 0x29, - 0x13, 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0xb7, 0x60, 0x38, 0x72, 0x9b, 0x64, 0xae, 0x5e, - 0x17, 0x36, 0x01, 0xf7, 0x11, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0xcb, 0x05, 0x80, - 0x38, 0x5a, 0x5d, 0xb7, 0x4f, 0x9c, 0x4f, 0x69, 0x8b, 0xcf, 0x65, 0x68, 0x8b, 0x51, 0x4c, 0x30, - 0x43, 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd3, 0x30, 0xf5, 0x1d, 0x64, 0x98, 0x16, 0x60, 0x2a, 0x8e, - 0xb6, 0x67, 0x06, 0x1b, 0x65, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0xb3, 0x2a, - 0xe8, 0x98, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x32, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, - 0xab, 0x0e, 0xff, 0x45, 0x0b, 0x8e, 0x26, 0xdb, 0x61, 0x7e, 0xf7, 0x5f, 0xb4, 0xe0, 0x58, 0x9c, - 0x7b, 0x28, 0x6d, 0x82, 0xf0, 0x62, 0xc7, 0x40, 0x6a, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, - 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0x5f, 0x1f, 0x4c, 0xe7, 0x45, 0x60, 0x63, 0x9e, 0x46, 0xce, - 0xdd, 0xea, 0x16, 0xb9, 0x23, 0xfc, 0x39, 0x62, 0x4f, 0x23, 0x5e, 0x8c, 0x25, 0x3c, 0x99, 0x6e, - 0xaa, 0xd0, 0x63, 0xba, 0xa9, 0x4d, 0x98, 0xba, 0xb3, 0x49, 0xbc, 0x1b, 0x5e, 0xe8, 0x44, 0x6e, - 0xb8, 0xee, 0x32, 0x05, 0x3a, 0x5f, 0x37, 0xaf, 0x48, 0xaf, 0x8b, 0x5b, 0x49, 0x84, 0xfd, 0xdd, - 0xd2, 0x69, 0xa3, 0x20, 0xee, 0x32, 0x3f, 0x48, 0x70, 0x9a, 
0x68, 0x3a, 0x5b, 0x57, 0xdf, 0x43, - 0xce, 0xd6, 0xd5, 0x74, 0x85, 0xd9, 0x8d, 0x74, 0x23, 0x61, 0xcf, 0xd6, 0x15, 0x55, 0x8a, 0x35, - 0x0c, 0xf4, 0x29, 0x40, 0x7a, 0xba, 0x45, 0x23, 0x00, 0xee, 0x73, 0x7b, 0xbb, 0x25, 0xb4, 0x9a, - 0x82, 0xee, 0xef, 0x96, 0x8e, 0xd0, 0xd2, 0xb2, 0x47, 0x9f, 0xbf, 0x71, 0xd4, 0xc0, 0x0c, 0x42, - 0xe8, 0x16, 0x4c, 0xd2, 0x52, 0xb6, 0xa3, 0x64, 0x74, 0x5d, 0xfe, 0x64, 0x7d, 0x66, 0x6f, 0xb7, - 0x34, 0xb9, 0x9a, 0x80, 0xe5, 0x91, 0x4e, 0x11, 0xc9, 0x48, 0xda, 0x35, 0xd4, 0x6b, 0xd2, 0x2e, - 0xfb, 0x8b, 0x16, 0x9c, 0xa4, 0x17, 0x5c, 0xfd, 0x5a, 0x8e, 0x16, 0xdd, 0x69, 0xb9, 0x5c, 0x4f, - 0x23, 0xae, 0x1a, 0x26, 0xab, 0xab, 0x94, 0xb9, 0x96, 0x46, 0x41, 0xe9, 0x09, 0xbf, 0xe5, 0x7a, - 0xf5, 0xe4, 0x09, 0x7f, 0xd5, 0xf5, 0xea, 0x98, 0x41, 0xd4, 0x95, 0x55, 0xcc, 0x8d, 0xd6, 0xff, - 0x35, 0xba, 0x57, 0x69, 0x5f, 0xbe, 0xa3, 0xdd, 0x40, 0xcf, 0xe8, 0x3a, 0x55, 0x61, 0x3e, 0x99, - 0xab, 0x4f, 0xfd, 0x82, 0x05, 0xc2, 0xfb, 0xbd, 0x87, 0x3b, 0xf9, 0x6d, 0x18, 0xdd, 0x4e, 0xa7, - 0xa2, 0x3d, 0x9b, 0x1f, 0x0e, 0x40, 0x24, 0xa0, 0x55, 0x2c, 0xba, 0x91, 0x76, 0xd6, 0xa0, 0x65, - 0xd7, 0x41, 0x40, 0x17, 0x09, 0xd3, 0x6a, 0x74, 0xef, 0xcd, 0xf3, 0x00, 0x75, 0x86, 0xcb, 0xf2, - 0xd3, 0x17, 0x4c, 0x8e, 0x6b, 0x51, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x7a, 0x11, 0x46, 0x64, 0xea, - 0xd3, 0xb6, 0xd7, 0x8b, 0xec, 0x51, 0x67, 0x9c, 0x0a, 0x5d, 0x19, 0xa7, 0x77, 0x60, 0x2a, 0x20, - 0xb5, 0x76, 0x10, 0xba, 0xdb, 0x44, 0x82, 0xc5, 0x26, 0x99, 0xe5, 0xc9, 0x22, 0x12, 0xc0, 0x7d, - 0x16, 0x22, 0x2b, 0x51, 0xc8, 0x94, 0xc6, 0x69, 0x42, 0xe8, 0x02, 0x0c, 0x33, 0xd1, 0x7b, 0x25, - 0x16, 0x08, 0x2b, 0xc1, 0xd7, 0x8a, 0x04, 0xe0, 0x18, 0x87, 0x3d, 0x0e, 0xda, 0xb7, 0x19, 0x7a, - 0xc2, 0x13, 0xbc, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0x8f, 0xc3, 0x24, 0xaf, 0x17, 0xf8, 0x2d, 0x67, - 0x83, 0xab, 0x04, 0xfb, 0x55, 0x78, 0x9d, 0xc9, 0x95, 0x04, 0x6c, 0x7f, 0xb7, 0x74, 0x34, 0x59, - 0xc6, 0xba, 0x9d, 0xa2, 0xc2, 0x2c, 0xff, 0x78, 0x23, 0xf4, 0xce, 0x48, 0x19, 0x0c, 0xc6, 0x20, - 0xac, 0xe3, 0xd9, 0xff, 0x6a, 0xc1, 0x94, 0x36, 0x55, 0x3d, 0xe7, 0xeb, 0x30, 0x06, 0xa9, 0xd0, - 0xc3, 0x20, 0x1d, 0x2c, 0xda, 0x43, 0xe6, 0x0c, 0xf7, 0x3d, 0xa0, 0x19, 0xb6, 0x3f, 0x03, 0x28, - 0x9d, 0x57, 0x17, 0xbd, 0xc9, 0x0d, 0xf9, 0xdd, 0x80, 0xd4, 0x3b, 0x29, 0xfc, 0xf5, 0xc8, 0x39, - 0xd2, 0x73, 0x95, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0xe3, 0x7d, 0x30, 0x99, 0x8c, 0xd5, 0x81, 0xae, - 0xc0, 0x00, 0xe7, 0xd2, 0x05, 0xf9, 0x0e, 0xf6, 0x64, 0x5a, 0x84, 0x0f, 0x9e, 0x4b, 0x87, 0x73, - 0xf7, 0xa2, 0x3e, 0x7a, 0x07, 0x46, 0xea, 0xfe, 0x1d, 0xef, 0x8e, 0x13, 0xd4, 0xe7, 0x2a, 0x65, - 0x71, 0x42, 0x64, 0x0a, 0xa0, 0x16, 0x63, 0x34, 0x3d, 0x6a, 0x08, 0xb3, 0x9d, 0x88, 0x41, 0x58, - 0x27, 0x87, 0xd6, 0x58, 0x7a, 0xa7, 0x75, 0x77, 0x63, 0xc5, 0x69, 0x75, 0xf2, 0xea, 0x5a, 0x90, - 0x48, 0x1a, 0xe5, 0x31, 0x91, 0x03, 0x8a, 0x03, 0x70, 0x4c, 0x08, 0x7d, 0x0e, 0x8e, 0x84, 0x39, - 0x2a, 0xb1, 0xbc, 0x34, 0xeb, 0x9d, 0xb4, 0x44, 0x5c, 0x98, 0x92, 0xa5, 0x3c, 0xcb, 0x6a, 0x06, - 0xdd, 0x05, 0x24, 0x44, 0xcf, 0x6b, 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0xd3, 0x3f, - 0x7d, 0x38, 0x5b, 0x4e, 0x90, 0xc4, 0xd6, 0xda, 0x66, 0xe1, 0x85, 0xd3, 0x18, 0x38, 0xa3, 0x0d, - 0xfb, 0x0b, 0x7d, 0x30, 0x23, 0x13, 0x59, 0x67, 0x78, 0xaf, 0x7c, 0xde, 0x4a, 0xb8, 0xaf, 0xbc, - 0x92, 0x7f, 0xd0, 0x3f, 0x34, 0x27, 0x96, 0x2f, 0xa5, 0x9d, 0x58, 0x5e, 0x3b, 0x60, 0x37, 0x1e, - 0x98, 0x2b, 0xcb, 0xf7, 0xac, 0xff, 0xc9, 0xde, 0x51, 0x30, 0xae, 0x66, 0x84, 0x79, 0xec, 0xf6, - 0x8a, 0x54, 0x1d, 0xe5, 0x3c, 0xff, 0xaf, 0x08, 0x1c, 0xe3, 0xb2, 0x1f, 0x95, 0x11, 
0xde, 0xd9, - 0x39, 0xab, 0xe8, 0x50, 0x9a, 0xa4, 0xd9, 0x8a, 0x76, 0x16, 0xdd, 0x40, 0xf4, 0x38, 0x93, 0xe6, - 0x92, 0xc0, 0x49, 0xd3, 0x94, 0x10, 0xac, 0xe8, 0xa0, 0x6d, 0x98, 0xda, 0x60, 0x11, 0x9f, 0xb4, - 0x9c, 0xd2, 0xe2, 0x5c, 0xc8, 0xdc, 0xb7, 0x97, 0x17, 0x96, 0xf2, 0x13, 0x50, 0xf3, 0xc7, 0x5f, - 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa8, 0x73, 0x27, 0x5c, 0x6a, 0x38, 0x61, 0xe4, 0xd6, - 0xe6, 0x1b, 0x7e, 0x6d, 0xab, 0x1a, 0xf9, 0x81, 0x4c, 0x16, 0x99, 0xf9, 0xf6, 0x9a, 0xbb, 0x55, - 0x4d, 0xe1, 0x1b, 0xcd, 0x4f, 0xef, 0xed, 0x96, 0x8e, 0x66, 0x61, 0xe1, 0xcc, 0xb6, 0xd0, 0x2a, - 0x0c, 0x6e, 0xb8, 0x11, 0x26, 0x2d, 0x5f, 0x9c, 0x16, 0x99, 0x47, 0xe1, 0x65, 0x8e, 0x62, 0xb4, - 0xc4, 0x22, 0x52, 0x09, 0x00, 0x96, 0x44, 0xd0, 0x9b, 0xea, 0x12, 0x18, 0xc8, 0x17, 0xc0, 0xa6, - 0x6d, 0xef, 0x32, 0xaf, 0x81, 0xd7, 0xa1, 0xe8, 0xad, 0x87, 0x9d, 0x62, 0xf1, 0xac, 0x2e, 0x1b, - 0xf2, 0xb3, 0xf9, 0x41, 0xfa, 0x34, 0x5e, 0x5d, 0xae, 0x62, 0x5a, 0x91, 0xb9, 0xbd, 0x86, 0xb5, - 0xd0, 0x15, 0x89, 0xa7, 0x32, 0xbd, 0x80, 0xcb, 0xd5, 0x85, 0x6a, 0xd9, 0xa0, 0xc1, 0xa2, 0x1a, - 0xb2, 0x62, 0xcc, 0xab, 0xa3, 0x9b, 0x30, 0xbc, 0xc1, 0x0f, 0xbe, 0xf5, 0x50, 0x24, 0xb3, 0xcf, - 0xbc, 0x8c, 0x2e, 0x4b, 0x24, 0x83, 0x1e, 0xbb, 0x32, 0x14, 0x08, 0xc7, 0xa4, 0xd0, 0x17, 0x2c, - 0x38, 0xd6, 0x4a, 0x48, 0x50, 0x99, 0xb3, 0x9a, 0x30, 0x53, 0xcb, 0x74, 0x00, 0xa8, 0x64, 0x55, - 0x30, 0x1a, 0x64, 0xea, 0x97, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, 0x1d, 0xe8, 0xe0, 0x76, 0xbd, 0x53, - 0xae, 0xa2, 0x44, 0x60, 0x22, 0x3e, 0xd0, 0x78, 0x7e, 0x11, 0xd3, 0x8a, 0x68, 0x0d, 0x60, 0xbd, - 0x41, 0x44, 0xc4, 0x47, 0x61, 0x14, 0x95, 0x79, 0xfb, 0x2f, 0x2b, 0x2c, 0x41, 0x87, 0xbd, 0x44, - 0xe3, 0x52, 0xac, 0xd1, 0xa1, 0x4b, 0xa9, 0xe6, 0x7a, 0x75, 0x12, 0x30, 0xe5, 0x56, 0xce, 0x52, - 0x5a, 0x60, 0x18, 0xe9, 0xa5, 0xc4, 0xcb, 0xb1, 0xa0, 0xc0, 0x68, 0x91, 0xd6, 0xe6, 0x7a, 0xd8, - 0x29, 0x2b, 0xc6, 0x02, 0x69, 0x6d, 0x26, 0x16, 0x14, 0xa7, 0xc5, 0xca, 0xb1, 0xa0, 0x40, 0xb7, - 0xcc, 0x3a, 0xdd, 0x40, 0x24, 0x98, 0x9e, 0xc8, 0xdf, 0x32, 0xcb, 0x1c, 0x25, 0xbd, 0x65, 0x04, - 0x00, 0x4b, 0x22, 0xe8, 0xd3, 0x26, 0xb7, 0x33, 0xc9, 0x68, 0x3e, 0xd3, 0x85, 0xdb, 0x31, 0xe8, - 0x76, 0xe6, 0x77, 0x5e, 0x81, 0xc2, 0x7a, 0x8d, 0x29, 0xc5, 0x72, 0x74, 0x06, 0xcb, 0x0b, 0x06, - 0x35, 0x16, 0x65, 0x7e, 0x79, 0x01, 0x17, 0xd6, 0x6b, 0x74, 0xe9, 0x3b, 0xf7, 0xda, 0x01, 0x59, - 0x76, 0x1b, 0x44, 0x64, 0xc8, 0xc8, 0x5c, 0xfa, 0x73, 0x12, 0x29, 0xbd, 0xf4, 0x15, 0x08, 0xc7, - 0xa4, 0x28, 0xdd, 0x98, 0x07, 0x3b, 0x92, 0x4f, 0x57, 0xb1, 0x5a, 0x69, 0xba, 0x99, 0x5c, 0xd8, - 0x16, 0x8c, 0x6d, 0x87, 0xad, 0x4d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, 0x89, 0x54, 0x71, 0x53, - 0x20, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, 0xea, 0xc4, 0xb0, 0x49, - 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, 0x19, 0x11, 0xe7, 0xf8, - 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0xf1, 0x2e, 0x83, 0x9d, 0xea, 0x6f, - 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb5, 0xe9, 0x47, 0xbe, 0x97, 0xb8, 0xe4, - 0x4e, 0xe4, 0x5f, 0x34, 0x95, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, 0x33, 0xdb, 0xa2, 0x1f, - 0xd7, 0x92, 0x91, 0x01, 0x45, 0x16, 0x8f, 0xa7, 0x72, 0x02, 0x6b, 0xa6, 0xc3, 0x07, 0xf2, 0x8f, - 0x53, 0x20, 0x1c, 0x93, 0x42, 0x75, 0x18, 0x6f, 0x19, 0x11, 0x67, 0x59, 0x36, 0x92, 0x1c, 0xbe, - 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, 0xf7, 0xb8, 0xab, 0x1f, - 0x4b, 0x56, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, 0x4b, 0x22, 0x74, 0x34, - 0x84, 0x83, 
0x9a, 0x1f, 0xb2, 0x9c, 0x3f, 0x79, 0x0a, 0xf6, 0x2c, 0x35, 0x91, 0x0c, 0xb3, 0x2e, - 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xca, 0x3f, 0xc9, 0x93, 0xd7, 0x1d, 0x3b, - 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0xf2, 0x95, 0xe4, 0xf4, 0x4b, 0x85, - 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, 0xdd, 0x99, 0x0e, 0x57, - 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, 0x4c, 0xe7, 0x7d, 0x1b, - 0xeb, 0xd0, 0x2a, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, 0x56, 0xcf, 0x01, 0x87, - 0x2f, 0xc3, 0x94, 0x72, 0x47, 0x6c, 0xb8, 0xb5, 0x1d, 0x2d, 0x49, 0xa9, 0x0a, 0xcd, 0x53, 0x4d, - 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x07, 0x13, 0x46, 0x61, 0x79, 0x51, 0x3c, 0xff, 0xe3, 0x4c, 0x1b, - 0x26, 0x18, 0x27, 0xf1, 0xed, 0xdf, 0xb0, 0xe0, 0x44, 0x4e, 0xfe, 0xfb, 0x9e, 0xe3, 0xe9, 0xae, - 0xc3, 0x44, 0xcb, 0xac, 0xda, 0x25, 0x04, 0xb8, 0x91, 0x65, 0x5f, 0xf5, 0x35, 0x01, 0xc0, 0x49, - 0xa2, 0xf6, 0xaf, 0x15, 0xe0, 0x74, 0x47, 0xfb, 0x7a, 0x84, 0xe1, 0xf8, 0x46, 0x33, 0x74, 0x16, - 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x8b, 0xd4, 0x34, 0x2d, 0x28, 0x33, 0x54, 0xbf, - 0xbc, 0x52, 0x9d, 0x4b, 0x63, 0xe0, 0x9c, 0x9a, 0x68, 0x19, 0x50, 0x1a, 0x22, 0x66, 0x98, 0x3d, - 0x71, 0xd3, 0xf4, 0x70, 0x46, 0x0d, 0xf4, 0x32, 0x8c, 0x29, 0xbb, 0x7d, 0x6d, 0xc6, 0xd9, 0x05, - 0x81, 0x75, 0x00, 0x36, 0xf1, 0xd0, 0x45, 0x9e, 0x82, 0x49, 0x24, 0xeb, 0x12, 0x2a, 0xd3, 0x09, - 0x99, 0x5f, 0x49, 0x14, 0x63, 0x1d, 0x67, 0xfe, 0xd2, 0x5f, 0x7c, 0xeb, 0xcc, 0x87, 0xfe, 0xea, - 0x5b, 0x67, 0x3e, 0xf4, 0xb7, 0xdf, 0x3a, 0xf3, 0xa1, 0x1f, 0xda, 0x3b, 0x63, 0xfd, 0xc5, 0xde, - 0x19, 0xeb, 0xaf, 0xf6, 0xce, 0x58, 0x7f, 0xbb, 0x77, 0xc6, 0xfa, 0xdf, 0xf7, 0xce, 0x58, 0x5f, - 0xfe, 0x3f, 0xce, 0x7c, 0xe8, 0x6d, 0x14, 0x47, 0xa8, 0xbe, 0x40, 0x67, 0xe7, 0xc2, 0xf6, 0xc5, - 0xff, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf5, 0xf1, 0x8c, 0x4c, 0x2d, 0x26, 0x01, 0x00, + // 16665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x5b, 0x90, 0x5c, 0x49, + 0x76, 0x18, 0xb6, 0xb7, 0xaa, 0x9f, 0xa7, 0xdf, 0x89, 0x57, 0xa1, 0x07, 0x40, 0x61, 0xee, 0xcc, + 0x60, 0x30, 0x3b, 0x33, 0x8d, 0xc5, 0x3c, 0x76, 0xb1, 0x33, 0xb3, 0xc3, 0xe9, 0x27, 0xd0, 0x03, + 0x74, 0xa3, 0x26, 0xab, 0x01, 0xec, 0x63, 0x76, 0xb5, 0x17, 0x55, 0xd9, 0xdd, 0x77, 0xbb, 0xea, + 0xde, 0x9a, 0x7b, 0x6f, 0x35, 0xd0, 0x30, 0x15, 0xa4, 0x56, 0xe6, 0x4a, 0x4b, 0xd2, 0x11, 0x1b, + 0x0a, 0x4b, 0x72, 0x90, 0x0a, 0x7e, 0xe8, 0x45, 0xd2, 0xb4, 0x64, 0x52, 0xa4, 0x45, 0x59, 0x14, + 0x29, 0xda, 0x96, 0x23, 0x68, 0x7f, 0xc8, 0x14, 0x23, 0xcc, 0x65, 0x58, 0xe1, 0x96, 0xd9, 0xb6, + 0x42, 0xc1, 0x0f, 0x53, 0x0a, 0xda, 0x1f, 0x76, 0x87, 0x6c, 0x2a, 0xf2, 0x79, 0x33, 0xef, 0xab, + 0xaa, 0x31, 0x40, 0xef, 0x70, 0x63, 0xfe, 0xaa, 0xf2, 0x9c, 0x3c, 0x99, 0x37, 0x1f, 0x27, 0x4f, + 0x9e, 0x73, 0xf2, 0x1c, 0xb0, 0x77, 0xae, 0x85, 0x73, 0xae, 0x7f, 0xc5, 0xe9, 0xb8, 0x57, 0x1a, + 0x7e, 0x40, 0xae, 0xec, 0x5e, 0xbd, 0xb2, 0x45, 0x3c, 0x12, 0x38, 0x11, 0x69, 0xce, 0x75, 0x02, + 0x3f, 0xf2, 0x11, 0xe2, 0x38, 0x73, 0x4e, 0xc7, 0x9d, 0xa3, 0x38, 0x73, 0xbb, 0x57, 0x67, 0x5f, + 0xdd, 0x72, 0xa3, 0xed, 0xee, 0xfd, 0xb9, 0x86, 0xdf, 0xbe, 0xb2, 0xe5, 0x6f, 0xf9, 0x57, 0x18, + 0xea, 0xfd, 0xee, 0x26, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xec, 0x1b, 0x71, 0x33, 0x6d, + 0xa7, 0xb1, 0xed, 0x7a, 0x24, 0xd8, 0xbb, 0xd2, 0xd9, 0xd9, 0x62, 0xed, 0x06, 0x24, 0xf4, 0xbb, + 0x41, 0x83, 0x24, 0x1b, 0x2e, 0xac, 0x15, 0x5e, 0x69, 0x93, 0xc8, 0xc9, 0xe8, 0xee, 0xec, 0x95, + 
0xbc, 0x5a, 0x41, 0xd7, 0x8b, 0xdc, 0x76, 0xba, 0x99, 0xcf, 0xf7, 0xaa, 0x10, 0x36, 0xb6, 0x49, + 0xdb, 0x49, 0xd5, 0x7b, 0x3d, 0xaf, 0x5e, 0x37, 0x72, 0x5b, 0x57, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, + 0x59, 0xc9, 0xfe, 0xbe, 0x05, 0x17, 0xe7, 0xef, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0x85, + 0x96, 0xdf, 0xd8, 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0x6e, 0x9b, 0xd4, 0xd9, 0x40, 0xa0, + 0x57, 0x60, 0x64, 0x97, 0xfd, 0x5f, 0x5d, 0xaa, 0x58, 0x17, 0xad, 0xcb, 0xa3, 0x0b, 0xd3, 0xbf, + 0xb3, 0x5f, 0xfd, 0xcc, 0xc1, 0x7e, 0x75, 0xe4, 0xae, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x82, 0xa1, + 0xcd, 0x70, 0x63, 0xaf, 0x43, 0x2a, 0x25, 0x86, 0x3b, 0x29, 0x70, 0x87, 0x56, 0xea, 0xb4, 0x14, + 0x0b, 0x28, 0xba, 0x02, 0xa3, 0x1d, 0x27, 0x88, 0xdc, 0xc8, 0xf5, 0xbd, 0x4a, 0xf9, 0xa2, 0x75, + 0x79, 0x70, 0x61, 0x46, 0xa0, 0x8e, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xb4, 0x1b, 0x01, 0x71, 0x9a, + 0xb7, 0xbd, 0xd6, 0x5e, 0x65, 0xe0, 0xa2, 0x75, 0x79, 0x24, 0xee, 0x06, 0x16, 0xe5, 0x58, 0x61, + 0xd8, 0x3f, 0x53, 0x82, 0x91, 0xf9, 0xcd, 0x4d, 0xd7, 0x73, 0xa3, 0x3d, 0x74, 0x17, 0xc6, 0x3d, + 0xbf, 0x49, 0xe4, 0x7f, 0xf6, 0x15, 0x63, 0xaf, 0x5d, 0x9c, 0x4b, 0x2f, 0xa5, 0xb9, 0x75, 0x0d, + 0x6f, 0x61, 0xfa, 0x60, 0xbf, 0x3a, 0xae, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd6, 0xf1, 0x9b, + 0x8a, 0x6c, 0x89, 0x91, 0xad, 0x66, 0x91, 0xad, 0xc5, 0x68, 0x0b, 0x53, 0x07, 0xfb, 0xd5, 0x31, + 0xad, 0x00, 0xeb, 0x44, 0xd0, 0x7d, 0x98, 0xa2, 0x7f, 0xbd, 0xc8, 0x55, 0x74, 0xcb, 0x8c, 0xee, + 0x73, 0x79, 0x74, 0x35, 0xd4, 0x85, 0x13, 0x07, 0xfb, 0xd5, 0xa9, 0x44, 0x21, 0x4e, 0x12, 0xb4, + 0x7f, 0xda, 0x82, 0xa9, 0xf9, 0x4e, 0x67, 0x3e, 0x68, 0xfb, 0x41, 0x2d, 0xf0, 0x37, 0xdd, 0x16, + 0x41, 0x5f, 0x80, 0x81, 0x88, 0xce, 0x1a, 0x9f, 0xe1, 0xe7, 0xc4, 0xd0, 0x0e, 0xd0, 0xb9, 0x3a, + 0xdc, 0xaf, 0x9e, 0x48, 0xa0, 0xb3, 0xa9, 0x64, 0x15, 0xd0, 0x7b, 0x30, 0xdd, 0xf2, 0x1b, 0x4e, + 0x6b, 0xdb, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f, 0xf2, 0x60, 0xbf, 0x3a, 0x7d, 0x2b, 0x01, 0xc3, + 0x29, 0x6c, 0xfb, 0x11, 0x4c, 0xce, 0x47, 0x91, 0xd3, 0xd8, 0x26, 0x4d, 0xbe, 0xa0, 0xd0, 0x1b, + 0x30, 0xe0, 0x39, 0x6d, 0xd9, 0x99, 0x8b, 0xb2, 0x33, 0xeb, 0x4e, 0x9b, 0x76, 0x66, 0xfa, 0x8e, + 0xe7, 0x7e, 0xd4, 0x15, 0x8b, 0x94, 0x96, 0x61, 0x86, 0x8d, 0x5e, 0x03, 0x68, 0x92, 0x5d, 0xb7, + 0x41, 0x6a, 0x4e, 0xb4, 0x2d, 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x52, 0x10, 0xac, 0x61, 0xd9, 0x0f, + 0x61, 0x74, 0x7e, 0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x19, 0xa2, 0x1d, 0x98, 0xea, 0x04, 0x64, 0x93, + 0x04, 0xaa, 0xa8, 0x62, 0x5d, 0x2c, 0x5f, 0x1e, 0x7b, 0xed, 0x72, 0xe6, 0xd8, 0x9b, 0xa8, 0xcb, + 0x5e, 0x14, 0xec, 0x2d, 0x9c, 0x11, 0xed, 0x4d, 0x25, 0xa0, 0x38, 0x49, 0xd9, 0xfe, 0x67, 0x25, + 0x38, 0x35, 0xff, 0xa8, 0x1b, 0x90, 0x25, 0x37, 0xdc, 0x49, 0x6e, 0xb8, 0xa6, 0x1b, 0xee, 0xac, + 0xc7, 0x23, 0xa0, 0x56, 0xfa, 0x92, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x0a, 0xc3, 0xf4, 0xf7, 0x1d, + 0xbc, 0x2a, 0x3e, 0xf9, 0x84, 0x40, 0x1e, 0x5b, 0x72, 0x22, 0x67, 0x89, 0x83, 0xb0, 0xc4, 0x41, + 0x6b, 0x30, 0xd6, 0x60, 0xfc, 0x61, 0x6b, 0xcd, 0x6f, 0x12, 0xb6, 0xb6, 0x46, 0x17, 0x5e, 0xa6, + 0xe8, 0x8b, 0x71, 0xf1, 0xe1, 0x7e, 0xb5, 0xc2, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, + 0xb6, 0xda, 0xee, 0x03, 0x8c, 0x12, 0x64, 0x6c, 0xf5, 0xcb, 0xda, 0xce, 0x1d, 0x64, 0x3b, 0x77, + 0x3c, 0x7b, 0xd7, 0xa2, 0xab, 0x30, 0xb0, 0xe3, 0x7a, 0xcd, 0xca, 0x10, 0xa3, 0x75, 0x9e, 0xce, + 0xf9, 0x4d, 0xd7, 0x6b, 0x1e, 0xee, 0x57, 0x67, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0xff, + 0xcb, 0x82, 0x2a, 0x83, 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, + 0x40, 0x5f, 0x03, 0x08, 
0x49, 0x23, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0xd4, 0x15, 0x04, 0x6b, + 0x58, 0x94, 0x3f, 0x85, 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0xc5, 0x9f, 0xea, 0x12, 0x80, + 0x63, 0x1c, 0x83, 0x3f, 0x95, 0x7b, 0xf1, 0x27, 0xf4, 0x25, 0x98, 0x8a, 0x1b, 0x0b, 0x3b, 0x4e, + 0x43, 0x0e, 0x20, 0xdb, 0xc1, 0x75, 0x13, 0x84, 0x93, 0xb8, 0xf6, 0x7f, 0x6e, 0x89, 0xc5, 0x43, + 0xbf, 0xfa, 0x13, 0xfe, 0xad, 0xf6, 0x3f, 0xb2, 0x60, 0x78, 0xc1, 0xf5, 0x9a, 0xae, 0xb7, 0x85, + 0xbe, 0x09, 0x23, 0xf4, 0xa8, 0x6c, 0x3a, 0x91, 0x23, 0xd8, 0xf0, 0xe7, 0xb4, 0xbd, 0xa5, 0x4e, + 0xae, 0xb9, 0xce, 0xce, 0x16, 0x2d, 0x08, 0xe7, 0x28, 0x36, 0xdd, 0x6d, 0xb7, 0xef, 0x7f, 0x8b, + 0x34, 0xa2, 0x35, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x28, 0x72, + 0x82, 0x2d, 0x12, 0x09, 0x7e, 0x9c, 0xc9, 0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x1a, 0x24, + 0x3e, 0xa5, 0x36, 0x58, 0x55, 0x2c, 0x48, 0xd8, 0xff, 0xdf, 0x30, 0x9c, 0x5d, 0xac, 0xaf, 0xe6, + 0xac, 0xab, 0x4b, 0x30, 0xd4, 0x0c, 0xdc, 0x5d, 0x12, 0x88, 0x71, 0x56, 0x54, 0x96, 0x58, 0x29, + 0x16, 0x50, 0x74, 0x0d, 0xc6, 0xf9, 0xf9, 0x78, 0xc3, 0xf1, 0x9a, 0x31, 0x7b, 0x14, 0xd8, 0xe3, + 0x77, 0x35, 0x18, 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0x25, 0x36, 0x63, 0xde, 0xd9, 0xfb, 0x5d, + 0x0b, 0xa6, 0x79, 0x33, 0xf3, 0x51, 0x14, 0xb8, 0xf7, 0xbb, 0x11, 0x09, 0x2b, 0x83, 0x8c, 0xd3, + 0x2d, 0x66, 0x8d, 0x56, 0xee, 0x08, 0xcc, 0xdd, 0x4d, 0x50, 0xe1, 0x4c, 0xb0, 0x22, 0xda, 0x9d, + 0x4e, 0x82, 0x71, 0xaa, 0x59, 0xf4, 0x17, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16, + 0x09, 0x6a, 0xdd, 0xfb, 0x2d, 0x37, 0xdc, 0xe6, 0xeb, 0x14, 0x93, 0x4d, 0xc6, 0x09, 0x72, 0xe6, + 0x50, 0x21, 0x89, 0x39, 0xbc, 0x70, 0xb0, 0x5f, 0x9d, 0x5d, 0xcc, 0x25, 0x85, 0x0b, 0x9a, 0x41, + 0x3b, 0x80, 0xe8, 0xc9, 0x5e, 0x8f, 0x9c, 0x2d, 0x12, 0x37, 0x3e, 0xdc, 0x7f, 0xe3, 0xa7, 0x0f, + 0xf6, 0xab, 0x68, 0x3d, 0x45, 0x02, 0x67, 0x90, 0x45, 0x1f, 0xc1, 0x49, 0x5a, 0x9a, 0xfa, 0xd6, + 0x91, 0xfe, 0x9b, 0xab, 0x1c, 0xec, 0x57, 0x4f, 0xae, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0x8f, + 0x5b, 0x70, 0x36, 0xfe, 0xfc, 0xe5, 0x87, 0x1d, 0xc7, 0x6b, 0xc6, 0x0d, 0x8f, 0xf6, 0xdf, 0x30, + 0xe5, 0xc9, 0x67, 0x17, 0xf3, 0x28, 0xe1, 0xfc, 0x46, 0x90, 0x07, 0x27, 0x68, 0xd7, 0x92, 0x6d, + 0x43, 0xff, 0x6d, 0x9f, 0x39, 0xd8, 0xaf, 0x9e, 0x58, 0x4f, 0xd3, 0xc0, 0x59, 0x84, 0x67, 0x17, + 0xe1, 0x54, 0xe6, 0xea, 0x44, 0xd3, 0x50, 0xde, 0x21, 0x5c, 0x08, 0x1c, 0xc5, 0xf4, 0x27, 0x3a, + 0x09, 0x83, 0xbb, 0x4e, 0xab, 0x2b, 0x36, 0x26, 0xe6, 0x7f, 0xde, 0x2a, 0x5d, 0xb3, 0xec, 0xff, + 0xbe, 0x0c, 0x53, 0x8b, 0xf5, 0xd5, 0xc7, 0xda, 0xf5, 0xfa, 0xb1, 0x57, 0x2a, 0x3c, 0xf6, 0xe2, + 0x43, 0xb4, 0x9c, 0x7b, 0x88, 0xfe, 0x58, 0xc6, 0x96, 0x1d, 0x60, 0x5b, 0xf6, 0x8b, 0x39, 0x5b, + 0xf6, 0x09, 0x6f, 0xd4, 0xdd, 0x9c, 0x55, 0x3b, 0xc8, 0x26, 0x30, 0x53, 0x42, 0x62, 0xb2, 0x5f, + 0x92, 0xd5, 0x1e, 0x71, 0xe9, 0x3e, 0x99, 0x79, 0x6c, 0xc0, 0xf8, 0xa2, 0xd3, 0x71, 0xee, 0xbb, + 0x2d, 0x37, 0x72, 0x49, 0x88, 0x5e, 0x84, 0xb2, 0xd3, 0x6c, 0x32, 0xe9, 0x6e, 0x74, 0xe1, 0xd4, + 0xc1, 0x7e, 0xb5, 0x3c, 0xdf, 0xa4, 0x62, 0x06, 0x28, 0xac, 0x3d, 0x4c, 0x31, 0xd0, 0x67, 0x61, + 0xa0, 0x19, 0xf8, 0x9d, 0x4a, 0x89, 0x61, 0xd2, 0x5d, 0x3e, 0xb0, 0x14, 0xf8, 0x9d, 0x04, 0x2a, + 0xc3, 0xb1, 0x7f, 0xbb, 0x04, 0xe7, 0x16, 0x49, 0x67, 0x7b, 0xa5, 0x9e, 0x73, 0x5e, 0x5c, 0x86, + 0x91, 0xb6, 0xef, 0xb9, 0x91, 0x1f, 0x84, 0xa2, 0x69, 0xb6, 0x22, 0xd6, 0x44, 0x19, 0x56, 0x50, + 0x74, 0x11, 0x06, 0x3a, 0xb1, 0x10, 0x3b, 0x2e, 0x05, 0x60, 0x26, 0xbe, 0x32, 0x08, 0xc5, 0xe8, + 0x86, 0x24, 0x10, 0x2b, 0x46, 0x61, 0xdc, 0x09, 
0x49, 0x80, 0x19, 0x24, 0x96, 0x04, 0xa8, 0x8c, + 0x20, 0x4e, 0x84, 0x84, 0x24, 0x40, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x18, 0x0d, 0x13, 0x33, 0xdb, + 0xd7, 0xd6, 0x9c, 0x60, 0xa2, 0x82, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0x82, 0x0d, 0xf5, 0x14, 0x15, + 0x7e, 0xa3, 0x04, 0x88, 0x0f, 0xe1, 0x9f, 0xb1, 0x81, 0xbb, 0x93, 0x1e, 0xb8, 0xfe, 0xb7, 0xc4, + 0x93, 0x1a, 0xbd, 0xff, 0xdb, 0x82, 0x73, 0x8b, 0xae, 0xd7, 0x24, 0x41, 0xce, 0x02, 0x7c, 0x3a, + 0x57, 0xf9, 0xa3, 0x09, 0x29, 0xc6, 0x12, 0x1b, 0x78, 0x02, 0x4b, 0xcc, 0xfe, 0xb7, 0x16, 0x20, + 0xfe, 0xd9, 0x9f, 0xb8, 0x8f, 0xbd, 0x93, 0xfe, 0xd8, 0x27, 0xb0, 0x2c, 0xec, 0x5b, 0x30, 0xb9, + 0xd8, 0x72, 0x89, 0x17, 0xad, 0xd6, 0x16, 0x7d, 0x6f, 0xd3, 0xdd, 0x42, 0x6f, 0xc1, 0x64, 0xe4, + 0xb6, 0x89, 0xdf, 0x8d, 0xea, 0xa4, 0xe1, 0x7b, 0xec, 0xe6, 0x6a, 0x5d, 0x1e, 0x5c, 0x40, 0x07, + 0xfb, 0xd5, 0xc9, 0x0d, 0x03, 0x82, 0x13, 0x98, 0xf6, 0xdf, 0xa5, 0x7c, 0xab, 0xd5, 0x0d, 0x23, + 0x12, 0x6c, 0x04, 0xdd, 0x30, 0x5a, 0xe8, 0x52, 0xd9, 0xb3, 0x16, 0xf8, 0xb4, 0x3b, 0xae, 0xef, + 0xa1, 0x73, 0xc6, 0x75, 0x7c, 0x44, 0x5e, 0xc5, 0xc5, 0xb5, 0x7b, 0x0e, 0x20, 0x74, 0xb7, 0x3c, + 0x12, 0x68, 0xd7, 0x87, 0x49, 0xb6, 0x55, 0x54, 0x29, 0xd6, 0x30, 0x50, 0x0b, 0x26, 0x5a, 0xce, + 0x7d, 0xd2, 0xaa, 0x93, 0x16, 0x69, 0x44, 0x7e, 0x20, 0xf4, 0x1b, 0xaf, 0xf7, 0x77, 0x0f, 0xb8, + 0xa5, 0x57, 0x5d, 0x98, 0x39, 0xd8, 0xaf, 0x4e, 0x18, 0x45, 0xd8, 0x24, 0x4e, 0x59, 0x87, 0xdf, + 0xa1, 0x5f, 0xe1, 0xb4, 0xf4, 0xcb, 0xe7, 0x6d, 0x51, 0x86, 0x15, 0x54, 0xb1, 0x8e, 0x81, 0x3c, + 0xd6, 0x61, 0xff, 0x4b, 0xba, 0xd0, 0xfc, 0x76, 0xc7, 0xf7, 0x88, 0x17, 0x2d, 0xfa, 0x5e, 0x93, + 0x6b, 0xa6, 0xde, 0x32, 0x54, 0x27, 0x97, 0x12, 0xaa, 0x93, 0xd3, 0xe9, 0x1a, 0x9a, 0xf6, 0xe4, + 0x8b, 0x30, 0x14, 0x46, 0x4e, 0xd4, 0x0d, 0xc5, 0xc0, 0x3d, 0x2b, 0x97, 0x5d, 0x9d, 0x95, 0x1e, + 0xee, 0x57, 0xa7, 0x54, 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x12, 0x0c, 0xb7, 0x49, 0x18, 0x3a, + 0x5b, 0x52, 0x6c, 0x98, 0x12, 0x75, 0x87, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x1c, 0x0c, 0x92, + 0x20, 0xf0, 0x03, 0xf1, 0x6d, 0x13, 0x02, 0x71, 0x70, 0x99, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x27, + 0x0b, 0xa6, 0x54, 0x5f, 0x79, 0x5b, 0xc7, 0x70, 0x5d, 0xfb, 0x2a, 0x40, 0x43, 0x7e, 0x60, 0xc8, + 0x8e, 0xd9, 0xb1, 0xd7, 0x2e, 0x65, 0x4a, 0x34, 0xa9, 0x61, 0x8c, 0x29, 0xab, 0xa2, 0x10, 0x6b, + 0xd4, 0xec, 0xdf, 0xb4, 0xe0, 0x44, 0xe2, 0x8b, 0x6e, 0xb9, 0x61, 0x84, 0x3e, 0x4c, 0x7d, 0xd5, + 0x5c, 0x9f, 0x8b, 0xcf, 0x0d, 0xf9, 0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x01, 0x83, + 0x6e, 0x44, 0xda, 0xf2, 0x63, 0x9e, 0x2b, 0xfc, 0x18, 0xde, 0xab, 0x78, 0x46, 0x56, 0x69, 0x4d, + 0xcc, 0x09, 0xd8, 0xbf, 0x5d, 0x86, 0x51, 0xbe, 0xbf, 0xd7, 0x9c, 0xce, 0x31, 0xcc, 0xc5, 0xcb, + 0x30, 0xea, 0xb6, 0xdb, 0xdd, 0xc8, 0xb9, 0x2f, 0xce, 0xbd, 0x11, 0xce, 0x83, 0x56, 0x65, 0x21, + 0x8e, 0xe1, 0x68, 0x15, 0x06, 0x58, 0x57, 0xf8, 0x57, 0xbe, 0x98, 0xfd, 0x95, 0xa2, 0xef, 0x73, + 0x4b, 0x4e, 0xe4, 0x70, 0x91, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x77, + 0x3d, 0x27, 0xd8, 0xa3, 0x65, 0x95, 0x32, 0x23, 0xf8, 0x6a, 0x31, 0xc1, 0x05, 0x85, 0xcf, 0xc9, + 0xaa, 0x0f, 0x8b, 0x01, 0x58, 0x23, 0x3a, 0xfb, 0x05, 0x18, 0x55, 0xc8, 0x47, 0x91, 0x1c, 0x67, + 0xbf, 0x04, 0x53, 0x89, 0xb6, 0x7a, 0x55, 0x1f, 0xd7, 0x05, 0xcf, 0x7f, 0xcc, 0x58, 0x86, 0xe8, + 0xf5, 0xb2, 0xb7, 0x2b, 0xce, 0xa6, 0x47, 0x70, 0xb2, 0x95, 0xc1, 0xf2, 0xc5, 0xbc, 0xf6, 0x7f, + 0x44, 0x9c, 0x13, 0x9f, 0x7d, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x30, 0x38, 0x62, 0xa9, 0x88, 0x23, + 0x52, 0x7e, 0x77, 0x52, 0x75, 0xfe, 0x26, 0xd9, 0x53, 0x4c, 0xf5, 0x07, 
0xd9, 0xfd, 0xf3, 0x7c, + 0xf4, 0x39, 0xbb, 0x1c, 0x13, 0x04, 0xca, 0x37, 0xc9, 0x1e, 0x9f, 0x0a, 0xfd, 0xeb, 0xca, 0x85, + 0x5f, 0xf7, 0x2b, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x06, 0xbe, 0xb0, 0x60, 0xf2, 0x85, 0xf3, 0x85, + 0x0b, 0x3c, 0x87, 0x23, 0xfc, 0x46, 0x09, 0xce, 0x2a, 0x1c, 0x7a, 0x89, 0xe2, 0x7f, 0xc4, 0xaa, + 0xba, 0x02, 0xa3, 0x9e, 0x52, 0x27, 0x5a, 0xa6, 0x1e, 0x2f, 0x56, 0x26, 0xc6, 0x38, 0xf4, 0xc8, + 0xf3, 0xe2, 0x43, 0x7b, 0x5c, 0xd7, 0xb3, 0x8b, 0xc3, 0x7d, 0x01, 0xca, 0x5d, 0xb7, 0x29, 0x0e, + 0x98, 0xcf, 0xc9, 0xd1, 0xbe, 0xb3, 0xba, 0x74, 0xb8, 0x5f, 0x7d, 0x36, 0xcf, 0xe4, 0x44, 0x4f, + 0xb6, 0x70, 0xee, 0xce, 0xea, 0x12, 0xa6, 0x95, 0xd1, 0x3c, 0x4c, 0x49, 0xab, 0xda, 0x5d, 0x2a, + 0x97, 0xfa, 0x9e, 0x38, 0x87, 0x94, 0xb2, 0x1c, 0x9b, 0x60, 0x9c, 0xc4, 0x47, 0x4b, 0x30, 0xbd, + 0xd3, 0xbd, 0x4f, 0x5a, 0x24, 0xe2, 0x1f, 0x7c, 0x93, 0x70, 0x55, 0xf2, 0x68, 0x7c, 0x85, 0xbd, + 0x99, 0x80, 0xe3, 0x54, 0x0d, 0xfb, 0x4f, 0xd9, 0x79, 0x20, 0x46, 0x4f, 0x93, 0x6f, 0x7e, 0x90, + 0xcb, 0xb9, 0x9f, 0x55, 0x71, 0x93, 0xec, 0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x5e, 0x15, 0xc6, 0x9a, + 0x1f, 0x28, 0x5c, 0xf3, 0xbf, 0x56, 0x82, 0x53, 0x6a, 0x04, 0x0c, 0x69, 0xf9, 0xcf, 0xfa, 0x18, + 0x5c, 0x85, 0xb1, 0x26, 0xd9, 0x74, 0xba, 0xad, 0x48, 0xd9, 0x35, 0x06, 0xb9, 0xa9, 0x6d, 0x29, + 0x2e, 0xc6, 0x3a, 0xce, 0x11, 0x86, 0xed, 0x6f, 0x4d, 0xb2, 0x83, 0x38, 0x72, 0xe8, 0x1a, 0x57, + 0xbb, 0xc6, 0xca, 0xdd, 0x35, 0xcf, 0xc1, 0xa0, 0xdb, 0xa6, 0x82, 0x59, 0xc9, 0x94, 0xb7, 0x56, + 0x69, 0x21, 0xe6, 0x30, 0xf4, 0x02, 0x0c, 0x37, 0xfc, 0x76, 0xdb, 0xf1, 0x9a, 0xec, 0xc8, 0x1b, + 0x5d, 0x18, 0xa3, 0xb2, 0xdb, 0x22, 0x2f, 0xc2, 0x12, 0x46, 0x85, 0x6f, 0x27, 0xd8, 0xe2, 0xca, + 0x1e, 0x21, 0x7c, 0xcf, 0x07, 0x5b, 0x21, 0x66, 0xa5, 0xf4, 0xae, 0xfa, 0xc0, 0x0f, 0x76, 0x5c, + 0x6f, 0x6b, 0xc9, 0x0d, 0xc4, 0x96, 0x50, 0x67, 0xe1, 0x3d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x81, + 0xc1, 0x8e, 0x1f, 0x44, 0x61, 0x65, 0x88, 0x0d, 0xf7, 0xb3, 0x39, 0x8c, 0x88, 0x7f, 0x6d, 0xcd, + 0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xea, 0xe8, 0x16, 0x0c, 0x13, 0x6f, 0x77, 0x25, + 0xf0, 0xdb, 0x95, 0x13, 0xf9, 0x94, 0x96, 0x39, 0x0a, 0x5f, 0x66, 0xb1, 0x8c, 0x2a, 0x8a, 0xb1, + 0x24, 0x81, 0xbe, 0x08, 0x65, 0xe2, 0xed, 0x56, 0x86, 0x19, 0xa5, 0xd9, 0x1c, 0x4a, 0x77, 0x9d, + 0x20, 0xe6, 0xf9, 0xcb, 0xde, 0x2e, 0xa6, 0x75, 0xd0, 0x57, 0x60, 0x54, 0x32, 0x8c, 0x50, 0x68, + 0x51, 0x33, 0x17, 0xac, 0x64, 0x33, 0x98, 0x7c, 0xd4, 0x75, 0x03, 0xd2, 0x26, 0x5e, 0x14, 0xc6, + 0x1c, 0x52, 0x42, 0x43, 0x1c, 0x53, 0x43, 0x0d, 0x18, 0x0f, 0x48, 0xe8, 0x3e, 0x22, 0x35, 0xbf, + 0xe5, 0x36, 0xf6, 0x2a, 0x67, 0x58, 0xf7, 0x5e, 0x2a, 0x1c, 0x32, 0xac, 0x55, 0x88, 0xb5, 0xfc, + 0x7a, 0x29, 0x36, 0x88, 0xa2, 0x0f, 0x60, 0x22, 0x20, 0x61, 0xe4, 0x04, 0x91, 0x68, 0xa5, 0xa2, + 0xac, 0x72, 0x13, 0x58, 0x07, 0xf0, 0xeb, 0x44, 0xdc, 0x4c, 0x0c, 0xc1, 0x26, 0x05, 0x14, 0x01, + 0x32, 0x0a, 0x70, 0xb7, 0x45, 0xc2, 0xca, 0xd9, 0x7c, 0x6b, 0x66, 0x92, 0x2c, 0xad, 0xb0, 0x30, + 0x2b, 0x3a, 0x8f, 0x70, 0x8a, 0x16, 0xce, 0xa0, 0x8f, 0xbe, 0x22, 0x0d, 0x1d, 0x6b, 0x7e, 0xd7, + 0x8b, 0xc2, 0xca, 0x28, 0x6b, 0x2f, 0xd3, 0x22, 0x7e, 0x37, 0xc6, 0x4b, 0x5a, 0x42, 0x78, 0x65, + 0x6c, 0x90, 0x42, 0x5f, 0x87, 0x09, 0xfe, 0x9f, 0x1b, 0x72, 0xc3, 0xca, 0x29, 0x46, 0xfb, 0x62, + 0x3e, 0x6d, 0x8e, 0xb8, 0x70, 0x4a, 0x10, 0x9f, 0xd0, 0x4b, 0x43, 0x6c, 0x52, 0x43, 0x18, 0x26, + 0x5a, 0xee, 0x2e, 0xf1, 0x48, 0x18, 0xd6, 0x02, 0xff, 0x3e, 0x11, 0x7a, 0xe9, 0xb3, 0xd9, 0x86, + 0x5f, 0xff, 0x3e, 0x11, 0x57, 0x4f, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x3b, 0x30, 0x19, 0x10, 0xa7, + 
+	[… binary blob elided: the remainder of this hunk is a machine-generated
+	byte-array literal — apparently a compressed protobuf file descriptor from
+	vendored generated Go code — and is not human-readable …]
0x25, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x7d, 0x48, 0x89, 0x8f, 0xff, 0xec, 0xe5, 0x2a, 0x8c, 0x39, + 0x9d, 0x8e, 0x04, 0x48, 0xff, 0x45, 0x96, 0x1a, 0x26, 0x2e, 0xc6, 0x3a, 0x8e, 0x7a, 0x85, 0x53, + 0xce, 0x7d, 0x85, 0xd3, 0x04, 0x88, 0x9c, 0x60, 0x8b, 0x44, 0xb4, 0x4c, 0xb8, 0x5b, 0xe7, 0xf3, + 0x9b, 0x6e, 0xe4, 0xb6, 0xe6, 0x5c, 0x2f, 0x0a, 0xa3, 0x60, 0x6e, 0xd5, 0x8b, 0x6e, 0x07, 0xfc, + 0x9a, 0xaa, 0x85, 0x96, 0x54, 0xb4, 0xb0, 0x46, 0x57, 0x06, 0x7f, 0x61, 0x6d, 0x0c, 0x9a, 0x8e, + 0x30, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0b, 0xec, 0xf4, 0x61, 0x63, 0x7a, 0xb4, 0x90, 0x89, + 0x7f, 0x3c, 0xae, 0x66, 0x83, 0x99, 0x84, 0x97, 0xf4, 0xc0, 0x8c, 0xc5, 0xcc, 0x9e, 0x36, 0xac, + 0xbf, 0xb3, 0x8d, 0xa3, 0x37, 0xa2, 0xaf, 0xa5, 0x9c, 0x9b, 0x5e, 0xed, 0x71, 0x6a, 0x1c, 0xc1, + 0x9d, 0x89, 0xe5, 0x89, 0x64, 0x59, 0xf4, 0x56, 0x6b, 0x62, 0x5f, 0x68, 0x79, 0x22, 0x05, 0x00, + 0xc7, 0x38, 0x54, 0x60, 0x53, 0x7f, 0xc2, 0x0a, 0x8a, 0xd3, 0x09, 0x28, 0xec, 0x10, 0x6b, 0x18, + 0xe8, 0x8a, 0x50, 0x5a, 0x70, 0xdb, 0xc3, 0x33, 0x09, 0xa5, 0x85, 0x1c, 0x2e, 0x4d, 0xd3, 0x74, + 0x15, 0xc6, 0xc8, 0xc3, 0x88, 0x04, 0x9e, 0xd3, 0xa2, 0x2d, 0x0c, 0xc6, 0xc1, 0x91, 0x97, 0xe3, + 0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0a, 0xb9, 0x2e, 0x4f, 0x25, 0xb1, 0xe1, 0x3a, 0xd1, 0xcf, + 0xaa, 0x50, 0x04, 0x26, 0xf8, 0x90, 0x15, 0x71, 0xee, 0x24, 0x03, 0xb4, 0x24, 0x49, 0xa0, 0x77, + 0x61, 0xb2, 0xe5, 0x3b, 0xcd, 0x05, 0xa7, 0xe5, 0x78, 0x0d, 0x36, 0x3e, 0x23, 0x46, 0x94, 0xce, + 0xc9, 0x5b, 0x06, 0x14, 0x27, 0xb0, 0xa9, 0x80, 0xa8, 0x97, 0x88, 0xc4, 0x4b, 0x8e, 0xb7, 0x45, + 0xc2, 0xca, 0x28, 0xfb, 0x2a, 0x26, 0x20, 0xde, 0xca, 0xc1, 0xc1, 0xb9, 0xb5, 0xd1, 0x35, 0x18, + 0x97, 0x9f, 0xaf, 0xc5, 0x33, 0x8a, 0x1f, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x4e, 0xc9, + 0xff, 0x1b, 0x81, 0xb3, 0xb9, 0xe9, 0x36, 0x44, 0x90, 0x0f, 0xfe, 0x28, 0xfd, 0x4b, 0xf2, 0x05, + 0xec, 0x72, 0x16, 0xd2, 0xe1, 0x7e, 0xf5, 0x9c, 0x18, 0xb5, 0x4c, 0x38, 0xce, 0xa6, 0x8d, 0xd6, + 0xe0, 0x04, 0xf7, 0x81, 0x59, 0xdc, 0x26, 0x8d, 0x1d, 0xb9, 0xe1, 0x98, 0xd4, 0xa8, 0x3d, 0xfc, + 0xb9, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0x87, 0x50, 0xe9, 0x74, 0xef, 0xb7, 0xdc, 0x70, 0x7b, + 0xdd, 0x8f, 0x98, 0x0b, 0xd9, 0x7c, 0xb3, 0x19, 0x90, 0x90, 0xbf, 0x59, 0x66, 0x47, 0xaf, 0x8c, + 0x41, 0x55, 0xcb, 0xc1, 0xc3, 0xb9, 0x14, 0xd0, 0x23, 0x38, 0x95, 0x58, 0x08, 0x22, 0x98, 0xcc, + 0x64, 0x7e, 0x0a, 0xbb, 0x7a, 0x56, 0x05, 0x11, 0x97, 0x29, 0x0b, 0x84, 0xb3, 0x9b, 0x40, 0x6f, + 0x01, 0xb8, 0x9d, 0x15, 0xa7, 0xed, 0xb6, 0xe8, 0x75, 0xf4, 0x04, 0x5b, 0x23, 0xf4, 0x6a, 0x02, + 0xab, 0x35, 0x59, 0x4a, 0x79, 0xb3, 0xf8, 0xb7, 0x87, 0x35, 0x6c, 0x74, 0x0b, 0x26, 0xc5, 0xbf, + 0x3d, 0x31, 0xa5, 0x33, 0x2a, 0xdb, 0xf1, 0xa4, 0xac, 0xa1, 0xe6, 0x31, 0x51, 0x82, 0x13, 0x75, + 0xd1, 0x16, 0x9c, 0x97, 0xa9, 0x96, 0xf5, 0xf5, 0x29, 0xe7, 0x20, 0x64, 0x79, 0xe3, 0x46, 0xf8, + 0x9b, 0xa2, 0xf9, 0x22, 0x44, 0x5c, 0x4c, 0x87, 0x9e, 0xeb, 0xfa, 0x32, 0xe7, 0x2f, 0xd9, 0x4f, + 0xc5, 0xb1, 0x4e, 0x6f, 0x25, 0x81, 0x38, 0x8d, 0x8f, 0x7c, 0x38, 0xe5, 0x7a, 0x59, 0xab, 0xfa, + 0x34, 0x23, 0xf4, 0x45, 0xfe, 0x88, 0xbf, 0x78, 0x45, 0x67, 0xc2, 0x71, 0x36, 0x5d, 0xb4, 0x0a, + 0x27, 0x22, 0x5e, 0xb0, 0xe4, 0x86, 0x3c, 0x2d, 0x15, 0xbd, 0xf6, 0x9d, 0x61, 0xcd, 0x9d, 0xa1, + 0xab, 0x79, 0x23, 0x0d, 0xc6, 0x59, 0x75, 0x3e, 0x9e, 0x03, 0xe8, 0xef, 0x5b, 0xb4, 0xb6, 0x26, + 0xe8, 0xa3, 0x6f, 0xc2, 0xb8, 0x3e, 0x3e, 0x42, 0x68, 0xb9, 0x94, 0x2d, 0x07, 0x6b, 0xec, 0x85, + 0x5f, 0x13, 0x14, 0x0b, 0xd1, 0x61, 0xd8, 0xa0, 0x88, 0x1a, 0x19, 0xc1, 0x37, 0xae, 0xf4, 0x27, + 0x14, 0xf5, 0xef, 0xff, 
0x48, 0x20, 0x7b, 0xe7, 0xa0, 0x5b, 0x30, 0xd2, 0x68, 0xb9, 0xc4, 0x8b, + 0x56, 0x6b, 0x45, 0x21, 0x68, 0x17, 0x05, 0x8e, 0xd8, 0x8a, 0x22, 0x9b, 0x1c, 0x2f, 0xc3, 0x8a, + 0x82, 0x7d, 0x0d, 0xc6, 0xea, 0x2d, 0x42, 0x3a, 0xfc, 0x1d, 0x17, 0x7a, 0x89, 0x5d, 0x4c, 0x98, + 0x68, 0x69, 0x31, 0xd1, 0x52, 0xbf, 0x73, 0x30, 0xa1, 0x52, 0xc2, 0xed, 0xdf, 0x2e, 0x41, 0xb5, + 0x47, 0x52, 0xc3, 0x84, 0xbd, 0xcd, 0xea, 0xcb, 0xde, 0x36, 0x0f, 0x53, 0xf1, 0x3f, 0x5d, 0x95, + 0xa7, 0x9c, 0xa1, 0xef, 0x9a, 0x60, 0x9c, 0xc4, 0xef, 0xfb, 0x5d, 0x8b, 0x6e, 0xb2, 0x1b, 0xe8, + 0xf9, 0x32, 0xcb, 0x30, 0xd5, 0x0f, 0xf6, 0x7f, 0xf7, 0xce, 0x35, 0xbb, 0xda, 0xbf, 0x51, 0x82, + 0x53, 0x6a, 0x08, 0x7f, 0x78, 0x07, 0xee, 0x4e, 0x7a, 0xe0, 0x9e, 0x80, 0xd1, 0xda, 0xbe, 0x0d, + 0x43, 0x3c, 0x2e, 0x6e, 0x1f, 0x32, 0xff, 0x73, 0x66, 0x1e, 0x06, 0x25, 0x66, 0x1a, 0xb9, 0x18, + 0xfe, 0x92, 0x05, 0x53, 0x89, 0x07, 0x92, 0x08, 0x6b, 0xaf, 0xe8, 0x1f, 0x47, 0x2e, 0xcf, 0x92, + 0xf8, 0x2f, 0xc2, 0xc0, 0xb6, 0xaf, 0x9c, 0x94, 0x15, 0xc6, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8, + 0xff, 0xd2, 0x82, 0xc1, 0x0d, 0xc7, 0xf5, 0x22, 0x69, 0xfd, 0xb0, 0x72, 0xac, 0x1f, 0xfd, 0x7c, + 0x17, 0x7a, 0x13, 0x86, 0xc8, 0xe6, 0x26, 0x69, 0x44, 0x62, 0x56, 0x65, 0x94, 0x8f, 0xa1, 0x65, + 0x56, 0x4a, 0x85, 0x50, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa3, 0x91, 0xdb, 0x26, + 0xf3, 0xcd, 0xa6, 0xf0, 0x09, 0x78, 0x8c, 0xd0, 0x34, 0x1b, 0x92, 0x00, 0x8e, 0x69, 0xd9, 0xdf, + 0x2b, 0x01, 0xc4, 0x71, 0xf8, 0x7a, 0x7d, 0xe2, 0x42, 0xca, 0x5a, 0x7c, 0x29, 0xc3, 0x5a, 0x8c, + 0x62, 0x82, 0x19, 0xa6, 0x62, 0x35, 0x4c, 0xe5, 0xbe, 0x86, 0x69, 0xe0, 0x28, 0xc3, 0xb4, 0x08, + 0x33, 0x71, 0x1c, 0x41, 0x33, 0x8c, 0x2a, 0x3b, 0xbf, 0x37, 0x92, 0x40, 0x9c, 0xc6, 0xb7, 0x09, + 0x5c, 0x54, 0xe1, 0xd4, 0xc4, 0x59, 0xc8, 0x9e, 0x12, 0xe8, 0xd6, 0xf7, 0x1e, 0xe3, 0x14, 0x9b, + 0xc3, 0x4b, 0xb9, 0xe6, 0xf0, 0xbf, 0x69, 0xc1, 0xc9, 0x64, 0x3b, 0xec, 0xdd, 0xfd, 0x77, 0x2d, + 0x38, 0x15, 0xe7, 0xf4, 0x4a, 0xbb, 0x20, 0xbc, 0x51, 0x18, 0x22, 0x2e, 0xa7, 0xc7, 0x71, 0x38, + 0x99, 0xb5, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xff, 0x19, 0x80, 0x4a, 0x5e, 0x6c, 0x39, 0xf6, + 0xd2, 0xc8, 0x79, 0x58, 0xdf, 0x21, 0x0f, 0xc4, 0x7b, 0x8e, 0xf8, 0xa5, 0x11, 0x2f, 0xc6, 0x12, + 0x9e, 0x4c, 0xe3, 0x56, 0xea, 0x33, 0x8d, 0xdb, 0x36, 0xcc, 0x3c, 0xd8, 0x26, 0xde, 0x1d, 0x2f, + 0x74, 0x22, 0x37, 0xdc, 0x74, 0x99, 0x01, 0x9d, 0xaf, 0x9b, 0xb7, 0xe4, 0xab, 0x8b, 0x7b, 0x49, + 0x84, 0xc3, 0xfd, 0xea, 0x79, 0xa3, 0x20, 0xee, 0x32, 0x67, 0x24, 0x38, 0x4d, 0x34, 0x9d, 0x05, + 0x6f, 0xe0, 0x29, 0x67, 0xc1, 0x6b, 0xbb, 0xc2, 0xed, 0x46, 0x3e, 0x23, 0x61, 0xd7, 0xd6, 0x35, + 0x55, 0x8a, 0x35, 0x0c, 0xf4, 0x75, 0x40, 0x7a, 0x1a, 0x53, 0x23, 0xb4, 0xef, 0xab, 0x07, 0xfb, + 0x55, 0xb4, 0x9e, 0x82, 0x1e, 0xee, 0x57, 0x4f, 0xd0, 0xd2, 0x55, 0x8f, 0x5e, 0x7f, 0xe3, 0x78, + 0x88, 0x19, 0x84, 0xd0, 0x3d, 0x98, 0xa6, 0xa5, 0x6c, 0x47, 0xc9, 0xb8, 0xc1, 0xfc, 0xca, 0xfa, + 0xf2, 0xc1, 0x7e, 0x75, 0x7a, 0x3d, 0x01, 0xcb, 0x23, 0x9d, 0x22, 0x92, 0x91, 0x0c, 0x6f, 0xa4, + 0xdf, 0x64, 0x78, 0xf6, 0x77, 0x2d, 0x38, 0x4b, 0x0f, 0xb8, 0xe6, 0xad, 0x1c, 0x2b, 0xba, 0xd3, + 0x71, 0xb9, 0x9d, 0x46, 0x1c, 0x35, 0x4c, 0x57, 0x57, 0x5b, 0xe5, 0x56, 0x1a, 0x05, 0xa5, 0x1c, + 0x7e, 0xc7, 0xf5, 0x9a, 0x49, 0x0e, 0x7f, 0xd3, 0xf5, 0x9a, 0x98, 0x41, 0xd4, 0x91, 0x55, 0xce, + 0xcd, 0x43, 0xf0, 0x2b, 0x74, 0xaf, 0xd2, 0xbe, 0xfc, 0x40, 0xbb, 0x81, 0x5e, 0xd6, 0x6d, 0xaa, + 0xc2, 0x7d, 0x32, 0xd7, 0x9e, 0xfa, 0x1d, 0x0b, 0xc4, 0xeb, 0xf7, 0x3e, 0xce, 0xe4, 0xaf, 0xc2, + 0xf8, 0x6e, 0x3a, 0xc5, 0xf3, 0xc5, 0xfc, 0x70, 
0x00, 0x22, 0xb1, 0xb3, 0x12, 0xd1, 0x8d, 0x74, + 0xce, 0x06, 0x2d, 0xbb, 0x09, 0x02, 0xba, 0x44, 0x98, 0x55, 0xa3, 0x77, 0x6f, 0x5e, 0x03, 0x68, + 0x32, 0x5c, 0x96, 0xec, 0xac, 0x64, 0x4a, 0x5c, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x17, 0xca, + 0x30, 0x26, 0x53, 0x0a, 0x77, 0xbd, 0x7e, 0x74, 0x8f, 0xba, 0xe0, 0x54, 0xea, 0x29, 0x38, 0x7d, + 0x08, 0x33, 0x01, 0x69, 0x74, 0x83, 0xd0, 0xdd, 0x25, 0x12, 0x2c, 0x36, 0xc9, 0x1c, 0x4f, 0x83, + 0x91, 0x00, 0x1e, 0xb2, 0xd0, 0x5d, 0x89, 0x42, 0x66, 0x34, 0x4e, 0x13, 0x42, 0x57, 0x60, 0x94, + 0xa9, 0xde, 0x6b, 0xb1, 0x42, 0x58, 0x29, 0xbe, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xec, 0x72, 0xd0, + 0xbd, 0xaf, 0x65, 0xa2, 0x8b, 0x2f, 0x07, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x32, 0x4c, 0xf3, 0x7a, + 0x81, 0xdf, 0x71, 0xb6, 0xb8, 0x49, 0x70, 0x50, 0x85, 0xd7, 0x99, 0x5e, 0x4b, 0xc0, 0x0e, 0xf7, + 0xab, 0x27, 0x93, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf3, 0x8f, 0x37, 0x42, 0xcf, 0x8c, 0x94, + 0xc3, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0x27, 0x16, 0xcc, 0x68, 0x53, 0xd5, 0x77, 0x26, 0x12, + 0x63, 0x90, 0x4a, 0x7d, 0x0c, 0xd2, 0xd1, 0xa2, 0x3d, 0x64, 0xce, 0xf0, 0xc0, 0x13, 0x9a, 0x61, + 0xfb, 0x9b, 0x80, 0xd2, 0xf9, 0xaa, 0xd1, 0xfb, 0xdc, 0x91, 0xdf, 0x0d, 0x48, 0xb3, 0xc8, 0xe0, + 0xaf, 0x47, 0xce, 0x91, 0x2f, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x7f, 0x32, 0x00, 0xd3, 0xc9, + 0x58, 0x1d, 0xe8, 0x06, 0x0c, 0x71, 0x29, 0x5d, 0x90, 0x2f, 0xf0, 0x27, 0xd3, 0x22, 0x7c, 0xf0, + 0x2c, 0x41, 0x5c, 0xba, 0x17, 0xf5, 0xd1, 0x87, 0x30, 0xd6, 0xf4, 0x1f, 0x78, 0x0f, 0x9c, 0xa0, + 0x39, 0x5f, 0x5b, 0x15, 0x1c, 0x22, 0x53, 0x01, 0xb5, 0x14, 0xa3, 0xe9, 0x51, 0x43, 0x98, 0xef, + 0x44, 0x0c, 0xc2, 0x3a, 0x39, 0xb4, 0xc1, 0x12, 0x57, 0x6d, 0xba, 0x5b, 0x6b, 0x4e, 0xa7, 0xe8, + 0x55, 0xd7, 0xa2, 0x44, 0xd2, 0x28, 0x4f, 0x88, 0xec, 0x56, 0x1c, 0x80, 0x63, 0x42, 0xe8, 0x47, + 0xe1, 0x44, 0x98, 0x63, 0x12, 0xcb, 0x71, 0x38, 0x28, 0xb4, 0x12, 0x71, 0x65, 0x4a, 0x96, 0xf1, + 0x2c, 0xab, 0x19, 0xf4, 0x10, 0x90, 0x50, 0x3d, 0x6f, 0x04, 0xdd, 0x30, 0xe2, 0x29, 0x20, 0xc5, + 0xa5, 0xeb, 0x73, 0xd9, 0x7a, 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x0b, 0x9c, 0x9c, 0xc6, 0xc0, 0x19, + 0x6d, 0xa0, 0x6d, 0x98, 0xec, 0x18, 0xd9, 0x37, 0xd9, 0xde, 0xcc, 0x89, 0x2e, 0x9c, 0x97, 0xa7, + 0x93, 0x9f, 0xd2, 0x26, 0x14, 0x27, 0xe8, 0xda, 0xdf, 0x19, 0x80, 0x59, 0x99, 0x8a, 0x3e, 0xe3, + 0x9d, 0xcc, 0xb7, 0xad, 0xc4, 0x43, 0x99, 0xb7, 0xf2, 0x8f, 0x94, 0xa7, 0xf6, 0x5c, 0xe6, 0x27, + 0xd3, 0xcf, 0x65, 0xde, 0x39, 0x62, 0x37, 0x9e, 0xd8, 0xa3, 0x99, 0x1f, 0xda, 0x97, 0x2e, 0x07, + 0x27, 0xc1, 0x10, 0x02, 0x10, 0xe6, 0xf1, 0xef, 0x6b, 0xd2, 0x48, 0x95, 0xa3, 0x68, 0xb8, 0x21, + 0x70, 0x0c, 0xb1, 0x62, 0x5c, 0x46, 0xc9, 0x67, 0x1c, 0x5d, 0xd1, 0xa1, 0x34, 0x49, 0xbb, 0x13, + 0xed, 0x2d, 0xb9, 0x81, 0xe8, 0x71, 0x26, 0xcd, 0x65, 0x81, 0x93, 0xa6, 0x29, 0x21, 0x58, 0xd1, + 0x41, 0xbb, 0x30, 0xb3, 0xc5, 0x62, 0x4b, 0x69, 0x59, 0xe1, 0x05, 0x07, 0xca, 0xe4, 0x10, 0xd7, + 0x17, 0x97, 0xf3, 0x53, 0xc8, 0xf3, 0x6b, 0x66, 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa4, + 0xf3, 0x20, 0x5c, 0x6e, 0x39, 0x61, 0xe4, 0x36, 0x16, 0x5a, 0x7e, 0x63, 0xa7, 0x1e, 0xf9, 0x81, + 0xcc, 0x2a, 0x9a, 0x79, 0xcb, 0x9b, 0xbf, 0x57, 0x4f, 0xe1, 0x1b, 0xcd, 0xb3, 0xec, 0xb6, 0x59, + 0x58, 0x38, 0xb3, 0x2d, 0xb4, 0x0e, 0xc3, 0x5b, 0x6e, 0x84, 0x49, 0xc7, 0x17, 0x7c, 0x29, 0x93, + 0xe9, 0x5e, 0xe7, 0x28, 0x46, 0x4b, 0x2c, 0xf6, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xaf, 0x8e, + 0x9b, 0xa1, 0x7c, 0x55, 0x6f, 0xda, 0xcb, 0x2f, 0xf3, 0xc0, 0x79, 0x17, 0xca, 0xde, 0x66, 0x58, + 0x14, 0xf5, 0x67, 0x7d, 0xc5, 0xd0, 0xd4, 0x2d, 0x0c, 0xd3, 0x4b, 0xf8, 
0xfa, 0x4a, 0x1d, 0xd3, + 0x8a, 0xec, 0x81, 0x6d, 0xd8, 0x08, 0x5d, 0x91, 0xbc, 0x2b, 0xf3, 0xbd, 0xf1, 0x6a, 0x7d, 0xb1, + 0xbe, 0x6a, 0xd0, 0x60, 0xf1, 0x13, 0x59, 0x31, 0xe6, 0xd5, 0xd1, 0x5d, 0x18, 0xdd, 0xe2, 0x2c, + 0x76, 0x93, 0x87, 0xb5, 0xcd, 0x39, 0xf6, 0xae, 0x4b, 0x24, 0x83, 0x1e, 0x3b, 0x9c, 0x14, 0x08, + 0xc7, 0xa4, 0xd0, 0x77, 0x2c, 0x38, 0xd5, 0x49, 0xe8, 0x6a, 0xd9, 0xb3, 0x38, 0xe1, 0x10, 0x97, + 0xf9, 0xd4, 0xa0, 0x96, 0x55, 0xc1, 0x68, 0x90, 0x19, 0x7a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74, + 0xa0, 0x83, 0xfb, 0xcd, 0xa2, 0x7c, 0x4f, 0x89, 0x10, 0x48, 0x7c, 0xa0, 0xf1, 0xc2, 0x12, 0xa6, + 0x15, 0xd1, 0x06, 0xc0, 0x66, 0x8b, 0x88, 0xd8, 0x92, 0xc2, 0xfd, 0x2a, 0x53, 0xce, 0x58, 0x51, + 0x58, 0x82, 0x0e, 0xbb, 0xf3, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xc3, 0xf5, 0x9a, 0x24, + 0x60, 0x66, 0xb4, 0x9c, 0xa5, 0xb4, 0xc8, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, + 0x22, 0x9d, 0xed, 0xcd, 0xb0, 0x28, 0xb3, 0xc8, 0x22, 0xe9, 0x6c, 0x27, 0x16, 0x14, 0xa7, 0xc5, + 0xca, 0xb1, 0xa0, 0x40, 0xb7, 0xcc, 0x26, 0xdd, 0x40, 0x24, 0xa8, 0x4c, 0xe5, 0x6f, 0x99, 0x15, + 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x0d, 0x53, 0xae, 0x9a, 0x66, 0x34, 0x5f, + 0xee, 0x21, 0x57, 0x19, 0x74, 0x8b, 0x25, 0xab, 0xb7, 0xa0, 0xb4, 0xd9, 0x60, 0xe6, 0xb7, 0x1c, + 0xeb, 0xc4, 0xca, 0xa2, 0x41, 0x8d, 0x45, 0xea, 0x5f, 0x59, 0xc4, 0xa5, 0xcd, 0x06, 0x5d, 0xfa, + 0xce, 0xa3, 0x6e, 0x40, 0x56, 0xdc, 0x16, 0x11, 0xa1, 0x83, 0x33, 0x97, 0xfe, 0xbc, 0x44, 0x4a, + 0x2f, 0x7d, 0x05, 0xc2, 0x31, 0x29, 0x4a, 0x37, 0x96, 0xf6, 0x4e, 0xe4, 0xd3, 0x55, 0x42, 0x5d, + 0x9a, 0x6e, 0xa6, 0xbc, 0xb7, 0x03, 0x13, 0xbb, 0x61, 0x67, 0x9b, 0x48, 0xae, 0xc8, 0x0c, 0x83, + 0x39, 0x31, 0x31, 0xee, 0x0a, 0x44, 0x37, 0x88, 0xba, 0x4e, 0x2b, 0xc5, 0xc8, 0x99, 0x12, 0xe7, + 0xae, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0x42, 0xf8, 0x88, 0x07, 0xae, 0x63, 0x26, 0xc2, 0x9c, 0x85, + 0x90, 0x11, 0xdb, 0x8e, 0x2f, 0x04, 0x01, 0xc0, 0x92, 0x88, 0x1a, 0x6c, 0x76, 0x00, 0x9d, 0xee, + 0x31, 0xd8, 0xa9, 0xfe, 0xc6, 0x83, 0xcd, 0x0e, 0x9c, 0x98, 0x14, 0x3b, 0x68, 0x3a, 0xdb, 0x7e, + 0xe4, 0x7b, 0x89, 0x43, 0xee, 0x4c, 0xfe, 0x41, 0x53, 0xcb, 0xc0, 0x4f, 0x1f, 0x34, 0x59, 0x58, + 0x38, 0xb3, 0x2d, 0xfa, 0x71, 0x1d, 0x19, 0x83, 0x50, 0x64, 0x42, 0x79, 0x29, 0x27, 0x84, 0x67, + 0x3a, 0x50, 0x21, 0xff, 0x38, 0x05, 0xc2, 0x31, 0x29, 0xd4, 0xa4, 0x92, 0xae, 0x1e, 0xdb, 0x96, + 0x65, 0x74, 0xc9, 0x91, 0x0b, 0xb2, 0xa2, 0xe0, 0x4a, 0x29, 0x57, 0x87, 0xe0, 0x04, 0x4d, 0xe6, + 0x23, 0xc8, 0x1f, 0x15, 0xb2, 0x84, 0x2f, 0x39, 0x53, 0x9d, 0xf1, 0xee, 0x90, 0x4f, 0xb5, 0x00, + 0x60, 0x49, 0x84, 0x8e, 0x86, 0x78, 0x0a, 0xe7, 0x87, 0x2c, 0x6f, 0x52, 0x9e, 0x29, 0x3f, 0xcb, + 0x20, 0x25, 0x03, 0xcd, 0x0b, 0x10, 0x8e, 0x49, 0x51, 0x4e, 0x4e, 0x0f, 0xbc, 0x73, 0xf9, 0x9c, + 0x3c, 0x79, 0xdc, 0x31, 0x4e, 0x4e, 0x0f, 0xbb, 0xb2, 0x38, 0xea, 0x54, 0x5c, 0x74, 0x96, 0xf3, + 0x25, 0xa7, 0x5f, 0x2a, 0xb0, 0x7a, 0xba, 0x5f, 0x0a, 0x84, 0x63, 0x52, 0xec, 0x28, 0x66, 0x41, + 0xf0, 0x2e, 0x14, 0x1c, 0xc5, 0x14, 0x21, 0xe3, 0x28, 0xd6, 0x82, 0xe4, 0xd9, 0x7f, 0xb9, 0x04, + 0x17, 0x8a, 0xf7, 0x6d, 0x6c, 0xad, 0xab, 0xc5, 0xde, 0x51, 0x09, 0x6b, 0x1d, 0xd7, 0x1d, 0xc5, + 0x58, 0x7d, 0x87, 0x36, 0xbe, 0x0e, 0x33, 0xea, 0xe1, 0x63, 0xcb, 0x6d, 0xec, 0x69, 0x89, 0x5e, + 0x55, 0x10, 0xa0, 0x7a, 0x12, 0x01, 0xa7, 0xeb, 0xa0, 0x79, 0x98, 0x32, 0x0a, 0x57, 0x97, 0x84, + 0xa2, 0x21, 0xce, 0x56, 0x62, 0x82, 0x71, 0x12, 0xdf, 0xfe, 0x45, 0x0b, 0xce, 0xf0, 0x40, 0xbc, + 0xa4, 0x59, 0xf3, 0x9b, 0x52, 0xa3, 0x70, 0xa4, 0xc8, 0xbd, 0x9b, 0x30, 0xd5, 0x31, 0xab, 0xf6, + 
0x08, 0x36, 0xae, 0xa3, 0xc6, 0x7d, 0x4d, 0x00, 0x70, 0x92, 0xa8, 0xfd, 0xf3, 0x25, 0x38, 0x5f, + 0xe8, 0xc9, 0x8f, 0x30, 0x9c, 0xde, 0x6a, 0x87, 0xce, 0x62, 0x40, 0x9a, 0xc4, 0x8b, 0x5c, 0xa7, + 0x55, 0xef, 0x90, 0x86, 0x66, 0x6f, 0x65, 0x2e, 0xf1, 0xd7, 0xd7, 0xea, 0xf3, 0x69, 0x0c, 0x9c, + 0x53, 0x13, 0xad, 0x00, 0x4a, 0x43, 0xc4, 0x0c, 0xb3, 0xcb, 0x74, 0x9a, 0x1e, 0xce, 0xa8, 0x81, + 0xbe, 0x00, 0x13, 0xea, 0x85, 0x80, 0x36, 0xe3, 0xec, 0x80, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8, + 0x2a, 0x4f, 0x63, 0x25, 0x12, 0x9e, 0x09, 0xe3, 0xec, 0x94, 0xcc, 0x51, 0x25, 0x8a, 0xb1, 0x8e, + 0xb3, 0x70, 0xed, 0x77, 0xfe, 0xf0, 0xc2, 0x67, 0x7e, 0xf7, 0x0f, 0x2f, 0x7c, 0xe6, 0x0f, 0xfe, + 0xf0, 0xc2, 0x67, 0x7e, 0xfc, 0xe0, 0x82, 0xf5, 0x3b, 0x07, 0x17, 0xac, 0xdf, 0x3d, 0xb8, 0x60, + 0xfd, 0xc1, 0xc1, 0x05, 0xeb, 0x7f, 0x3b, 0xb8, 0x60, 0x7d, 0xef, 0x7f, 0xbf, 0xf0, 0x99, 0xaf, + 0xa2, 0x38, 0x16, 0xf6, 0x15, 0x3a, 0x3b, 0x57, 0x76, 0xaf, 0xfe, 0x87, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xba, 0xfb, 0xfc, 0xdd, 0x18, 0x2e, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -9346,6 +9549,22 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RestartPolicyRules) > 0 { + for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } if m.RestartPolicy != nil { i -= len(*m.RestartPolicy) copy(dAtA[i:], *m.RestartPolicy) @@ -9600,6 +9819,44 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerExtendedResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerExtendedResourceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerExtendedResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestName) + copy(dAtA[i:], m.RequestName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerImage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9712,6 +9969,81 @@ func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerRestartRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerRestartRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerRestartRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExitCodes != nil { + { 
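/*
   What follows is the standard gogo/protobuf reverse-marshal pattern for an
   embedded message: MarshalToSizedBuffer fills dAtA from the end toward the
   front, so the child (ExitCodes) is serialized into the prefix dAtA[:i],
   i is moved back by its size, and then the varint length and the field tag
   are prepended. The tag byte 0x12 decodes as (2<<3)|2 — field 2, wire
   type 2 (length-delimited) — matching case 2 in the Unmarshal below.
*/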
+ size, err := m.ExitCodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerRestartRuleOnExitCodes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerRestartRuleOnExitCodes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerRestartRuleOnExitCodes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Values[iNdEx])) + i-- + dAtA[i] = 0x10 + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -10681,6 +11013,18 @@ func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.FileKeyRef != nil { + { + size, err := m.FileKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.SecretKeyRef != nil { { size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) @@ -10790,6 +11134,22 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if len(m.RestartPolicyRules) > 0 { + for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } if m.RestartPolicy != nil { i -= len(*m.RestartPolicy) copy(dAtA[i:], *m.RestartPolicy) @@ -11426,6 +11786,54 @@ func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FileKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) @@ -15791,6 +16199,59 @@ func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodCertificateProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CertificateChainPath) + copy(dAtA[i:], m.CertificateChainPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChainPath))) + i-- + dAtA[i] = 0x32 + i -= len(m.KeyPath) + copy(dAtA[i:], m.KeyPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyPath))) + i-- + dAtA[i] = 0x2a + i -= len(m.CredentialBundlePath) + copy(dAtA[i:], m.CredentialBundlePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CredentialBundlePath))) + i-- + dAtA[i] = 0x22 + if m.MaxExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.KeyType) + copy(dAtA[i:], m.KeyType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyType))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16016,6 +16477,48 @@ func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodExtendedResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExtendedResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodExtendedResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ResourceClaimName) + copy(dAtA[i:], m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClaimName))) + i-- + dAtA[i] = 0x12 + if len(m.RequestMappings) > 0 { + for iNdEx := len(m.RequestMappings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequestMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *PodIP) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16597,6 +17100,15 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.HostnameOverride != nil { + i -= len(*m.HostnameOverride) + copy(dAtA[i:], *m.HostnameOverride) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HostnameOverride))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } if m.Resources != nil { { size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) @@ -17085,6 +17597,20 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExtendedResourceClaimStatus != nil { + { + size, err := 
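/*
   PodStatus.ExtendedResourceClaimStatus gets a two-byte tag: a single tag
   byte only covers field numbers 1-15. The key is (fieldNumber<<3)|wireType
   encoded as a varint; here 0x92 0x01 decodes to 146 = (18<<3)|2, i.e.
   field 18, wire type 2. Because the buffer is written back-to-front, the
   code stores the trailing varint byte (0x1) first and the leading byte
   (0x92) last, so a forward read sees them in the correct order. The same
   pattern appears for PodSpec.HostnameOverride (0xca 0x02 -> 330 = field 41,
   wire type 2) and Container.RestartPolicyRules (0xca 0x01 -> field 25).
*/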
m.ExtendedResourceClaimStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- dAtA[i] = 0x1 @@ -21108,6 +21634,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PodCertificate != nil { + { + size, err := m.PodCertificate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if m.ClusterTrustBundle != nil { { size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i]) @@ -22471,6 +23009,27 @@ func (m *Container) Size() (n int) { l = len(*m.RestartPolicy) n += 2 + l + sovGenerated(uint64(l)) } + if len(m.RestartPolicyRules) > 0 { + for _, e := range m.RestartPolicyRules { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ContainerExtendedResourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RequestName) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -22520,6 +23079,37 @@ func (m *ContainerResizePolicy) Size() (n int) { return n } +func (m *ContainerRestartRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExitCodes != nil { + l = m.ExitCodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerRestartRuleOnExitCodes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, e := range m.Values { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + func (m *ContainerState) Size() (n int) { if m == nil { return 0 @@ -22896,6 +23486,10 @@ func (m *EnvVarSource) Size() (n int) { l = m.SecretKeyRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.FileKeyRef != nil { + l = m.FileKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23007,6 +23601,12 @@ func (m *EphemeralContainerCommon) Size() (n int) { l = len(*m.RestartPolicy) n += 2 + l + sovGenerated(uint64(l)) } + if len(m.RestartPolicyRules) > 0 { + for _, e := range m.RestartPolicyRules { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23149,6 +23749,24 @@ func (m *FCVolumeSource) Size() (n int) { return n } +func (m *FileKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + func (m *FlexPersistentVolumeSource) Size() (n int) { if m == nil { return 0 @@ -24752,6 +25370,28 @@ func (m *PodAttachOptions) Size() (n int) { return n } +func (m *PodCertificateProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KeyType) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds)) + } + l = len(m.CredentialBundlePath) + n += 1 + l + sovGenerated(uint64(l)) + l 
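/*
   Each Size() method mirrors its MarshalToSizedBuffer exactly: for every
   populated field it adds the tag width (1 byte for fields 1-15, 2 bytes
   from field 16 on), plus sovGenerated(len) — the number of bytes the
   length varint occupies, roughly ceil(bits/7) with a minimum of one —
   plus the payload length itself. A mismatch between Size() and the
   marshaller would corrupt the buffer, which is why both halves are always
   regenerated together, as in this diff.
*/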
= len(m.KeyPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CertificateChainPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodCondition) Size() (n int) { if m == nil { return 0 @@ -24837,6 +25477,23 @@ func (m *PodExecOptions) Size() (n int) { return n } +func (m *PodExtendedResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequestMappings) > 0 { + for _, e := range m.RequestMappings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodIP) Size() (n int) { if m == nil { return 0 @@ -25224,6 +25881,10 @@ func (m *PodSpec) Size() (n int) { l = m.Resources.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.HostnameOverride != nil { + l = len(*m.HostnameOverride) + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -25296,6 +25957,10 @@ func (m *PodStatus) Size() (n int) { } } n += 2 + sovGenerated(uint64(m.ObservedGeneration)) + if m.ExtendedResourceClaimStatus != nil { + l = m.ExtendedResourceClaimStatus.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -26751,6 +27416,10 @@ func (m *VolumeProjection) Size() (n int) { l = m.ClusterTrustBundle.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.PodCertificate != nil { + l = m.PodCertificate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -27426,6 +28095,11 @@ func (this *Container) String() string { repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," } repeatedStringForResizePolicy += "}" + repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{" + for _, f := range this.RestartPolicyRules { + repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRestartPolicyRules += "}" s := strings.Join([]string{`&Container{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -27451,6 +28125,19 @@ func (this *Container) String() string { `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, + `RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`, + `}`, + }, "") + return s +} +func (this *ContainerExtendedResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerExtendedResourceRequest{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `RequestName:` + fmt.Sprintf("%v", this.RequestName) + `,`, `}`, }, "") return s @@ -27491,6 +28178,28 @@ func (this *ContainerResizePolicy) String() string { }, "") return s } +func (this *ContainerRestartRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerRestartRule{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `ExitCodes:` + strings.Replace(this.ExitCodes.String(), "ContainerRestartRuleOnExitCodes", "ContainerRestartRuleOnExitCodes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerRestartRuleOnExitCodes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerRestartRuleOnExitCodes{`, + `Operator:` + 
fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *ContainerState) String() string { if this == nil { return "nil" @@ -27777,6 +28486,7 @@ func (this *EnvVarSource) String() string { `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, `ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, `SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `FileKeyRef:` + strings.Replace(this.FileKeyRef.String(), "FileKeySelector", "FileKeySelector", 1) + `,`, `}`, }, "") return s @@ -27826,6 +28536,11 @@ func (this *EphemeralContainerCommon) String() string { repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," } repeatedStringForResizePolicy += "}" + repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{" + for _, f := range this.RestartPolicyRules { + repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRestartPolicyRules += "}" s := strings.Join([]string{`&EphemeralContainerCommon{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -27851,6 +28566,7 @@ func (this *EphemeralContainerCommon) String() string { `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, + `RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`, `}`, }, "") return s @@ -27951,6 +28667,19 @@ func (this *FCVolumeSource) String() string { }, "") return s } +func (this *FileKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileKeySelector{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *FlexPersistentVolumeSource) String() string { if this == nil { return "nil" @@ -29169,6 +29898,21 @@ func (this *PodAttachOptions) String() string { }, "") return s } +func (this *PodCertificateProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCertificateProjection{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `KeyType:` + fmt.Sprintf("%v", this.KeyType) + `,`, + `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`, + `CredentialBundlePath:` + fmt.Sprintf("%v", this.CredentialBundlePath) + `,`, + `KeyPath:` + fmt.Sprintf("%v", this.KeyPath) + `,`, + `CertificateChainPath:` + fmt.Sprintf("%v", this.CertificateChainPath) + `,`, + `}`, + }, "") + return s +} func (this *PodCondition) String() string { if this == nil { return "nil" @@ -29228,6 +29972,22 @@ func (this *PodExecOptions) String() string { }, "") return s } +func (this *PodExtendedResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequestMappings := "[]ContainerExtendedResourceRequest{" + for _, f := range this.RequestMappings { + repeatedStringForRequestMappings += 
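/*
   The String() helpers in this region are debug-only pretty-printers. The
   strings.Replace(f.String(), "T", "T", 1) calls that replace a type name
   with itself appear to be an artifact of the generator, which emits a
   rename hook even when the referenced type lives in the same package;
   they are deliberate no-ops.
*/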
strings.Replace(strings.Replace(f.String(), "ContainerExtendedResourceRequest", "ContainerExtendedResourceRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForRequestMappings += "}" + s := strings.Join([]string{`&PodExtendedResourceClaimStatus{`, + `RequestMappings:` + repeatedStringForRequestMappings + `,`, + `ResourceClaimName:` + fmt.Sprintf("%v", this.ResourceClaimName) + `,`, + `}`, + }, "") + return s +} func (this *PodIP) String() string { if this == nil { return "nil" @@ -29503,6 +30263,7 @@ func (this *PodSpec) String() string { `SchedulingGates:` + repeatedStringForSchedulingGates + `,`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, + `HostnameOverride:` + valueToStringGenerated(this.HostnameOverride) + `,`, `}`, }, "") return s @@ -29564,6 +30325,7 @@ func (this *PodStatus) String() string { `ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`, `HostIPs:` + repeatedStringForHostIPs + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ExtendedResourceClaimStatus:` + strings.Replace(this.ExtendedResourceClaimStatus.String(), "PodExtendedResourceClaimStatus", "PodExtendedResourceClaimStatus", 1) + `,`, `}`, }, "") return s @@ -30673,6 +31435,7 @@ func (this *VolumeProjection) String() string { `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, `ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`, + `PodCertificate:` + strings.Replace(this.PodCertificate.String(), "PodCertificateProjection", "PodCertificateProjection", 1) + `,`, `}`, }, "") return s @@ -36465,61 +37228,11 @@ func (m *Container) Unmarshal(dAtA []byte) error { s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) m.RestartPolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerImage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 25: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36529,43 +37242,26 @@ func (m *ContainerImage) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) - } - m.SizeBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SizeBytes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{}) + if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -36587,7 +37283,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerPort) Unmarshal(dAtA []byte) error { +func (m *ContainerExtendedResourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36610,15 +37306,15 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerExtendedResourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerExtendedResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36646,49 +37342,11 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) - } - m.HostPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HostPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) - } - m.ContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ContainerPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36716,11 +37374,11 @@ func 
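/*
   The large remove-and-re-add hunks around here are mechanical churn: the
   generator emits Unmarshal functions in type-name order, so inserting
   ContainerExtendedResourceRequest and the ContainerRestartRule* types
   shifts the ContainerImage, ContainerPort and ContainerResizePolicy
   bodies, and the diff renders that shift as deletions followed by
   re-additions. The re-added bodies match the removed ones; no behavior
   changes in the moved functions.
*/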
(m *ContainerPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + m.ResourceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequestName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36748,7 +37406,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostIP = string(dAtA[iNdEx:postIndex]) + m.RequestName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -36771,7 +37429,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { +func (m *ContainerImage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36794,15 +37452,15 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36830,13 +37488,13 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) } - var stringLen uint64 + m.SizeBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36846,24 +37504,585 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SizeBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
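/*
   Every generated Unmarshal follows the skeleton visible here: read the key
   varint byte by byte (each byte contributes 7 bits, a set high bit means
   continue, shift >= 64 guards against overflow), split it into fieldNum
   (wire >> 3) and wireType (wire & 7), then switch on fieldNum. Unknown
   fields fall through to skipGenerated, which advances past the value —
   the property that lets older clients tolerate new fields such as
   RestartPolicyRules without breaking wire compatibility.
*/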
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerRestartRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerRestartRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerRestartRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = ContainerRestartRuleAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExitCodes == nil { + m.ExitCodes = &ContainerRestartRuleOnExitCodes{} + } + if err := m.ExitCodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerRestartRuleOnExitCodes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = ContainerRestartRuleOnExitCodesOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= 
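/*
   Values is a repeated int32, so the decoder accepts both encodings:
   wire type 0 (one varint per element) and wire type 2 (packed: a byte
   length followed by back-to-back varints). The pre-scan below that counts
   bytes with the high bit clear is simply counting varint terminators so
   the slice can be pre-sized before the second pass decodes the elements.
*/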
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Values) == 0 { + m.Values = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -38790,13 +40009,196 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var msglen int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38806,31 +40208,14 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} - } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38858,11 +40243,11 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38891,7 +40276,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s + m.AppProtocol = &s iNdEx = postIndex default: iNdEx = preIndex @@ -38914,7 +40299,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointPort) Unmarshal(dAtA []byte) error { +func (m *EndpointSubset) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -38937,17 +40322,17 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38957,48 +40342,31 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39008,29 +40376,31 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39040,24 +40410,25 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b 
< 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.AppProtocol = &s + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -39080,7 +40451,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { +func (m *Endpoints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39103,15 +40474,15 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39138,48 +40509,13 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39206,8 +40542,8 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39232,7 +40568,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } return nil } -func (m *Endpoints) Unmarshal(dAtA []byte) error { +func (m 
*EndpointsList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39255,15 +40591,15 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39290,13 +40626,13 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39323,8 +40659,8 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39349,7 +40685,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointsList) Unmarshal(dAtA []byte) error { +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39372,15 +40708,47 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39407,13 +40775,16 @@ func (m 
*EndpointsList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39440,8 +40811,10 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39466,7 +40839,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { +func (m *EnvVar) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39489,15 +40862,15 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -39525,13 +40898,13 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39541,31 +40914,27 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} - } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39592,10 +40961,10 @@ func (m 
*EnvFromSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39620,7 +40989,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVar) Unmarshal(dAtA []byte) error { +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39643,79 +41012,15 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39742,66 +41047,16 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVarSource) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39828,16 +41083,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39864,16 +41119,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39900,16 +41155,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FileKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39936,10 +41191,10 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} + if m.FileKeyRef == nil { + m.FileKeyRef = &FileKeySelector{} } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FileKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -40700,46 +41955,116 @@ 
func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40766,16 +42091,16 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 22: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40785,31 +42110,28 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartupProbe == nil { - m.StartupProbe = &Probe{} - } - if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) + m.RestartPolicy = &s iNdEx = postIndex - case 23: + case 25: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40836,44 +42158,11 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) - if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{}) + if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) - m.RestartPolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42113,6 +43402,173 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *FileKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -55916,17 +57372,297 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { fieldNum := int32(wire 
>> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodCertificateProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -55936,31 +57672,29 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SignerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -55970,81 +57704,29 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.KeyType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56054,17 +57736,17 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Stdin = 
bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + m.MaxExpirationSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialBundlePath", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56074,37 +57756,29 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Stdout = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.Stderr = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int + m.CredentialBundlePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56114,15 +57788,27 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TTY = bool(v != 0) - case 5: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CertificateChainPath", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56150,7 +57836,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(dAtA[iNdEx:postIndex]) + m.CertificateChainPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -56607,15 +58293,210 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56643,11 +58524,11 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56675,8 +58556,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Value = &s + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -56699,7 +58579,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodExecOptions) Unmarshal(dAtA []byte) error { +func (m *PodExtendedResourceClaimStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -56722,97 +58602,17 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodExtendedResourceClaimStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodExtendedResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdout = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stderr = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TTY = bool(v != 0) - case 5: 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequestMappings", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56822,27 +58622,29 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(dAtA[iNdEx:postIndex]) + m.RequestMappings = append(m.RequestMappings, ContainerExtendedResourceRequest{}) + if err := m.RequestMappings[len(m.RequestMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56870,7 +58672,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + m.ResourceClaimName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -60088,6 +61890,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostnameOverride", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HostnameOverride = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -60687,6 +62522,42 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { break } } + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceClaimStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtendedResourceClaimStatus == nil { + m.ExtendedResourceClaimStatus = &PodExtendedResourceClaimStatus{} + } + if err := m.ExtendedResourceClaimStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -73489,6 +75360,42 @@ func (m *VolumeProjection) 
Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCertificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodCertificate == nil { + m.PodCertificate = &PodCertificateProjection{} + } + if err := m.PodCertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/openshift/tools/vendor/k8s.io/api/core/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/core/v1/generated.proto index 9b48fb1c39..fb26953147 100644 --- a/openshift/tools/vendor/k8s.io/api/core/v1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/core/v1/generated.proto @@ -737,8 +737,8 @@ message Container { repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple + // The keys defined within a source may consist of any printable ASCII characters except '='. + // When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -768,10 +768,10 @@ message Container { repeated ContainerResizePolicy resizePolicy = 23; // RestartPolicy defines the restart behavior of individual containers in a pod. - // This field may only be set for init containers, and the only allowed value is "Always". - // For non-init containers or when this field is not specified, + // This overrides the pod-level restart policy. When this field is not specified, // the restart behavior is defined by the Pod's restart policy and the container type. - // Setting the RestartPolicy as "Always" for the init container will have the following effect: + // Additionally, setting the RestartPolicy as "Always" for the init container will + // have the following effect: // this init container will be continually restarted on // exit until all regular containers have terminated. Once all regular // containers have completed, all init containers with restartPolicy "Always" @@ -786,6 +786,22 @@ message Container { // +optional optional string restartPolicy = 24; + // Represents a list of rules to be checked to determine if the + // container should be restarted on exit. The rules are evaluated in + // order. Once a rule matches a container exit condition, the remaining + // rules are ignored. If no rule matches the container exit condition, + // the Container-level restart policy determines whether the container + // is restarted or not. Constraints on the rules: + // - At most 20 rules are allowed. + // - Rules can have the same action. + // - Identical rules are not forbidden in validations. + // When rules are specified, container MUST set RestartPolicy explicitly + // even if it matches the Pod's RestartPolicy.
+ // +featureGate=ContainerRestartRules + // +optional + // +listType=atomic + repeated ContainerRestartRule restartPolicyRules = 25; + // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional @@ -888,6 +904,19 @@ message Container { optional bool tty = 18; } +// ContainerExtendedResourceRequest has the mapping of container name, +// extended resource name to the device request name. +message ContainerExtendedResourceRequest { + // The name of the container requesting resources. + optional string containerName = 1; + + // The name of the extended resource in that container which gets backed by DRA. + optional string resourceName = 2; + + // The name of the request in the special ResourceClaim which corresponds to the extended resource. + optional string requestName = 3; +} + // Describe a container image message ContainerImage { // Names by which this image is known. @@ -942,6 +971,39 @@ message ContainerResizePolicy { optional string restartPolicy = 2; } +// ContainerRestartRule describes how a container exit is handled. +message ContainerRestartRule { + // Specifies the action taken on a container exit if the requirements + // are satisfied. The only possible value is "Restart" to restart the + // container. + // +required + optional string action = 1; + + // Represents the exit codes to check on container exits. + // +optional + // +oneOf=when + optional ContainerRestartRuleOnExitCodes exitCodes = 2; +} + +// ContainerRestartRuleOnExitCodes describes the condition +// for handling an exited container based on its exit codes. +message ContainerRestartRuleOnExitCodes { + // Represents the relationship between the container exit code(s) and the + // specified values. Possible values are: + // - In: the requirement is satisfied if the container exit code is in the + // set of specified values. + // - NotIn: the requirement is satisfied if the container exit code is + // not in the set of specified values. + // +required + optional string operator = 1; + + // Specifies the set of values to check for container exit codes. + // At most 255 elements are allowed. + // +optional + // +listType=set + repeated int32 values = 2; +} + // ContainerState holds a possible state of container. // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. @@ -1344,7 +1406,8 @@ message EndpointsList { // EnvFromSource represents the source of a set of ConfigMaps or Secrets message EnvFromSource { - // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER. + // Optional text to prepend to the name of each environment variable. + // May consist of any printable ASCII characters except '='. // +optional optional string prefix = 1; @@ -1359,7 +1422,8 @@ message EnvFromSource { // EnvVar represents an environment variable present in a Container. message EnvVar { - // Name of the environment variable. Must be a C_IDENTIFIER. + // Name of the environment variable. + // May consist of any printable ASCII characters except '='. optional string name = 1; // Variable references $(VAR_NAME) are expanded @@ -1398,6 +1462,13 @@ message EnvVarSource { // Selects a key of a secret in the pod's namespace // +optional optional SecretKeySelector secretKeyRef = 4; + + // FileKeyRef selects a key of the env file. + // Requires the EnvFiles feature gate to be enabled. 
+ // + // +featureGate=EnvFiles + // +optional + optional FileKeySelector fileKeyRef = 5; } // An EphemeralContainer is a temporary container that you may add to an existing Pod for @@ -1479,8 +1550,8 @@ message EphemeralContainerCommon { repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple + // The keys defined within a source may consist of any printable ASCII characters except '='. + // When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -1510,12 +1581,19 @@ message EphemeralContainerCommon { // Restart policy for the container to manage the restart behavior of each // container within a pod. - // This may only be set for init containers. You cannot set this field on - // ephemeral containers. + // You cannot set this field on ephemeral containers. // +featureGate=SidecarContainers // +optional optional string restartPolicy = 24; + // Represents a list of rules to be checked to determine if the + // container should be restarted on exit. You cannot set this field on + // ephemeral containers. + // +featureGate=ContainerRestartRules + // +optional + // +listType=atomic + repeated ContainerRestartRule restartPolicyRules = 25; + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -1776,6 +1854,36 @@ message FCVolumeSource { repeated string wwids = 5; } +// FileKeySelector selects a key of the env file. +// +structType=atomic +message FileKeySelector { + // The name of the volume mount containing the env file. + // +required + optional string volumeName = 1; + + // The path within the volume from which to select the file. + // Must be relative and may not contain the '..' path or start with '..'. + // +required + optional string path = 2; + + // The key within the env file. An invalid key will prevent the pod from starting. + // The keys defined within a source may consist of any printable ASCII characters except '='. + // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + // +required + optional string key = 3; + + // Specify whether the file or its key must be defined. If the file or key + // does not exist, then the env var is not published. + // If optional is set to true and the specified key does not exist, + // the environment variable will not be set in the Pod's containers. + // + // If optional is set to false and the specified key does not exist, + // an error will be returned during Pod creation. + // +optional + // +default=false + optional bool optional = 4; +} + // FlexPersistentVolumeSource represents a generic persistent volume resource that is // provisioned/attached using an exec based plugin. message FlexPersistentVolumeSource { @@ -1949,7 +2057,6 @@ message GlusterfsPersistentVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // endpoints is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // path is the Glusterfs volume path. 
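As a brief illustrative aside (not part of the vendored diff): the following minimal Go sketch shows how the fileKeyRef source and FileKeySelector type defined above might be populated from client code. The volume, path, and key names are hypothetical, and the EnvFiles feature gate is assumed to be enabled on the cluster.

package main

import corev1 "k8s.io/api/core/v1"

// fileBackedEnvVar sketches an EnvVar whose value is read from a key in an
// env file via the FileKeySelector type introduced above. All names here are
// hypothetical examples.
func fileBackedEnvVar() corev1.EnvVar {
	optional := false
	return corev1.EnvVar{
		Name: "DATABASE_URL", // may be any printable ASCII except '='
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{
				VolumeName: "config",         // volume mount containing the env file
				Path:       "app/config.env", // relative; must not contain '..'
				Key:        "DATABASE_URL",   // key within the env file
				Optional:   &optional,        // false: a missing key fails pod creation
			},
		},
	}
}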
@@ -3160,15 +3267,13 @@ message PersistentVolumeClaimSpec { // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. // If specified, the CSI driver will create or update the volume with the attributes defined // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - // will be set by the persistentvolume controller if it exists. + // it can be changed after the claim is created. An empty string or nil value indicates that no + // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + // this field can be reset to its previous value (including nil) to cancel the modification. // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be // set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource // exists. // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 9; @@ -3267,14 +3372,12 @@ message PersistentVolumeClaimStatus { // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string currentVolumeAttributesClassName = 8; // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional ModifyVolumeStatus modifyVolumeStatus = 9; @@ -3515,7 +3618,6 @@ message PersistentVolumeSpec { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 10; @@ -3684,8 +3786,8 @@ message PodAntiAffinity { // most preferred is the one with the greatest sum of weights, i.e. // for each node that meets all of the scheduling requirements (resource // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // compute a sum by iterating through the elements of this field and subtracting + // "weight" from the sum if the node has pods which match the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred.
// +optional // +listType=atomic @@ -3725,6 +3827,79 @@ message PodAttachOptions { optional string container = 5; } +// PodCertificateProjection provides a private key and X.509 certificate in the +// pod filesystem. +message PodCertificateProjection { + // Kubelet's generated CSRs will be addressed to this signer. + // + // +required + optional string signerName = 1; + + // The type of keypair Kubelet will generate for the pod. + // + // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + // "ECDSAP521", and "ED25519". + // + // +required + optional string keyType = 2; + + // maxExpirationSeconds is the maximum lifetime permitted for the + // certificate. + // + // Kubelet copies this value verbatim into the PodCertificateRequests it + // generates for this projection. + // + // If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + // will reject values shorter than 3600 (1 hour). The maximum allowable + // value is 7862400 (91 days). + // + // The signer implementation is then free to issue a certificate with any + // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + // seconds (1 hour). This constraint is enforced by kube-apiserver. + // `kubernetes.io` signers will never issue certificates with a lifetime + // longer than 24 hours. + // + // +optional + optional int32 maxExpirationSeconds = 3; + + // Write the credential bundle at this path in the projected volume. + // + // The credential bundle is a single file that contains multiple PEM blocks. + // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + // key. + // + // The remaining blocks are CERTIFICATE blocks, containing the issued + // certificate chain from the signer (leaf and any intermediates). + // + // Using credentialBundlePath lets your Pod's application code make a single + // atomic read that retrieves a consistent key and certificate chain. If you + // project them to separate files, your application code will need to + // additionally check that the leaf certificate was issued to the key. + // + // +optional + optional string credentialBundlePath = 4; + + // Write the key at this path in the projected volume. + // + // Most applications should use credentialBundlePath. When using keyPath + // and certificateChainPath, your application needs to check that the key + // and leaf certificate are consistent, because it is possible to read the + // files mid-rotation. + // + // +optional + optional string keyPath = 5; + + // Write the certificate chain at this path in the projected volume. + // + // Most applications should use credentialBundlePath. When using keyPath + // and certificateChainPath, your application needs to check that the key + // and leaf certificate are consistent, because it is possible to read the + // files mid-rotation. + // + // +optional + optional string certificateChainPath = 6; +} + // PodCondition contains details for the current condition of this pod. message PodCondition { // Type is the type of the condition. @@ -3829,6 +4004,20 @@ message PodExecOptions { repeated string command = 6; } +// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended +// resource requests backed by DRA. It stores the generated name for +// the corresponding special ResourceClaim created by the scheduler. +message PodExtendedResourceClaimStatus { + // RequestMappings identifies the mapping of to device request + // in the generated ResourceClaim. 
+ // +listType=atomic + repeated ContainerExtendedResourceRequest requestMappings = 1; + + // ResourceClaimName is the name of the ResourceClaim that was + // generated for the Pod in the namespace of the Pod. + optional string resourceClaimName = 2; +} + // PodIP represents a single IP address allocated to the pod. message PodIP { // IP is the IP address assigned to the pod @@ -4269,7 +4458,9 @@ message PodSpec { optional string nodeName = 10; // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. + // When using HostNetwork you should specify ports so the scheduler is aware. + // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, + // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. // Default to false. // +k8s:conversion-gen=false // +optional @@ -4434,6 +4625,7 @@ message PodSpec { // - spec.hostPID // - spec.hostIPC // - spec.hostUsers + // - spec.resources // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile @@ -4504,7 +4696,7 @@ message PodSpec { // Resources is the total amount of CPU and Memory resources required by all // containers in the pod. It supports specifying Requests and Limits for - // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported. // // This field enables fine-grained control over resource allocation for the // entire pod, allowing resource sharing among containers in a pod. @@ -4516,6 +4708,21 @@ message PodSpec { // +featureGate=PodLevelResources // +optional optional ResourceRequirements resources = 40; + + // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. + // This field only specifies the pod's hostname and does not affect its DNS records. + // When this field is set to a non-empty string: + // - It takes precedence over the values set in `hostname` and `subdomain`. + // - The Pod's hostname will be set to this value. + // - `setHostnameAsFQDN` must be nil or set to false. + // - `hostNetwork` must be set to false. + // + // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. + // Requires the HostnameOverride feature gate to be enabled. + // + // +featureGate=HostnameOverride + // +optional + optional string hostnameOverride = 41; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -4674,6 +4881,11 @@ message PodStatus { // +featureGate=DynamicResourceAllocation // +optional repeated PodResourceClaimStatus resourceClaimStatuses = 15; + + // Status of extended resource claim backed by DRA. + // +featureGate=DRAExtendedResource + // +optional + optional PodExtendedResourceClaimStatus extendedResourceClaimStatus = 18; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -5298,7 +5510,7 @@ message ResourceRequirements { // Claims lists the names of resources, defined in spec.resourceClaims, // that are used by this container. // - // This is an alpha field and requires enabling the + // This field depends on the // DynamicResourceAllocation feature gate. // // This field is immutable. It can only be set for containers. 
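As a brief illustrative aside (not part of the vendored diff): the following minimal Go sketch shows how the restartPolicyRules field and the ContainerRestartRule types introduced in this diff might be combined. The container name and image reference are hypothetical, and the ContainerRestartRules feature gate is assumed to be enabled.

package main

import corev1 "k8s.io/api/core/v1"

// retryOnExitCode42 sketches a container that is restarted only when it
// exits with code 42; any other exit falls back to the explicit
// container-level policy (Never). Names and image are hypothetical.
func retryOnExitCode42() corev1.Container {
	never := corev1.ContainerRestartPolicyNever
	return corev1.Container{
		Name:          "worker",
		Image:         "registry.example/worker:latest",
		RestartPolicy: &never, // must be set explicitly when rules are present
		RestartPolicyRules: []corev1.ContainerRestartRule{{
			Action: corev1.ContainerRestartRuleActionRestart,
			ExitCodes: &corev1.ContainerRestartRuleOnExitCodes{
				Operator: corev1.ContainerRestartRuleOnExitCodesOpIn,
				Values:   []int32{42},
			},
		}},
	}
}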
@@ -6301,7 +6513,6 @@ message Taint { optional string effect = 3; // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; } @@ -6682,6 +6893,44 @@ message VolumeProjection { // +featureGate=ClusterTrustBundleProjection // +optional optional ClusterTrustBundleProjection clusterTrustBundle = 5; + + // Projects an auto-rotating credential bundle (private key and certificate + // chain) that the pod can use either as a TLS client or server. + // + // Kubelet generates a private key and uses it to send a + // PodCertificateRequest to the named signer. Once the signer approves the + // request and issues a certificate chain, Kubelet writes the key and + // certificate chain to the pod filesystem. The pod does not start until + // certificates have been issued for each podCertificate projected volume + // source in its spec. + // + // Kubelet will begin trying to rotate the certificate at the time indicated + // by the signer using the PodCertificateRequest.Status.BeginRefreshAt + // timestamp. + // + // Kubelet can write a single file, indicated by the credentialBundlePath + // field, or separate files, indicated by the keyPath and + // certificateChainPath fields. + // + // The credential bundle is a single file in PEM format. The first PEM + // entry is the private key (in PKCS#8 format), and the remaining PEM + // entries are the certificate chain issued by the signer (typically, + // signers will return their certificate chain in leaf-to-root order). + // + // Prefer using the credential bundle format, since your application code + // can read it atomically. If you use keyPath and certificateChainPath, + // your application must make two separate file reads. If these coincide + // with a certificate rotation, it is possible that the private key and leaf + // certificate you read may not correspond to each other. Your application + // will need to check for this condition, and re-read until they are + // consistent. + // + // The named signer chooses the format of the certificate it + // issues; consult the signer implementation's documentation to learn how to + // use the certificates it issues. + // + // +featureGate=PodCertificateProjection + // +optional + optional PodCertificateProjection podCertificate = 6; } // VolumeResourceRequirements describes the storage resource requirements for a volume. @@ -6753,13 +7002,12 @@ message VolumeSource { // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://examples.k8s.io/volumes/iscsi/README.md + // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi // +optional optional ISCSIVolumeSource iscsi = 8; // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -6771,7 +7019,6 @@ message VolumeSource { // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
- // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; diff --git a/openshift/tools/vendor/k8s.io/api/core/v1/types.go b/openshift/tools/vendor/k8s.io/api/core/v1/types.go index f7641e485a..08b6d351cc 100644 --- a/openshift/tools/vendor/k8s.io/api/core/v1/types.go +++ b/openshift/tools/vendor/k8s.io/api/core/v1/types.go @@ -91,12 +91,11 @@ type VolumeSource struct { NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://examples.k8s.io/volumes/iscsi/README.md + // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // persistentVolumeClaimVolumeSource represents a reference to a @@ -106,7 +105,6 @@ type VolumeSource struct { PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // flexVolume represents a generic volume resource that is @@ -437,7 +435,6 @@ type PersistentVolumeSpec struct { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` @@ -616,15 +613,13 @@ type PersistentVolumeClaimSpec struct { // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. // If specified, the CSI driver will create or update the volume with the attributes defined // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - // will be set by the persistentvolume controller if it exists. + // it can be changed after the claim is created. An empty string or nil value indicates that no + // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + // this field can be reset to its previous value (including nil) to cancel the modification. 
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be // set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource // exists. // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` @@ -851,13 +846,11 @@ type PersistentVolumeClaimStatus struct { AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` @@ -972,7 +965,6 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // endpoints is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // path is the Glusterfs volume path. @@ -1993,6 +1985,79 @@ type ClusterTrustBundleProjection struct { Path string `json:"path" protobuf:"bytes,4,rep,name=path"` } +// PodCertificateProjection provides a private key and X.509 certificate in the +// pod filesystem. +type PodCertificateProjection struct { + // Kubelet's generated CSRs will be addressed to this signer. + // + // +required + SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,rep,name=signerName"` + + // The type of keypair Kubelet will generate for the pod. + // + // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + // "ECDSAP521", and "ED25519". + // + // +required + KeyType string `json:"keyType,omitempty" protobuf:"bytes,2,rep,name=keyType"` + + // maxExpirationSeconds is the maximum lifetime permitted for the + // certificate. + // + // Kubelet copies this value verbatim into the PodCertificateRequests it + // generates for this projection. + // + // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver + // will reject values shorter than 3600 (1 hour). The maximum allowable + // value is 7862400 (91 days). + // + // The signer implementation is then free to issue a certificate with any + // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime + // longer than 24 hours. + // + // +optional + MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,3,opt,name=maxExpirationSeconds"` + + // Write the credential bundle at this path in the projected volume. + // + // The credential bundle is a single file that contains multiple PEM blocks. + // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + // key. + // + // The remaining blocks are CERTIFICATE blocks, containing the issued + // certificate chain from the signer (leaf and any intermediates). + // + // Using credentialBundlePath lets your Pod's application code make a single + // atomic read that retrieves a consistent key and certificate chain. If you + // project them to separate files, your application code will need to + // additionally check that the leaf certificate was issued to the key. + // + // +optional + CredentialBundlePath string `json:"credentialBundlePath,omitempty" protobuf:"bytes,4,rep,name=credentialBundlePath"` + + // Write the key at this path in the projected volume. + // + // Most applications should use credentialBundlePath. When using keyPath + // and certificateChainPath, your application needs to check that the key + // and leaf certificate are consistent, because it is possible to read the + // files mid-rotation. + // + // +optional + KeyPath string `json:"keyPath,omitempty" protobuf:"bytes,5,rep,name=keyPath"` + + // Write the certificate chain at this path in the projected volume. + // + // Most applications should use credentialBundlePath. When using keyPath + // and certificateChainPath, your application needs to check that the key + // and leaf certificate are consistent, because it is possible to read the + // files mid-rotation. + // + // +optional + CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"` +} + // Represents a projected volume source type ProjectedVolumeSource struct { // sources is the list of volume projections. Each entry in this list @@ -2043,6 +2108,44 @@ type VolumeProjection struct { // +featureGate=ClusterTrustBundleProjection // +optional ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"` + + // Projects an auto-rotating credential bundle (private key and certificate + // chain) that the pod can use either as a TLS client or server. + // + // Kubelet generates a private key and uses it to send a + // PodCertificateRequest to the named signer. Once the signer approves the + // request and issues a certificate chain, Kubelet writes the key and + // certificate chain to the pod filesystem. The pod does not start until + // certificates have been issued for each podCertificate projected volume + // source in its spec. + // + // Kubelet will begin trying to rotate the certificate at the time indicated + // by the signer using the PodCertificateRequest.Status.BeginRefreshAt + // timestamp. + // + // Kubelet can write a single file, indicated by the credentialBundlePath + // field, or separate files, indicated by the keyPath and + // certificateChainPath fields. + // + // The credential bundle is a single file in PEM format. The first PEM + // entry is the private key (in PKCS#8 format), and the remaining PEM + // entries are the certificate chain issued by the signer (typically, + // signers will return their certificate chain in leaf-to-root order). 
+ // + // Prefer using the credential bundle format, since your application code + // can read it atomically. If you use keyPath and certificateChainPath, + // your application must make two separate file reads. If these coincide + // with a certificate rotation, it is possible that the private key and leaf + // certificate you read may not correspond to each other. Your application + // will need to check for this condition, and re-read until they are + // consistent. + // + // The named signer chooses the format of the certificate it + // issues; consult the signer implementation's documentation to learn how to + // use the certificates it issues. + // + // +featureGate=PodCertificateProjection + // +optional + PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"` } const ( @@ -2351,7 +2454,8 @@ type VolumeDevice struct { // EnvVar represents an environment variable present in a Container. type EnvVar struct { - // Name of the environment variable. Must be a C_IDENTIFIER. + // Name of the environment variable. + // May consist of any printable ASCII characters except '='. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Optional: no more than one of the following may be specified. @@ -2388,6 +2492,39 @@ type EnvVarSource struct { // Selects a key of a secret in the pod's namespace // +optional SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` + // FileKeyRef selects a key of the env file. + // Requires the EnvFiles feature gate to be enabled. + // + // +featureGate=EnvFiles + // +optional + FileKeyRef *FileKeySelector `json:"fileKeyRef,omitempty" protobuf:"bytes,5,opt,name=fileKeyRef"` +} + +// FileKeySelector selects a key of the env file. +// +structType=atomic +type FileKeySelector struct { + // The name of the volume mount containing the env file. + // +required + VolumeName string `json:"volumeName" protobuf:"bytes,1,opt,name=volumeName"` + // The path within the volume from which to select the file. + // Must be relative and may not contain the '..' path or start with '..'. + // +required + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + // The key within the env file. An invalid key will prevent the pod from starting. + // The keys defined within a source may consist of any printable ASCII characters except '='. + // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + // +required + Key string `json:"key" protobuf:"bytes,3,opt,name=key"` + // Specify whether the file or its key must be defined. If the file or key + // does not exist, then the env var is not published. + // If optional is set to true and the specified key does not exist, + // the environment variable will not be set in the Pod's containers. + // + // If optional is set to false and the specified key does not exist, + // an error will be returned during Pod creation. + // +optional + // +default=false + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } // ObjectFieldSelector selects an APIVersioned field of an object. @@ -2439,7 +2576,8 @@ type SecretKeySelector struct { // EnvFromSource represents the source of a set of ConfigMaps or Secrets type EnvFromSource struct { - // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER. + // Optional text to prepend to the name of each environment variable. + // May consist of any printable ASCII characters except '='.
// +optional Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` // The ConfigMap to select from @@ -2697,7 +2835,7 @@ type ResourceRequirements struct { // Claims lists the names of resources, defined in spec.resourceClaims, // that are used by this container. // - // This is an alpha field and requires enabling the + // This field depends on the // DynamicResourceAllocation feature gate. // // This field is immutable. It can only be set for containers. @@ -2805,8 +2943,8 @@ type Container struct { // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple + // The keys defined within a source may consist of any printable ASCII characters except '='. + // When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -2832,10 +2970,10 @@ type Container struct { // +listType=atomic ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // RestartPolicy defines the restart behavior of individual containers in a pod. - // This field may only be set for init containers, and the only allowed value is "Always". - // For non-init containers or when this field is not specified, + // This overrides the pod-level restart policy. When this field is not specified, // the restart behavior is defined by the Pod's restart policy and the container type. - // Setting the RestartPolicy as "Always" for the init container will have the following effect: + // Additionally, setting the RestartPolicy as "Always" for the init container will + // have the following effect: // this init container will be continually restarted on // exit until all regular containers have terminated. Once all regular // containers have completed, all init containers with restartPolicy "Always" @@ -2849,6 +2987,21 @@ type Container struct { // +featureGate=SidecarContainers // +optional RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` + // Represents a list of rules to be checked to determine if the + // container should be restarted on exit. The rules are evaluated in + // order. Once a rule matches a container exit condition, the remaining + // rules are ignored. If no rule matches the container exit condition, + // the Container-level restart policy determines whether the container + // is restarted or not. Constraints on the rules: + // - At most 20 rules are allowed. + // - Rules can have the same action. + // - Identical rules are not forbidden in validations. + // When rules are specified, container MUST set RestartPolicy explicitly + // even if it matches the Pod's RestartPolicy. + // +featureGate=ContainerRestartRules + // +optional + // +listType=atomic + RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"` // Pod volumes to mount into the container's filesystem. // Cannot be updated.
// +optional @@ -3478,11 +3631,64 @@ const ( ) // ContainerRestartPolicy is the restart policy for a single container. -// This may only be set for init containers and only allowed value is "Always". +// The only allowed values are "Always", "Never", and "OnFailure". type ContainerRestartPolicy string const ( - ContainerRestartPolicyAlways ContainerRestartPolicy = "Always" + ContainerRestartPolicyAlways ContainerRestartPolicy = "Always" + ContainerRestartPolicyNever ContainerRestartPolicy = "Never" + ContainerRestartPolicyOnFailure ContainerRestartPolicy = "OnFailure" +) + +// ContainerRestartRule describes how a container exit is handled. +type ContainerRestartRule struct { + // Specifies the action taken on a container exit if the requirements + // are satisfied. The only possible value is "Restart" to restart the + // container. + // +required + Action ContainerRestartRuleAction `json:"action,omitempty" proto:"bytes,1,opt,name=action" protobuf:"bytes,1,opt,name=action,casttype=ContainerRestartRuleAction"` + + // Represents the exit codes to check on container exits. + // +optional + // +oneOf=when + ExitCodes *ContainerRestartRuleOnExitCodes `json:"exitCodes,omitempty" proto:"bytes,2,opt,name=exitCodes" protobuf:"bytes,2,opt,name=exitCodes"` +} + +// ContainerRestartRuleAction describes the action to take when the +// container exits. +type ContainerRestartRuleAction string + +// The only valid action is Restart. +const ( + ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart" +) + +// ContainerRestartRuleOnExitCodes describes the condition +// for handling an exited container based on its exit codes. +type ContainerRestartRuleOnExitCodes struct { + // Represents the relationship between the container exit code(s) and the + // specified values. Possible values are: + // - In: the requirement is satisfied if the container exit code is in the + // set of specified values. + // - NotIn: the requirement is satisfied if the container exit code is + // not in the set of specified values. + // +required + Operator ContainerRestartRuleOnExitCodesOperator `json:"operator,omitempty" proto:"bytes,1,opt,name=operator" protobuf:"bytes,1,opt,name=operator,casttype=ContainerRestartRuleOnExitCodesOperator"` + + // Specifies the set of values to check for container exit codes. + // At most 255 elements are allowed. + // +optional + // +listType=set + Values []int32 `json:"values,omitempty" proto:"varint,2,rep,name=values" protobuf:"varint,2,rep,name=values"` +} + +// ContainerRestartRuleOnExitCodesOperator describes the operator +// to take for the exit codes. +type ContainerRestartRuleOnExitCodesOperator string + +const ( + ContainerRestartRuleOnExitCodesOpIn ContainerRestartRuleOnExitCodesOperator = "In" + ContainerRestartRuleOnExitCodesOpNotIn ContainerRestartRuleOnExitCodesOperator = "NotIn" ) // DNSPolicy defines how a pod's DNS will be configured. @@ -3678,8 +3884,8 @@ type PodAntiAffinity struct { // most preferred is the one with the greatest sum of weights, i.e. 
// for each node that meets all of the scheduling requirements (resource // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // compute a sum by iterating through the elements of this field and subtracting + // "weight" from the sum if the node has pods which match the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional // +listType=atomic @@ -3806,7 +4012,6 @@ type Taint struct { // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. // +optional TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` } @@ -3983,7 +4188,9 @@ type PodSpec struct { // +optional NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. + // When using HostNetwork you should specify ports so the scheduler is aware. + // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, + // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. // Default to false. // +k8s:conversion-gen=false // +optional @@ -4126,6 +4333,7 @@ type PodSpec struct { // - spec.hostPID // - spec.hostIPC // - spec.hostUsers + // - spec.resources // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile @@ -4194,7 +4402,7 @@ type PodSpec struct { ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` // Resources is the total amount of CPU and Memory resources required by all // containers in the pod. It supports specifying Requests and Limits for - // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported. // // This field enables fine-grained control over resource allocation for the // entire pod, allowing resource sharing among containers in a pod. @@ -4206,6 +4414,20 @@ type PodSpec struct { // +featureGate=PodLevelResources // +optional Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"` + // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. + // This field only specifies the pod's hostname and does not affect its DNS records. + // When this field is set to a non-empty string: + // - It takes precedence over the values set in `hostname` and `subdomain`. + // - The Pod's hostname will be set to this value. + // - `setHostnameAsFQDN` must be nil or set to false. + // - `hostNetwork` must be set to false. + // + // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. + // Requires the HostnameOverride feature gate to be enabled.
+ // + // +featureGate=HostnameOverride + // +optional + HostnameOverride *string `json:"hostnameOverride,omitempty" protobuf:"bytes,41,opt,name=hostnameOverride"` } // PodResourceClaim references exactly one ResourceClaim, either directly @@ -4267,6 +4489,31 @@ type PodResourceClaimStatus struct { ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"` } +// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended +// resource requests backed by DRA. It stores the generated name for +// the corresponding special ResourceClaim created by the scheduler. +type PodExtendedResourceClaimStatus struct { + // RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request + // in the generated ResourceClaim. + // +listType=atomic + RequestMappings []ContainerExtendedResourceRequest `json:"requestMappings" protobuf:"bytes,1,rep,name=requestMappings"` + + // ResourceClaimName is the name of the ResourceClaim that was + // generated for the Pod in the namespace of the Pod. + ResourceClaimName string `json:"resourceClaimName" protobuf:"bytes,2,name=resourceClaimName"` +} + +// ContainerExtendedResourceRequest has the mapping of container name, +// extended resource name to the device request name. +type ContainerExtendedResourceRequest struct { + // The name of the container requesting resources. + ContainerName string `json:"containerName" protobuf:"bytes,1,name=containerName"` + // The name of the extended resource in that container which gets backed by DRA. + ResourceName string `json:"resourceName" protobuf:"bytes,2,name=resourceName"` + // The name of the request in the special ResourceClaim which corresponds to the extended resource. + RequestName string `json:"requestName" protobuf:"bytes,3,name=requestName"` +} + // OSName is the set of OS'es that can be used in OS. type OSName string @@ -4799,8 +5046,8 @@ type EphemeralContainerCommon struct { // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple + // The keys defined within a source may consist of any printable ASCII characters except '='. + // When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -4826,11 +5073,17 @@ type EphemeralContainerCommon struct { ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Restart policy for the container to manage the restart behavior of each // container within a pod. - // This may only be set for init containers. You cannot set this field on - // ephemeral containers. + // You cannot set this field on ephemeral containers. // +featureGate=SidecarContainers // +optional RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` + // Represents a list of rules to be checked to determine if the + // container should be restarted on exit. You cannot set this field on + // ephemeral containers.
+ // +featureGate=ContainerRestartRules + // +optional + // +listType=atomic + RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"` // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -5091,6 +5344,10 @@ type PodStatus struct { // +featureGate=DynamicResourceAllocation // +optional ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"` + // Status of extended resource claim backed by DRA. + // +featureGate=DRAExtendedResource + // +optional + ExtendedResourceClaimStatus *PodExtendedResourceClaimStatus `json:"extendedResourceClaimStatus,omitempty" protobuf:"bytes,18,opt,name=extendedResourceClaimStatus"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5311,6 +5568,7 @@ type ReplicationControllerCondition struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.0 +// +k8s:supportsSubresource=/scale // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { diff --git a/openshift/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 9e987eefdd..1204307667 100644 --- a/openshift/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -356,11 +356,12 @@ var map_Container = map[string]string{ "args": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", - "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.",
+	"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
 	"env": "List of environment variables to set in the container. Cannot be updated.",
 	"resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
 	"resizePolicy": "Resources resize policy for the container.",
-	"restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
+	"restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
+	"restartPolicyRules": "Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, the container MUST set RestartPolicy explicitly even if it matches the Pod's RestartPolicy.",
 	"volumeMounts": "Pod volumes to mount into the container's filesystem.
Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -380,6 +381,17 @@ func (Container) SwaggerDoc() map[string]string { return map_Container } +var map_ContainerExtendedResourceRequest = map[string]string{ + "": "ContainerExtendedResourceRequest has the mapping of container name, extended resource name to the device request name.", + "containerName": "The name of the container requesting resources.", + "resourceName": "The name of the extended resource in that container which gets backed by DRA.", + "requestName": "The name of the request in the special ResourceClaim which corresponds to the extended resource.", +} + +func (ContainerExtendedResourceRequest) SwaggerDoc() map[string]string { + return map_ContainerExtendedResourceRequest +} + var map_ContainerImage = map[string]string{ "": "Describe a container image", "names": "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]", @@ -413,6 +425,26 @@ func (ContainerResizePolicy) SwaggerDoc() map[string]string { return map_ContainerResizePolicy } +var map_ContainerRestartRule = map[string]string{ + "": "ContainerRestartRule describes how a container exit is handled.", + "action": "Specifies the action taken on a container exit if the requirements are satisfied. The only possible value is \"Restart\" to restart the container.", + "exitCodes": "Represents the exit codes to check on container exits.", +} + +func (ContainerRestartRule) SwaggerDoc() map[string]string { + return map_ContainerRestartRule +} + +var map_ContainerRestartRuleOnExitCodes = map[string]string{ + "": "ContainerRestartRuleOnExitCodes describes the condition for handling an exited container based on its exit codes.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Possible values are: - In: the requirement is satisfied if the container exit code is in the\n set of specified values.\n- NotIn: the requirement is satisfied if the container exit code is\n not in the set of specified values.", + "values": "Specifies the set of values to check for container exit codes. At most 255 elements are allowed.", +} + +func (ContainerRestartRuleOnExitCodes) SwaggerDoc() map[string]string { + return map_ContainerRestartRuleOnExitCodes +} + var map_ContainerState = map[string]string{ "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", "waiting": "Details about a waiting container", @@ -597,7 +629,7 @@ func (EndpointsList) SwaggerDoc() map[string]string { var map_EnvFromSource = map[string]string{ "": "EnvFromSource represents the source of a set of ConfigMaps or Secrets", - "prefix": "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.", + "prefix": "Optional text to prepend to the name of each environment variable. 
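
NOTE (editor): the restartPolicyRules semantics documented above are easier to read in a manifest. A minimal sketch, not part of this patch: the pod name, image, and exit code are illustrative, and the ContainerRestartRules feature gate is assumed to be enabled. Field names follow the json tags in this diff.

apiVersion: v1
kind: Pod
metadata:
  name: restart-rules-demo            # illustrative name
spec:
  restartPolicy: Never
  containers:
  - name: app
    image: registry.example/app:v1    # illustrative image
    restartPolicy: Never              # MUST be set explicitly when rules are present
    restartPolicyRules:               # at most 20 rules, evaluated in order
    - action: Restart                 # "Restart" is the only documented action
      exitCodes:
        operator: In                  # In | NotIn
        values: [42]                  # restart only when the container exits with 42
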
May consist of any printable ASCII characters except '='.", "configMapRef": "The ConfigMap to select from", "secretRef": "The Secret to select from", } @@ -608,7 +640,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string { var map_EnvVar = map[string]string{ "": "EnvVar represents an environment variable present in a Container.", - "name": "Name of the environment variable. Must be a C_IDENTIFIER.", + "name": "Name of the environment variable. May consist of any printable ASCII characters except '='.", "value": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", "valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.", } @@ -623,6 +655,7 @@ var map_EnvVarSource = map[string]string{ "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", "configMapKeyRef": "Selects a key of a ConfigMap.", "secretKeyRef": "Selects a key of a secret in the pod's namespace", + "fileKeyRef": "FileKeyRef selects a key of the env file. Requires the EnvFiles feature gate to be enabled.", } func (EnvVarSource) SwaggerDoc() map[string]string { @@ -646,11 +679,12 @@ var map_EphemeralContainerCommon = map[string]string{ "args": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "Ports are not allowed for ephemeral containers.", - "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. 
Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", "resizePolicy": "Resources resize policy for the container.", - "restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.", + "restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. You cannot set this field on ephemeral containers.", + "restartPolicyRules": "Represents a list of rules to be checked to determine if the container should be restarted on exit. You cannot set this field on ephemeral containers.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", @@ -754,6 +788,18 @@ func (FCVolumeSource) SwaggerDoc() map[string]string { return map_FCVolumeSource } +var map_FileKeySelector = map[string]string{ + "": "FileKeySelector selects a key of the env file.", + "volumeName": "The name of the volume mount containing the env file.", + "path": "The path within the volume from which to select the file. Must be relative and may not contain the '..' path or start with '..'.", + "key": "The key within the env file. An invalid key will prevent the pod from starting. The keys defined within a source may consist of any printable ASCII characters except '='. During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.", + "optional": "Specify whether the file or its key must be defined. If the file or key does not exist, then the env var is not published. If optional is set to true and the specified key does not exist, the environment variable will not be set in the Pod's containers.\n\nIf optional is set to false and the specified key does not exist, an error will be returned during Pod creation.", +} + +func (FileKeySelector) SwaggerDoc() map[string]string { + return map_FileKeySelector +} + var map_FlexPersistentVolumeSource = map[string]string{ "": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", "driver": "driver is the name of the driver to use for this volume.", @@ -837,7 +883,7 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "endpoints is the endpoint name that details Glusterfs topology.", "path": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "readOnly": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } @@ -1446,7 +1492,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeMode": "volumeMode defines what type of volume is required by the claim. 
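
NOTE (editor): a sketch of how the FileKeySelector above is consumed through env[].valueFrom.fileKeyRef, assuming the EnvFiles feature gate is enabled. The volume layout and names are illustrative; the init container is just one way to produce the env file.

apiVersion: v1
kind: Pod
metadata:
  name: env-file-demo                 # illustrative name
spec:
  volumes:
  - name: config                      # matched by volumeName below
    emptyDir: {}
  initContainers:
  - name: write-env-file
    image: registry.example/busybox   # illustrative image
    command: ["sh", "-c", "echo DB_HOST=db.internal > /cfg/app.env"]
    volumeMounts:
    - name: config
      mountPath: /cfg
  containers:
  - name: app
    image: registry.example/app:v1    # illustrative image
    env:
    - name: DB_HOST
      valueFrom:
        fileKeyRef:                   # requires the EnvFiles feature gate
          volumeName: config
          path: app.env               # relative path within the volume
          key: DB_HOST                # at most 128 characters while the gate is Alpha
          optional: false             # missing key fails pod creation
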
Value of Filesystem is implied when not included in claim spec.", "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", - "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName; it can be changed after the claim is created. An empty string or nil value indicates that no VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/",
 }

 func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
 	return map_PersistentVolumeClaimSpec
 }

@@ -1461,8 +1507,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
 	"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
 	"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
 	"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax.
Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
-	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
-	"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim.",
+	"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted.",
 }

 func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
 	return map_PersistentVolumeClaimStatus
 }

@@ -1539,7 +1585,7 @@ var map_PersistentVolumeSpec = map[string]string{
 	"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
 	"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state.
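
NOTE (editor): per the updated volumeAttributesClassName wording above, a minimal PVC sketch. The storage class and volume attributes class names are illustrative and must exist in the cluster; unlike storageClassName, volumeAttributesClassName may be changed after the claim is created.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data                          # illustrative name
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
  storageClassName: fast              # illustrative; immutable after creation
  volumeAttributesClassName: gold     # illustrative; mutable, drives ControllerModifyVolume
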
Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", - "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1606,7 +1652,7 @@ func (PodAffinityTerm) SwaggerDoc() map[string]string { var map_PodAntiAffinity = map[string]string{ "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", "requiredDuringSchedulingIgnoredDuringExecution": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and subtracting \"weight\" from the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
 }

 func (PodAntiAffinity) SwaggerDoc() map[string]string {
 	return map_PodAntiAffinity
 }

@@ -1626,6 +1672,20 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
 	return map_PodAttachOptions
 }

+var map_PodCertificateProjection = map[string]string{
+	"": "PodCertificateProjection provides a private key and X.509 certificate in the pod filesystem.",
+	"signerName": "Kubelet's generated CSRs will be addressed to this signer.",
+	"keyType": "The type of keypair Kubelet will generate for the pod.\n\nValid values are \"RSA3072\", \"RSA4096\", \"ECDSAP256\", \"ECDSAP384\", \"ECDSAP521\", and \"ED25519\".",
+	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nKubelet copies this value verbatim into the PodCertificateRequests it generates for this projection.\n\nIf omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+	"credentialBundlePath": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain. If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.",
+	"keyPath": "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
+	"certificateChainPath": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
+}
+
+func (PodCertificateProjection) SwaggerDoc() map[string]string {
+	return map_PodCertificateProjection
+}
+
 var map_PodCondition = map[string]string{
 	"": "PodCondition contains details for the current condition of this pod.",
 	"type": "Type is the type of the condition.
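
NOTE (editor): a sketch of PodCertificateProjection used as a projected volume source (the podCertificate entry added to map_VolumeProjection later in this file). The signer name is illustrative, and this assumes the alpha pod-certificate projection feature is enabled.

apiVersion: v1
kind: Pod
metadata:
  name: pod-cert-demo                 # illustrative name
spec:
  containers:
  - name: app
    image: registry.example/app:v1    # illustrative image
    volumeMounts:
    - name: cert
      mountPath: /var/run/pod-cert
      readOnly: true
  volumes:
  - name: cert
    projected:
      sources:
      - podCertificate:
          signerName: example.com/ca  # illustrative signer
          keyType: ECDSAP256
          maxExpirationSeconds: 86400 # between 3600 and 7862400
          # One file, one atomic read: key plus certificate chain stay consistent.
          credentialBundlePath: credbundle.pem
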
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
@@ -1676,6 +1736,16 @@ func (PodExecOptions) SwaggerDoc() map[string]string {
 	return map_PodExecOptions
 }

+var map_PodExtendedResourceClaimStatus = map[string]string{
+	"": "PodExtendedResourceClaimStatus is stored in the PodStatus for the extended resource requests backed by DRA. It stores the generated name for the corresponding special ResourceClaim created by the scheduler.",
+	"requestMappings": "RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request in the generated ResourceClaim.",
+	"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod.",
+}
+
+func (PodExtendedResourceClaimStatus) SwaggerDoc() map[string]string {
+	return map_PodExtendedResourceClaimStatus
+}
+
 var map_PodIP = map[string]string{
 	"": "PodIP represents a single IP address allocated to the pod.",
 	"ip": "IP is the IP address assigned to the pod",
@@ -1824,7 +1894,7 @@ var map_PodSpec = map[string]string{
 	"serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
 	"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
 	"nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
-	"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+	"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false.",
 	"hostPID": "Use the host's pid namespace. Optional: Default to false.",
 	"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
 	"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
@@ -1846,11 +1916,12 @@ var map_PodSpec = map[string]string{
 	"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
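
NOTE (editor): the PodExtendedResourceClaimStatus above is written by the scheduler, not by users. A sketch of how it could look on a pod's status under the DRAExtendedResource gate; the resource, request, and claim names are illustrative generated values.

status:
  extendedResourceClaimStatus:
    requestMappings:
    - containerName: app
      resourceName: example.com/gpu                  # illustrative extended resource
      requestName: container-0-request-0             # illustrative generated request name
    resourceClaimName: pod-extended-resources-x7k2d  # illustrative generated claim name
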
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", - "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. 
If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", - "resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.", + "resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\", \"memory\" and \"hugepages-\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.", + "hostnameOverride": "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1858,24 +1929,25 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. 
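
NOTE (editor): the new PodSpec fields above, sketched together in one manifest. Assumes the PodLevelResources and HostnameOverride feature gates; names and quantities are illustrative.

apiVersion: v1
kind: Pod
metadata:
  name: pod-level-demo                # illustrative name
spec:
  hostnameOverride: build-agent-01    # valid RFC 1123 subdomain, at most 64 chars; hostNetwork must stay false
  resources:                          # pod-level budget shared by all containers;
    requests:                         # only cpu, memory and hugepages-* are supported
      cpu: "1"
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 2Gi
  containers:
  - name: app
    image: registry.example/app:v1    # illustrative image
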
The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod", - "hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.", - "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. 
Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status", - "containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", - "ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.", - "resourceClaimStatuses": "Status of resource claims.", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. 
Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
+	"conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"message": "A human readable message indicating details about why the pod is in this condition.",
+	"reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
+	"nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
+	"hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIP will not be updated even if a node is assigned to the pod.",
+	"hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIPs will not be updated even if a node is assigned to this pod.",
+	"podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
+	"podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
+	"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
+	"initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, and the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
+	"containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced.
If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements. See PodQOSClass type for available QOS classes. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
+	"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\". Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
+	"resourceClaimStatuses": "Status of resource claims.",
+	"extendedResourceClaimStatus": "Status of extended resource claim backed by DRA.",
 }

 func (PodStatus) SwaggerDoc() map[string]string {
 	return map_PodStatus
 }

@@ -2205,7 +2277,7 @@ var map_ResourceRequirements = map[string]string{
 	"": "ResourceRequirements describes the compute resource requirements.",
 	"limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
 	"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
-	"claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
+	"claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis field depends on the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
 }

 func (ResourceRequirements) SwaggerDoc() map[string]string {
 	return map_ResourceRequirements
 }

@@ -2587,7 +2659,7 @@ var map_Taint = map[string]string{
 	"key": "Required. The taint key to be applied to a node.",
 	"value": "The taint value corresponding to the taint key.",
 	"effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
-	"timeAdded": "TimeAdded represents the time at which the taint was added.
It is only written for NoExecute taints.",
+	"timeAdded": "TimeAdded represents the time at which the taint was added.",
 }

 func (Taint) SwaggerDoc() map[string]string {
 	return map_Taint
 }

@@ -2727,6 +2799,7 @@ var map_VolumeProjection = map[string]string{
 	"configMap": "configMap information about the configMap data to project",
 	"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
 	"clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
+	"podCertificate": "Projects an auto-rotating credential bundle (private key and certificate chain) that the pod can use either as a TLS client or server.\n\nKubelet generates a private key and uses it to send a PodCertificateRequest to the named signer. Once the signer approves the request and issues a certificate chain, Kubelet writes the key and certificate chain to the pod filesystem. The pod does not start until certificates have been issued for each podCertificate projected volume source in its spec.\n\nKubelet will begin trying to rotate the certificate at the time indicated by the signer using the PodCertificateRequest.Status.BeginRefreshAt timestamp.\n\nKubelet can write a single file, indicated by the credentialBundlePath field, or separate files, indicated by the keyPath and certificateChainPath fields.\n\nThe credential bundle is a single file in PEM format. The first PEM entry is the private key (in PKCS#8 format), and the remaining PEM entries are the certificate chain issued by the signer (typically, signers will return their certificate chain in leaf-to-root order).\n\nPrefer using the credential bundle format, since your application code can read it atomically. If you use keyPath and certificateChainPath, your application must make two separate file reads. If these coincide with a certificate rotation, it is possible that the private key and leaf certificate you read may not correspond to each other. Your application will need to check for this condition, and re-read until they are consistent.\n\nThe named signer chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to use the certificates it issues.",
 }

 func (VolumeProjection) SwaggerDoc() map[string]string {
 	return map_VolumeProjection
 }

@@ -2752,10 +2825,10 @@ var map_VolumeSource = map[string]string{
 	"gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
 	"secret": "secret represents a secret that should populate this volume.
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", - "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi", + "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.", "persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.", "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", diff --git a/openshift/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/openshift/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 619c525427..bcd91bd019 100644 --- a/openshift/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/openshift/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -829,6 +829,13 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(ContainerRestartPolicy) **out = **in } + if in.RestartPolicyRules != nil { + in, out := &in.RestartPolicyRules, &out.RestartPolicyRules + *out = make([]ContainerRestartRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -879,6 +886,22 @@ func (in *Container) DeepCopy() *Container { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerExtendedResourceRequest) DeepCopyInto(out *ContainerExtendedResourceRequest) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerExtendedResourceRequest. +func (in *ContainerExtendedResourceRequest) DeepCopy() *ContainerExtendedResourceRequest { + if in == nil { + return nil + } + out := new(ContainerExtendedResourceRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { *out = *in @@ -932,6 +955,48 @@ func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRestartRule) DeepCopyInto(out *ContainerRestartRule) { + *out = *in + if in.ExitCodes != nil { + in, out := &in.ExitCodes, &out.ExitCodes + *out = new(ContainerRestartRuleOnExitCodes) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRule. +func (in *ContainerRestartRule) DeepCopy() *ContainerRestartRule { + if in == nil { + return nil + } + out := new(ContainerRestartRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRestartRuleOnExitCodes) DeepCopyInto(out *ContainerRestartRuleOnExitCodes) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRuleOnExitCodes. +func (in *ContainerRestartRuleOnExitCodes) DeepCopy() *ContainerRestartRuleOnExitCodes { + if in == nil { + return nil + } + out := new(ContainerRestartRuleOnExitCodes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in @@ -1433,6 +1498,11 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { *out = new(SecretKeySelector) (*in).DeepCopyInto(*out) } + if in.FileKeyRef != nil { + in, out := &in.FileKeyRef, &out.FileKeyRef + *out = new(FileKeySelector) + (*in).DeepCopyInto(*out) + } return } @@ -1506,6 +1576,13 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) *out = new(ContainerRestartPolicy) **out = **in } + if in.RestartPolicyRules != nil { + in, out := &in.RestartPolicyRules, &out.RestartPolicyRules + *out = make([]ContainerRestartRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -1736,6 +1813,27 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileKeySelector) DeepCopyInto(out *FileKeySelector) { + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileKeySelector. 
+func (in *FileKeySelector) DeepCopy() *FileKeySelector { + if in == nil { + return nil + } + out := new(FileKeySelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { *out = *in @@ -3797,6 +3895,27 @@ func (in *PodAttachOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection) { + *out = *in + if in.MaxExpirationSeconds != nil { + in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateProjection. +func (in *PodCertificateProjection) DeepCopy() *PodCertificateProjection { + if in == nil { + return nil + } + out := new(PodCertificateProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodCondition) DeepCopyInto(out *PodCondition) { *out = *in @@ -3899,6 +4018,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodExtendedResourceClaimStatus) DeepCopyInto(out *PodExtendedResourceClaimStatus) { + *out = *in + if in.RequestMappings != nil { + in, out := &in.RequestMappings, &out.RequestMappings + *out = make([]ContainerExtendedResourceRequest, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExtendedResourceClaimStatus. +func (in *PodExtendedResourceClaimStatus) DeepCopy() *PodExtendedResourceClaimStatus { + if in == nil { + return nil + } + out := new(PodExtendedResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodIP) DeepCopyInto(out *PodIP) { *out = *in @@ -4412,6 +4552,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(ResourceRequirements) (*in).DeepCopyInto(*out) } + if in.HostnameOverride != nil { + in, out := &in.HostnameOverride, &out.HostnameOverride + *out = new(string) + **out = **in + } return } @@ -4477,6 +4622,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ExtendedResourceClaimStatus != nil { + in, out := &in.ExtendedResourceClaimStatus, &out.ExtendedResourceClaimStatus + *out = new(PodExtendedResourceClaimStatus) + (*in).DeepCopyInto(*out) + } return } @@ -6412,6 +6562,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = new(ClusterTrustBundleProjection) (*in).DeepCopyInto(*out) } + if in.PodCertificate != nil { + in, out := &in.PodCertificate, &out.PodCertificate + *out = new(PodCertificateProjection) + (*in).DeepCopyInto(*out) + } return } diff --git a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go index 7770fab5d2..be710973cb 100644 --- a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -18,5 +18,7 @@ limitations under the License. // +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true +// +k8s:validation-gen=TypeMeta +// +k8s:validation-gen-input=k8s.io/api/extensions/v1beta1 package v1beta1 diff --git a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 70fcec0cc5..fed0b4835d 100644 --- a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -980,7 +980,7 @@ message RollingUpdateDaemonSet { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may @@ -1039,6 +1039,9 @@ message Scale { message ScaleSpec { // desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types.go b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types.go index b80a7a7e16..c7b50e0590 100644 --- a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -27,6 +27,9 @@ import ( type ScaleSpec struct { // desired number of instances for the scaled object. 
// +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } @@ -54,6 +57,7 @@ type ScaleStatus struct { // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.2 // +k8s:prerelease-lifecycle-gen:removed=1.16 +// +k8s:isSubresource=/scale // represents a scaling request for a resource. type Scale struct { @@ -398,7 +402,7 @@ type RollingUpdateDaemonSet struct { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 923fab3aa1..8a158233ef 100644 --- a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -482,7 +482,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. 
Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { diff --git a/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go new file mode 100644 index 0000000000..6d2a1666ae --- /dev/null +++ b/openshift/tools/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go @@ -0,0 +1,78 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by validation-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + fmt "fmt" + + operation "k8s.io/apimachinery/pkg/api/operation" + safe "k8s.io/apimachinery/pkg/api/safe" + validate "k8s.io/apimachinery/pkg/api/validate" + runtime "k8s.io/apimachinery/pkg/runtime" + field "k8s.io/apimachinery/pkg/util/validation/field" +) + +func init() { localSchemeBuilder.Register(RegisterValidations) } + +// RegisterValidations adds validation functions to the given scheme. +// Public to allow building arbitrary schemes.
+func RegisterValidations(scheme *runtime.Scheme) error { + scheme.AddValidationFunc((*Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList { + switch op.Request.SubresourcePath() { + case "/scale": + return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*Scale), safe.Cast[*Scale](oldObj)) + } + return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))} + }) + return nil +} + +func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *Scale) (errs field.ErrorList) { + // field Scale.TypeMeta has no validation + // field Scale.ObjectMeta has no validation + + // field Scale.Spec + errs = append(errs, + func(fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) { + errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...) + return + }(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *Scale) *ScaleSpec { return &oldObj.Spec }))...) + + // field Scale.Status has no validation + return errs +} + +func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) { + // field ScaleSpec.Replicas + errs = append(errs, + func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) { + // optional value-type fields with zero-value defaults are purely documentation + if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) { + return nil // no changes + } + errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...) + return + }(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *ScaleSpec) *int32 { return &oldObj.Replicas }))...) + + return errs +} diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/networking/v1/generated.proto index e3e3e9215e..16a2792aa6 100644 --- a/openshift/tools/vendor/k8s.io/api/networking/v1/generated.proto +++ b/openshift/tools/vendor/k8s.io/api/networking/v1/generated.proto @@ -534,11 +534,12 @@ message NetworkPolicyPort { // NetworkPolicySpec provides the specification of a NetworkPolicy message NetworkPolicySpec { // podSelector selects the pods to which this NetworkPolicy object applies. - // The array of ingress rules is applied to any pods selected by this field. + // The array of rules is applied to any pods selected by this field. An empty + // selector matches all pods in the policy's namespace. // Multiple network policies can select the same set of pods. In this case, // the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. + // This field is optional. If it is not specified, it defaults to an empty selector. + // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // ingress is a list of ingress rules to be applied to the selected pods. 
diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1/types.go b/openshift/tools/vendor/k8s.io/api/networking/v1/types.go index 216647ceeb..7d9a4fc94c 100644 --- a/openshift/tools/vendor/k8s.io/api/networking/v1/types.go +++ b/openshift/tools/vendor/k8s.io/api/networking/v1/types.go @@ -60,11 +60,12 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { // podSelector selects the pods to which this NetworkPolicy object applies. - // The array of ingress rules is applied to any pods selected by this field. + // The array of rules is applied to any pods selected by this field. An empty + // selector matches all pods in the policy's namespace. // Multiple network policies can select the same set of pods. In this case, // the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. + // This field is optional. If it is not specified, it defaults to an empty selector. + // +optional PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` // ingress is a list of ingress rules to be applied to the selected pods. diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 0e294848ba..6210bb7a5a 100644 --- a/openshift/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/openshift/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -313,7 +313,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of rules is applied to any pods selected by this field. An empty selector matches all pods in the policy's namespace. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is optional. If it is not specified, it defaults to an empty selector.", "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "egress": "egress is a list of egress rules to be applied to the selected pods. 
Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/doc.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/doc.go deleted file mode 100644 index 55264ae707..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true -// +k8s:prerelease-lifecycle-gen=true -// +groupName=networking.k8s.io - -package v1alpha1 diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go deleted file mode 100644 index 0d42034837..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1929 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/api/networking/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *IPAddress) Reset() { *m = IPAddress{} } -func (*IPAddress) ProtoMessage() {} -func (*IPAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{0} -} -func (m *IPAddress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddress.Merge(m, src) -} -func (m *IPAddress) XXX_Size() int { - return m.Size() -} -func (m *IPAddress) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddress.DiscardUnknown(m) -} - -var xxx_messageInfo_IPAddress proto.InternalMessageInfo - -func (m *IPAddressList) Reset() { *m = IPAddressList{} } -func (*IPAddressList) ProtoMessage() {} -func (*IPAddressList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{1} -} -func (m *IPAddressList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPAddressList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressList.Merge(m, src) -} -func (m *IPAddressList) XXX_Size() int { - return m.Size() -} -func (m *IPAddressList) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressList.DiscardUnknown(m) -} - -var xxx_messageInfo_IPAddressList proto.InternalMessageInfo - -func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } -func (*IPAddressSpec) ProtoMessage() {} -func (*IPAddressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{2} -} -func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPAddressSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressSpec.Merge(m, src) -} -func (m *IPAddressSpec) XXX_Size() int { - return m.Size() -} -func (m *IPAddressSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo - -func (m *ParentReference) Reset() { *m = ParentReference{} } -func (*ParentReference) ProtoMessage() {} -func (*ParentReference) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{3} -} -func (m *ParentReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ParentReference) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ParentReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParentReference.Merge(m, src) -} -func (m *ParentReference) XXX_Size() int { - return m.Size() -} -func (m *ParentReference) XXX_DiscardUnknown() { - xxx_messageInfo_ParentReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ParentReference proto.InternalMessageInfo - -func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } -func (*ServiceCIDR) ProtoMessage() {} -func (*ServiceCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{4} -} -func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDR.Merge(m, src) -} -func (m *ServiceCIDR) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo - -func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } -func (*ServiceCIDRList) ProtoMessage() {} -func (*ServiceCIDRList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{5} -} -func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRList.Merge(m, src) -} -func (m *ServiceCIDRList) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDRList) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo - -func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } -func (*ServiceCIDRSpec) ProtoMessage() {} -func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{6} -} -func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) -} -func (m *ServiceCIDRSpec) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo - -func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } -func (*ServiceCIDRStatus) ProtoMessage() {} -func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{7} -} -func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDRStatus) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) -} -func (m *ServiceCIDRStatus) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo - -func init() { - proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") - proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") - proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") - proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") - proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR") - proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList") - proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec") - proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus") -} - -func init() { - proto.RegisterFile("k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1cb39e7b48ce50d) -} - -var fileDescriptor_c1cb39e7b48ce50d = []byte{ - // 634 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4a, - 0x18, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0xb7, 0xb7, 0xb7, 0x5e, 0x45, 0x5d, 0x38, 0x91, 0xef, 0xa6, - 0x08, 0x3a, 0x26, 0x11, 0x42, 0x6c, 0x71, 0x2b, 0xa1, 0x4a, 0xd0, 0x96, 0xe9, 0x0a, 0xd4, 0x05, - 0xd3, 0xc9, 0x57, 0x67, 0x08, 0xfe, 0xd1, 0xcc, 0x24, 0xc0, 0x8e, 0x47, 0xe0, 0x05, 0x78, 0x0e, - 0x56, 0x20, 0xb1, 0xeb, 0xb2, 0xcb, 0xae, 0x2a, 0x6a, 0x5e, 0x04, 0xcd, 0xd8, 0xb1, 0x93, 0x46, - 0xfd, 0xdb, 0x74, 0xe7, 0xef, 0xcc, 0x39, 0x67, 0xbe, 0xf3, 0xcd, 0x8c, 0x8c, 0xf0, 0xf0, 0x99, - 0xc4, 0x3c, 0xf1, 0x69, 0xca, 0xfd, 0x18, 0xd4, 0xc7, 0x44, 0x0c, 0x79, 0x1c, 0xfa, 0xe3, 0x2e, - 0xfd, 0x90, 0x0e, 0x68, 0xd7, 0x0f, 0x21, 0x06, 0x41, 0x15, 0xf4, 0x71, 0x2a, 0x12, 0x95, 0x38, - 0x6e, 0xce, 0xc7, 0x34, 0xe5, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0x33, 0xe4, 0x6a, 0x30, 0x3a, - 0xc2, 0x2c, 0x89, 0xfc, 0x30, 0x09, 0x13, 0xdf, 0xc8, 0x8e, 0x46, 0xc7, 0xa6, 0x32, 0x85, 0xf9, - 0xca, 0xed, 0xd6, 0x9f, 0x54, 0xdb, 0x47, 0x94, 0x0d, 0x78, 0x0c, 0xe2, 0xb3, 0x9f, 0x0e, 0x43, - 0x0d, 0x48, 0x3f, 0x02, 0x45, 0xfd, 0xf1, 0x5c, 0x13, 0xeb, 0xfe, 0x55, 0x2a, 0x31, 0x8a, 0x15, - 0x8f, 0x60, 0x4e, 0xf0, 0xf4, 0x26, 0x81, 0x64, 0x03, 0x88, 0xe8, 0x65, 0x9d, 0xf7, 0xd3, 0x42, - 0xf6, 0xce, 0xfe, 0xf3, 0x7e, 0x5f, 0x80, 0x94, 0xce, 0x3b, 0xb4, 0xac, 0x3b, 0xea, 0x53, 0x45, - 0x5b, 0x56, 0xc7, 0xda, 0x68, 0xf6, 0x1e, 0xe3, 0x6a, 0x1c, 0xa5, 0x31, 0x4e, 0x87, 0xa1, 0x06, - 0x24, 0xd6, 0x6c, 0x3c, 0xee, 0xe2, 0xbd, 0xa3, 0xf7, 0xc0, 0xd4, 0x2b, 0x50, 0x34, 0x70, 0x4e, - 0xce, 0xdb, 0xb5, 0xec, 0xbc, 0x8d, 0x2a, 0x8c, 0x94, 0xae, 0xce, 0x1e, 0xaa, 0xcb, 0x14, 0x58, - 0x6b, 0xc1, 0xb8, 0x6f, 0xe2, 0xeb, 0x87, 0x8d, 0xcb, 0xd6, 0x0e, 0x52, 0x60, 0xc1, 0x3f, 0x85, - 0x75, 0x5d, 0x57, 0xc4, 0x18, 0x79, 0x3f, 0x2c, 0xb4, 0x52, 0xb2, 0x5e, 0x72, 0xa9, 0x9c, 0xc3, - 0xb9, 0x10, 0xf8, 0x76, 0x21, 0xb4, 0xda, 0x44, 0xf8, 0xaf, 0xd8, 0x67, 0x79, 0x82, 0x4c, 0x05, - 0xd8, 0x45, 0x0d, 0xae, 0x20, 0x92, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xf7, 0xe0, 0xd6, 0x09, - 0x82, 0x95, 0xc2, 0xb5, 0xb1, 0xa3, 0xf5, 0x24, 0xb7, 0xf1, 0xa2, 0xa9, 0xf6, 0x75, 0x2c, 0xe7, - 0x10, 0xd9, 0x29, 0x15, 0x10, 0x2b, 0x02, 0xc7, 0x45, 0xff, 0xfe, 0x4d, 0x9b, 0xec, 
0x4f, 0x04, - 0x20, 0x20, 0x66, 0x10, 0xac, 0x64, 0xe7, 0x6d, 0xbb, 0x04, 0x49, 0x65, 0xe8, 0x7d, 0xb7, 0xd0, - 0xea, 0x25, 0xb6, 0xf3, 0x3f, 0x6a, 0x84, 0x22, 0x19, 0xa5, 0x66, 0x37, 0xbb, 0xea, 0xf3, 0x85, - 0x06, 0x49, 0xbe, 0xe6, 0x3c, 0x42, 0xcb, 0x02, 0x64, 0x32, 0x12, 0x0c, 0xcc, 0xe1, 0xd9, 0xd5, - 0x94, 0x48, 0x81, 0x93, 0x92, 0xe1, 0xf8, 0xc8, 0x8e, 0x69, 0x04, 0x32, 0xa5, 0x0c, 0x5a, 0x8b, - 0x86, 0xbe, 0x56, 0xd0, 0xed, 0xdd, 0xc9, 0x02, 0xa9, 0x38, 0x4e, 0x07, 0xd5, 0x75, 0xd1, 0xaa, - 0x1b, 0x6e, 0x79, 0xd0, 0x9a, 0x4b, 0xcc, 0x8a, 0xf7, 0x6d, 0x01, 0x35, 0x0f, 0x40, 0x8c, 0x39, - 0x83, 0xad, 0x9d, 0x6d, 0x72, 0x0f, 0x77, 0xf5, 0xf5, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, - 0xbb, 0xea, 0xb6, 0x3a, 0x6f, 0xd0, 0x92, 0x54, 0x54, 0x8d, 0xa4, 0x19, 0x4a, 0xb3, 0xd7, 0xbd, - 0x8b, 0xa9, 0x11, 0x06, 0xff, 0x16, 0xb6, 0x4b, 0x79, 0x4d, 0x0a, 0x43, 0xef, 0x97, 0x85, 0x56, - 0xa7, 0xd8, 0xf7, 0xf0, 0x14, 0xf6, 0x67, 0x9f, 0xc2, 0xc3, 0x3b, 0x64, 0xb9, 0xe2, 0x31, 0xf4, - 0x66, 0x22, 0x98, 0xe7, 0xd0, 0x46, 0x0d, 0xc6, 0xfb, 0x42, 0xb6, 0xac, 0xce, 0xe2, 0x86, 0x1d, - 0xd8, 0x5a, 0xa3, 0x17, 0x25, 0xc9, 0x71, 0xef, 0x13, 0x5a, 0x9b, 0x1b, 0x92, 0xc3, 0x10, 0x62, - 0x49, 0xdc, 0xe7, 0x8a, 0x27, 0x71, 0x2e, 0x9d, 0x3d, 0xc0, 0x6b, 0xa2, 0x6f, 0x4d, 0x74, 0xd5, - 0xed, 0x28, 0x21, 0x49, 0xa6, 0x6c, 0x83, 0xed, 0x93, 0x0b, 0xb7, 0x76, 0x7a, 0xe1, 0xd6, 0xce, - 0x2e, 0xdc, 0xda, 0x97, 0xcc, 0xb5, 0x4e, 0x32, 0xd7, 0x3a, 0xcd, 0x5c, 0xeb, 0x2c, 0x73, 0xad, - 0xdf, 0x99, 0x6b, 0x7d, 0xfd, 0xe3, 0xd6, 0xde, 0xba, 0xd7, 0xff, 0x7f, 0xfe, 0x06, 0x00, 0x00, - 0xff, 0xff, 0xb1, 0xd0, 0x33, 0x02, 0xa0, 0x06, 0x00, 0x00, -} - -func (m *IPAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IPAddressList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IPAddressSpec) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ParentRef != nil { - { - size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ParentReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x1a - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - 
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.CIDRs) > 0 { - for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.CIDRs[iNdEx]) - copy(dAtA[i:], m.CIDRs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *IPAddress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IPAddressList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IPAddressSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ParentRef != nil { - l = m.ParentRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ParentReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceCIDR) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceCIDRList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) 
> 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceCIDRSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.CIDRs) > 0 { - for _, s := range m.CIDRs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceCIDRStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *IPAddress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPAddress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IPAddressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]IPAddress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IPAddressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IPAddressSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPAddressSpec{`, - `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ParentReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ParentReference{`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceCIDR{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceCIDRList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ServiceCIDR{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ServiceCIDRList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this 
*ServiceCIDRSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceCIDRSpec{`, - `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceCIDRStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ServiceCIDRStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *IPAddress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPAddressList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, IPAddress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ParentRef == nil { - m.ParentRef = &ParentReference{} - } - if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ParentReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex 
:= iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - 
return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ServiceCIDR{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break 
- } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { 
- return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/generated.proto deleted file mode 100644 index 80ec6af735..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.networking.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/networking/v1alpha1"; - -// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs -// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. -// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, -// the name of the object is the IP address in canonical format, four decimal digits separated -// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. -// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 -// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 -message IPAddress { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the IPAddress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional IPAddressSpec spec = 2; -} - -// IPAddressList contains a list of IPAddress. -message IPAddressList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of IPAddresses. - repeated IPAddress items = 2; -} - -// IPAddressSpec describe the attributes in an IP Address. -message IPAddressSpec { - // ParentRef references the resource that an IPAddress is attached to. 
- // An IPAddress must reference a parent object. - // +required - optional ParentReference parentRef = 1; -} - -// ParentReference describes a reference to a parent object. -message ParentReference { - // Group is the group of the object being referenced. - // +optional - optional string group = 1; - - // Resource is the resource of the object being referenced. - // +required - optional string resource = 2; - - // Namespace is the namespace of the object being referenced. - // +optional - optional string namespace = 3; - - // Name is the name of the object being referenced. - // +required - optional string name = 4; -} - -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). -// This range is used to allocate ClusterIPs to Service objects. -message ServiceCIDR { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ServiceCIDRSpec spec = 2; - - // status represents the current state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ServiceCIDRStatus status = 3; -} - -// ServiceCIDRList contains a list of ServiceCIDR objects. -message ServiceCIDRList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of ServiceCIDRs. - repeated ServiceCIDR items = 2; -} - -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. -message ServiceCIDRSpec { - // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") - // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. - // The network address of each CIDR, the address that identifies the subnet of a host, is reserved - // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be - // allocated. - // This field is immutable. - // +optional - // +listType=atomic - repeated string cidrs = 1; -} - -// ServiceCIDRStatus describes the current state of the ServiceCIDR. -message ServiceCIDRStatus { - // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; -} - diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/register.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/register.go deleted file mode 100644 index c8f5856b5d..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/register.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
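
Editor's note: a minimal, standalone Go sketch (not part of the vendored files) of the base-128 varint loop that the generated Unmarshal and skipGenerated functions in generated.pb.go above repeat inline for every field. The function and variable names here are illustrative only; the generated code inlines this loop rather than calling a helper, presumably to avoid per-field call overhead.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inlined loop in the generated code: each byte
// contributes its low 7 bits, and a clear high bit marks the final byte.
// It returns the decoded value and the number of bytes consumed.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	i := 0
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // ErrIntOverflowGenerated in the generated code
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF in the generated code
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, i, nil
		}
	}
}

func main() {
	// 300 encodes as [0xAC, 0x02]: 0x2C with the continuation bit, then 0x02.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>

	// A field key is itself a varint: fieldNum = key >> 3, wireType = key & 0x7,
	// which is exactly how the generated Unmarshal functions dispatch on fields.
	key, _, _ := decodeVarint([]byte{0x12}) // field 2, wire type 2 (length-delimited)
	fmt.Println(key>>3, key&0x7)            // 2 2
}
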
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package. -const GroupName = "networking.k8s.io" - -// SchemeGroupVersion is group version used to register objects in this package. -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind. -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource. -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder holds functions that add things to a scheme. - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - - // AddToScheme adds the types of this group into the given scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &IPAddress{}, - &IPAddressList{}, - &ServiceCIDR{}, - &ServiceCIDRList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/types.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/types.go deleted file mode 100644 index 0e454f0263..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/types.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs -// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. 
-// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, -// the name of the object is the IP address in canonical format, four decimal digits separated -// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. -// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 -// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 -type IPAddress struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // spec is the desired state of the IPAddress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// IPAddressSpec describe the attributes in an IP Address. -type IPAddressSpec struct { - // ParentRef references the resource that an IPAddress is attached to. - // An IPAddress must reference a parent object. - // +required - ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` -} - -// ParentReference describes a reference to a parent object. -type ParentReference struct { - // Group is the group of the object being referenced. - // +optional - Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` - // Resource is the resource of the object being referenced. - // +required - Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` - // Namespace is the namespace of the object being referenced. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` - // Name is the name of the object being referenced. - // +required - Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// IPAddressList contains a list of IPAddress. -type IPAddressList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // items is the list of IPAddresses. - Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). -// This range is used to allocate ClusterIPs to Service objects. -type ServiceCIDR struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // spec is the desired state of the ServiceCIDR. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // status represents the current state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. -type ServiceCIDRSpec struct { - // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") - // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. - // The network address of each CIDR, the address that identifies the subnet of a host, is reserved - // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be - // allocated. - // This field is immutable. - // +optional - // +listType=atomic - CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` -} - -const ( - // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the - // apiserver to allocate ClusterIPs for Services. - ServiceCIDRConditionReady = "Ready" - // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is - // being deleted. - ServiceCIDRReasonTerminating = "Terminating" -) - -// ServiceCIDRStatus describes the current state of the ServiceCIDR. -type ServiceCIDRStatus struct { - // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// ServiceCIDRList contains a list of ServiceCIDR objects. -type ServiceCIDRList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // items is the list of ServiceCIDRs. - Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 4c8eb57a7a..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_IPAddress = map[string]string{ - "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (IPAddress) SwaggerDoc() map[string]string { - return map_IPAddress -} - -var map_IPAddressList = map[string]string{ - "": "IPAddressList contains a list of IPAddress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of IPAddresses.", -} - -func (IPAddressList) SwaggerDoc() map[string]string { - return map_IPAddressList -} - -var map_IPAddressSpec = map[string]string{ - "": "IPAddressSpec describe the attributes in an IP Address.", - "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", -} - -func (IPAddressSpec) SwaggerDoc() map[string]string { - return map_IPAddressSpec -} - -var map_ParentReference = map[string]string{ - "": "ParentReference describes a reference to a parent object.", - "group": "Group is the group of the object being referenced.", - "resource": "Resource is the resource of the object being referenced.", - "namespace": "Namespace is the namespace of the object being referenced.", - "name": "Name is the name of the object being referenced.", -} - -func (ParentReference) SwaggerDoc() map[string]string { - return map_ParentReference -} - -var map_ServiceCIDR = map[string]string{ - "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the ServiceCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (ServiceCIDR) SwaggerDoc() map[string]string { - return map_ServiceCIDR -} - -var map_ServiceCIDRList = map[string]string{ - "": "ServiceCIDRList contains a list of ServiceCIDR objects.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of ServiceCIDRs.", -} - -func (ServiceCIDRList) SwaggerDoc() map[string]string { - return map_ServiceCIDRList -} - -var map_ServiceCIDRSpec = map[string]string{ - "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", - "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.", -} - -func (ServiceCIDRSpec) SwaggerDoc() map[string]string { - return map_ServiceCIDRSpec -} - -var map_ServiceCIDRStatus = map[string]string{ - "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", - "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", -} - -func (ServiceCIDRStatus) SwaggerDoc() map[string]string { - return map_ServiceCIDRStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go deleted file mode 100644 index 5f9c23f708..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -const ( - - // TODO: Use IPFamily as field with a field selector,And the value is set based on - // the name at create time and immutable. - // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. - // This label simplify dual-stack client operations allowing to obtain the list of - // IP addresses filtered by family. - LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" - // LabelManagedBy is used to indicate the controller or entity that manages - // an IPAddress. This label aims to enable different IPAddress - // objects to be managed by different controllers or entities within the - // same cluster. It is highly recommended to configure this label for all - // IPAddress objects. 
- LabelManagedBy = "ipaddress.kubernetes.io/managed-by" -) diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 5c8f697ba3..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,229 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddress) DeepCopyInto(out *IPAddress) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. -func (in *IPAddress) DeepCopy() *IPAddress { - if in == nil { - return nil - } - out := new(IPAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddress) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]IPAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. -func (in *IPAddressList) DeepCopy() *IPAddressList { - if in == nil { - return nil - } - out := new(IPAddressList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddressList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { - *out = *in - if in.ParentRef != nil { - in, out := &in.ParentRef, &out.ParentRef - *out = new(ParentReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. 
-func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { - if in == nil { - return nil - } - out := new(IPAddressSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ParentReference) DeepCopyInto(out *ParentReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. -func (in *ParentReference) DeepCopy() *ParentReference { - if in == nil { - return nil - } - out := new(ParentReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. -func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { - if in == nil { - return nil - } - out := new(ServiceCIDR) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCIDR) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceCIDR, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. -func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { - if in == nil { - return nil - } - out := new(ServiceCIDRList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { - *out = *in - if in.CIDRs != nil { - in, out := &in.CIDRs, &out.CIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. -func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { - if in == nil { - return nil - } - out := new(ServiceCIDRSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. 
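
Editor's note: a standalone sketch (not part of the vendored files) of the aliasing problem the generated DeepCopy helpers in zz_generated.deepcopy.go above guard against. The Spec type here is a made-up stand-in; the pattern of re-allocating slices in DeepCopyInto is the same one the generated code uses for CIDRs, Items, and Conditions.

package main

import "fmt"

type Spec struct{ CIDRs []string }

// DeepCopyInto follows the generated pattern: copy the struct, then allocate
// a fresh slice so the copy cannot mutate the original's backing array.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.CIDRs != nil {
		out.CIDRs = make([]string, len(in.CIDRs))
		copy(out.CIDRs, in.CIDRs)
	}
}

func main() {
	a := Spec{CIDRs: []string{"10.0.0.0/16"}}

	shallow := a // plain struct copy: shares the slice's backing array
	shallow.CIDRs[0] = "changed"
	fmt.Println(a.CIDRs[0]) // "changed": the original was mutated through the copy

	a.CIDRs[0] = "10.0.0.0/16"
	var deep Spec
	a.DeepCopyInto(&deep)
	deep.CIDRs[0] = "changed"
	fmt.Println(a.CIDRs[0]) // "10.0.0.0/16": the original is untouched
}
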
-func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { - if in == nil { - return nil - } - out := new(ServiceCIDRStatus) - in.DeepCopyInto(out) - return out -} diff --git a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index 714e7b6253..0000000000 --- a/openshift/tools/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,94 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1alpha1 - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { - return 1, 27 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { - return 1, 30 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *IPAddress) APILifecycleRemoved() (major, minor int) { - return 1, 33 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { - return 1, 27 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { - return 1, 30 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
-func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { - return 1, 33 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { - return 1, 27 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { - return 1, 30 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { - return 1, 33 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { - return 1, 27 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { - return 1, 30 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { - return 1, 33 -} diff --git a/openshift/tools/vendor/k8s.io/api/resource/v1/devicetaint.go b/openshift/tools/vendor/k8s.io/api/resource/v1/devicetaint.go new file mode 100644 index 0000000000..a5c2e20a6e --- /dev/null +++ b/openshift/tools/vendor/k8s.io/api/resource/v1/devicetaint.go @@ -0,0 +1,35 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "fmt" + +var _ fmt.Stringer = DeviceTaint{} + +// String converts to a string in the format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
+func (t DeviceTaint) String() string { + if len(t.Effect) == 0 { + if len(t.Value) == 0 { + return fmt.Sprintf("%v", t.Key) + } + return fmt.Sprintf("%v=%v:", t.Key, t.Value) + } + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} diff --git a/openshift/tools/vendor/k8s.io/api/resource/v1/doc.go b/openshift/tools/vendor/k8s.io/api/resource/v1/doc.go new file mode 100644 index 0000000000..c94ca75ddc --- /dev/null +++ b/openshift/tools/vendor/k8s.io/api/resource/v1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:prerelease-lifecycle-gen=true +// +groupName=resource.k8s.io + +// Package v1 is the v1 version of the resource API. +package v1 diff --git a/openshift/tools/vendor/k8s.io/api/resource/v1/generated.pb.go b/openshift/tools/vendor/k8s.io/api/resource/v1/generated.pb.go new file mode 100644 index 0000000000..5695e2c7e0 --- /dev/null +++ b/openshift/tools/vendor/k8s.io/api/resource/v1/generated.pb.go @@ -0,0 +1,12777 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/resource/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
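
Editor's note: a standalone sketch (not part of the vendored files) demonstrating the four output shapes of the DeviceTaint.String method added above. The struct here is a local stand-in with only the fields String reads; the real type in resource/v1 also carries a typed Effect and a TimeAdded field.

package main

import "fmt"

type DeviceTaint struct{ Key, Value, Effect string }

// String reproduces the logic of the vendored method: the effect decides
// whether a trailing ':<effect>' appears, the value whether '=<value>' does.
func (t DeviceTaint) String() string {
	if len(t.Effect) == 0 {
		if len(t.Value) == 0 {
			return t.Key
		}
		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
	}
	if len(t.Value) == 0 {
		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
	}
	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
}

func main() {
	fmt.Println(DeviceTaint{Key: "gpu"})                                      // gpu
	fmt.Println(DeviceTaint{Key: "gpu", Value: "a100"})                       // gpu=a100:
	fmt.Println(DeviceTaint{Key: "gpu", Effect: "NoSchedule"})                // gpu:NoSchedule
	fmt.Println(DeviceTaint{Key: "gpu", Value: "a100", Effect: "NoSchedule"}) // gpu=a100:NoSchedule
}
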
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} } +func (*AllocatedDeviceStatus) ProtoMessage() {} +func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{0} +} +func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src) +} +func (m *AllocatedDeviceStatus) XXX_Size() int { + return m.Size() +} +func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{1} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{2} +} +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) +} +func (m *CELDeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo + +func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} } +func (*CapacityRequestPolicy) ProtoMessage() {} +func (*CapacityRequestPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{3} +} +func (m *CapacityRequestPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CapacityRequestPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CapacityRequestPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRequestPolicy.Merge(m, src) +} +func (m *CapacityRequestPolicy) XXX_Size() int { + return m.Size() +} +func (m *CapacityRequestPolicy) XXX_DiscardUnknown() { + 
xxx_messageInfo_CapacityRequestPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRequestPolicy proto.InternalMessageInfo + +func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} } +func (*CapacityRequestPolicyRange) ProtoMessage() {} +func (*CapacityRequestPolicyRange) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{4} +} +func (m *CapacityRequestPolicyRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CapacityRequestPolicyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CapacityRequestPolicyRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRequestPolicyRange.Merge(m, src) +} +func (m *CapacityRequestPolicyRange) XXX_Size() int { + return m.Size() +} +func (m *CapacityRequestPolicyRange) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRequestPolicyRange.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRequestPolicyRange proto.InternalMessageInfo + +func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} } +func (*CapacityRequirements) ProtoMessage() {} +func (*CapacityRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{5} +} +func (m *CapacityRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CapacityRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CapacityRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRequirements.Merge(m, src) +} +func (m *CapacityRequirements) XXX_Size() int { + return m.Size() +} +func (m *CapacityRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRequirements.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRequirements proto.InternalMessageInfo + +func (m *Counter) Reset() { *m = Counter{} } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{6} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return m.Size() +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *CounterSet) Reset() { *m = CounterSet{} } +func (*CounterSet) ProtoMessage() {} +func (*CounterSet) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{7} +} +func (m *CounterSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CounterSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_CounterSet.Merge(m, src) +} +func (m *CounterSet) XXX_Size() int { + return m.Size() +} +func (m *CounterSet) XXX_DiscardUnknown() { + xxx_messageInfo_CounterSet.DiscardUnknown(m) +} + +var xxx_messageInfo_CounterSet 
proto.InternalMessageInfo + +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{8} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{9} +} +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src) +} +func (m *DeviceAllocationConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo + +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{10} +} +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) +} +func (m *DeviceAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo + +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{11} +} +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttribute.Merge(m, src) +} +func (m *DeviceAttribute) XXX_Size() int { + return m.Size() +} +func (m *DeviceAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttribute 
proto.InternalMessageInfo + +func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} } +func (*DeviceCapacity) ProtoMessage() {} +func (*DeviceCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{12} +} +func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceCapacity.Merge(m, src) +} +func (m *DeviceCapacity) XXX_Size() int { + return m.Size() +} +func (m *DeviceCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo + +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{13} +} +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) +} +func (m *DeviceClaim) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo + +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func (*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{14} +} +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) +} +func (m *DeviceClaimConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo + +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{15} +} +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) +} +func (m *DeviceClass) XXX_Size() int { + return m.Size() +} +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo + +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) 
ProtoMessage() {} +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{16} +} +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) +} +func (m *DeviceClassConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo + +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{17} +} +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) +} +func (m *DeviceClassList) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo + +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } +func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{18} +} +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) +} +func (m *DeviceClassSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{19} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) 
ProtoMessage() {} +func (*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{20} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} } +func (*DeviceCounterConsumption) ProtoMessage() {} +func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{21} +} +func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceCounterConsumption.Merge(m, src) +} +func (m *DeviceCounterConsumption) XXX_Size() int { + return m.Size() +} +func (m *DeviceCounterConsumption) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{22} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{23} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{24} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} } +func (*DeviceSubRequest) ProtoMessage() {} +func (*DeviceSubRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{25} +} +func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSubRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSubRequest.Merge(m, src) +} +func (m *DeviceSubRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceSubRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo + +func (m *DeviceTaint) Reset() { *m = DeviceTaint{} } +func (*DeviceTaint) ProtoMessage() {} +func (*DeviceTaint) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{26} +} +func (m *DeviceTaint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceTaint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceTaint.Merge(m, src) +} +func (m *DeviceTaint) XXX_Size() int { + return m.Size() +} +func (m *DeviceTaint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceTaint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo + +func (m *DeviceToleration) Reset() { *m = DeviceToleration{} } +func (*DeviceToleration) ProtoMessage() {} +func (*DeviceToleration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{27} +} +func (m *DeviceToleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceToleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceToleration.Merge(m, src) +} +func (m *DeviceToleration) XXX_Size() int { + return m.Size() +} +func (m *DeviceToleration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceToleration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo + +func (m *ExactDeviceRequest) Reset() { *m = ExactDeviceRequest{} } +func (*ExactDeviceRequest) 
ProtoMessage() {} +func (*ExactDeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{28} +} +func (m *ExactDeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExactDeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExactDeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExactDeviceRequest.Merge(m, src) +} +func (m *ExactDeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *ExactDeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExactDeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExactDeviceRequest proto.InternalMessageInfo + +func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} } +func (*NetworkDeviceData) ProtoMessage() {} +func (*NetworkDeviceData) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{29} +} +func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkDeviceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkDeviceData.Merge(m, src) +} +func (m *NetworkDeviceData) XXX_Size() int { + return m.Size() +} +func (m *NetworkDeviceData) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{30} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{31} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = 
ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{32} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{33} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{34} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{35} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{36} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{37} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{38} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{39} +} +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePool) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ResourcePool.Merge(m, src) +} +func (m *ResourcePool) XXX_Size() int { + return m.Size() +} +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo + +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{40} +} +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) +} +func (m *ResourceSlice) XXX_Size() int { + return m.Size() +} +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo + +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{41} +} +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) +} +func (m *ResourceSliceList) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo + +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{42} +} +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) +} +func (m *ResourceSliceSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1.AllocatedDeviceStatus") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1.AllocationResult") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1.CELDeviceSelector") + proto.RegisterType((*CapacityRequestPolicy)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicy") + proto.RegisterType((*CapacityRequestPolicyRange)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicyRange") + proto.RegisterType((*CapacityRequirements)(nil), "k8s.io.api.resource.v1.CapacityRequirements") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), 
"k8s.io.api.resource.v1.CapacityRequirements.RequestsEntry") + proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1.Counter") + proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1.CounterSet") + proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.CounterSet.CountersEntry") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1.Device") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1.Device.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1.Device.CapacityEntry") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1.DeviceAttribute") + proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1.DeviceCapacity") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1.DeviceClassList") + proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1.DeviceConstraint") + proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption") + proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption.CountersEntry") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult.ConsumedCapacityEntry") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1.DeviceSelector") + proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1.DeviceSubRequest") + proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1.DeviceTaint") + proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1.DeviceToleration") + proto.RegisterType((*ExactDeviceRequest)(nil), "k8s.io.api.resource.v1.ExactDeviceRequest") + proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1.NetworkDeviceData") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1.OpaqueDeviceConfiguration") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1.ResourceClaimList") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplate") + 
proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1.ResourcePool") + proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1.ResourceSliceSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/resource/v1/generated.proto", fileDescriptor_f4fc532aec02d243) +} + +var fileDescriptor_f4fc532aec02d243 = []byte{ + // 3028 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0x4d, 0x6c, 0x24, 0x47, + 0xf5, 0x77, 0xcf, 0xcc, 0x8e, 0xc7, 0x6f, 0x6c, 0xaf, 0x5d, 0xbb, 0xeb, 0x4c, 0xfc, 0xff, 0xc7, + 0xe3, 0xf4, 0x92, 0xc4, 0x49, 0x76, 0xc7, 0x6b, 0x8b, 0x44, 0x51, 0x12, 0x10, 0x1e, 0xdb, 0x9b, + 0x38, 0xfb, 0x11, 0xa7, 0xc6, 0x6b, 0x36, 0x28, 0x84, 0xb4, 0x7b, 0xca, 0x76, 0xe3, 0x9e, 0xee, + 0x49, 0x77, 0x8d, 0x77, 0xcd, 0x29, 0xe2, 0x00, 0x57, 0x04, 0x12, 0x02, 0x24, 0x24, 0x94, 0x03, + 0x12, 0x17, 0x84, 0x38, 0x11, 0x04, 0x28, 0xc7, 0x08, 0x29, 0x28, 0x17, 0xa4, 0x20, 0xa1, 0x81, + 0x1d, 0x4e, 0x48, 0x08, 0x89, 0x0b, 0x07, 0x1f, 0x10, 0xaa, 0xea, 0xaa, 0xfe, 0x9a, 0x6e, 0x4f, + 0xdb, 0x59, 0xaf, 0x96, 0x9b, 0xe7, 0xd5, 0x7b, 0xbf, 0xaa, 0x7a, 0xf5, 0xbe, 0xea, 0x75, 0x19, + 0x9e, 0xdc, 0x7b, 0xc1, 0xad, 0x19, 0xf6, 0xbc, 0xd6, 0x36, 0xe6, 0x1d, 0xe2, 0xda, 0x1d, 0x47, + 0x27, 0xf3, 0xfb, 0x0b, 0xf3, 0x3b, 0xc4, 0x22, 0x8e, 0x46, 0x49, 0xb3, 0xd6, 0x76, 0x6c, 0x6a, + 0xa3, 0x29, 0x8f, 0xaf, 0xa6, 0xb5, 0x8d, 0x9a, 0xe4, 0xab, 0xed, 0x2f, 0x4c, 0x5f, 0xde, 0x31, + 0xe8, 0x6e, 0x67, 0xab, 0xa6, 0xdb, 0xad, 0xf9, 0x1d, 0x7b, 0xc7, 0x9e, 0xe7, 0xec, 0x5b, 0x9d, + 0x6d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x79, 0x30, 0xd3, 0x6a, 0x68, 0x3a, 0xdd, 0x76, 0x92, 0xa6, + 0x9a, 0xfe, 0x7c, 0xc0, 0xd3, 0xd2, 0xf4, 0x5d, 0xc3, 0x22, 0xce, 0xc1, 0x7c, 0x7b, 0x6f, 0x27, + 0xba, 0xc6, 0xe3, 0x48, 0xb9, 0xf3, 0x2d, 0x42, 0xb5, 0xa4, 0xb9, 0xe6, 0xd3, 0xa4, 0x9c, 0x8e, + 0x45, 0x8d, 0x56, 0xff, 0x34, 0xcf, 0x0f, 0x12, 0x70, 0xf5, 0x5d, 0xd2, 0xd2, 0xe2, 0x72, 0xea, + 0x87, 0x79, 0xb8, 0xb0, 0x64, 0x9a, 0xb6, 0xce, 0x68, 0x2b, 0x64, 0xdf, 0xd0, 0x49, 0x83, 0x6a, + 0xb4, 0xe3, 0xa2, 0x27, 0xa1, 0xd8, 0x74, 0x8c, 0x7d, 0xe2, 0x54, 0x94, 0x59, 0x65, 0x6e, 0xa4, + 0x3e, 0xfe, 0x51, 0xb7, 0x3a, 0xd4, 0xeb, 0x56, 0x8b, 0x2b, 0x9c, 0x8a, 0xc5, 0x28, 0x9a, 0x85, + 0x42, 0xdb, 0xb6, 0xcd, 0x4a, 0x8e, 0x73, 0x8d, 0x0a, 0xae, 0xc2, 0xba, 0x6d, 0x9b, 0x98, 0x8f, + 0x70, 0x24, 0x8e, 0x5c, 0xc9, 0xc7, 0x90, 0x38, 0x15, 0x8b, 0x51, 0xf4, 0x04, 0x0c, 0xbb, 0xbb, + 0x9a, 0x43, 0xd6, 0x56, 0x2a, 0xc3, 0x9c, 0xb1, 0xdc, 0xeb, 0x56, 0x87, 0x1b, 0x1e, 0x09, 0xcb, + 0x31, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x34, 0xa8, 0x61, 0x5b, 0x6e, 0xa5, 0x30, 0x9b, 0x9f, 0x2b, + 0x2f, 0xce, 0xd7, 0x02, 0x3b, 0xf0, 0xf7, 0x5f, 0x6b, 0xef, 0xed, 0x30, 0x82, 0x5b, 0x63, 0x6a, + 0xae, 0xed, 0x2f, 0xd4, 0x96, 0xa5, 0x5c, 0x1d, 0x89, 0x35, 0x80, 0x4f, 0x72, 0x71, 0x08, 0x16, + 0x5d, 0x83, 0x42, 0x53, 0xa3, 0x5a, 0xe5, 0xcc, 0xac, 0x32, 0x57, 0x5e, 0xbc, 0x9c, 0x0a, 0x2f, + 0xd4, 0x5b, 0xc3, 0xda, 0x9d, 0xd5, 0xbb, 0x94, 0x58, 0x2e, 0x03, 0x2f, 0x31, 0x05, 0xac, 0x68, + 0x54, 0xc3, 0x1c, 0x04, 0xbd, 0x05, 0x65, 0x8b, 0xd0, 0x3b, 0xb6, 0xb3, 0xc7, 0x88, 0x95, 0x22, + 0xc7, 0x7c, 0xba, 0x96, 
0x6c, 0xba, 0xb5, 0x9b, 0x82, 0x95, 0x2b, 0x85, 0x09, 0xd4, 0xcf, 0xf6, + 0xba, 0xd5, 0xf2, 0xcd, 0x00, 0x01, 0x87, 0xe1, 0xd4, 0xdf, 0xe4, 0x60, 0x42, 0x1c, 0xa1, 0x61, + 0x5b, 0x98, 0xb8, 0x1d, 0x93, 0xa2, 0x37, 0x61, 0xd8, 0xd3, 0xaa, 0xcb, 0x8f, 0xaf, 0xbc, 0x58, + 0x4b, 0x9b, 0xce, 0x9b, 0x27, 0x0e, 0x50, 0x3f, 0x2b, 0x14, 0x34, 0xec, 0x8d, 0xbb, 0x58, 0xe2, + 0xa1, 0x4d, 0x18, 0xb5, 0xec, 0x26, 0x69, 0x10, 0x93, 0xe8, 0xd4, 0x76, 0xf8, 0xa1, 0x96, 0x17, + 0x67, 0xc3, 0xf8, 0xcc, 0x85, 0xf8, 0x56, 0x42, 0x7c, 0xf5, 0x89, 0x5e, 0xb7, 0x3a, 0x1a, 0xa6, + 0xe0, 0x08, 0x0e, 0xea, 0xc0, 0x39, 0xcd, 0x5f, 0xc5, 0x86, 0xd1, 0x22, 0x2e, 0xd5, 0x5a, 0x6d, + 0x71, 0x02, 0xcf, 0x64, 0x3b, 0x60, 0x26, 0x56, 0x7f, 0xa4, 0xd7, 0xad, 0x9e, 0x5b, 0xea, 0x87, + 0xc2, 0x49, 0xf8, 0xea, 0x2b, 0x30, 0xb9, 0xbc, 0x7a, 0x5d, 0x98, 0xbe, 0x5c, 0xcb, 0x22, 0x00, + 0xb9, 0xdb, 0x76, 0x88, 0xcb, 0xce, 0x53, 0x38, 0x80, 0x6f, 0x32, 0xab, 0xfe, 0x08, 0x0e, 0x71, + 0xa9, 0x1f, 0xe4, 0xe0, 0xc2, 0xb2, 0xd6, 0xd6, 0x74, 0x83, 0x1e, 0x60, 0xf2, 0x6e, 0x87, 0xb8, + 0x74, 0xdd, 0x36, 0x0d, 0xfd, 0x00, 0xdd, 0x62, 0x87, 0xb1, 0xad, 0x75, 0x4c, 0x9a, 0x70, 0x18, + 0x7d, 0xbb, 0x09, 0x4e, 0xe7, 0x8d, 0x8e, 0x66, 0x51, 0x83, 0x1e, 0x78, 0x8e, 0xb0, 0xe2, 0x41, + 0x60, 0x89, 0x85, 0x08, 0x94, 0xf7, 0x35, 0xd3, 0x68, 0x6e, 0x6a, 0x66, 0x87, 0xb8, 0x95, 0x3c, + 0xf7, 0x84, 0xe3, 0x42, 0x9f, 0x13, 0xbb, 0x2a, 0x6f, 0x06, 0x50, 0x38, 0x8c, 0x8b, 0xb6, 0x00, + 0xf8, 0x4f, 0xac, 0x59, 0x3b, 0xa4, 0x52, 0xe0, 0x1b, 0x58, 0x4c, 0xb3, 0xa6, 0x44, 0x05, 0x70, + 0xc9, 0xfa, 0x38, 0xd3, 0xdd, 0xa6, 0x8f, 0x84, 0x43, 0xa8, 0xea, 0x7b, 0x39, 0x98, 0x4e, 0x17, + 0x45, 0x6b, 0x90, 0x6f, 0x19, 0xd6, 0x09, 0x95, 0x37, 0xdc, 0xeb, 0x56, 0xf3, 0x37, 0x0c, 0x0b, + 0x33, 0x0c, 0x0e, 0xa5, 0xdd, 0xe5, 0xd1, 0xea, 0xa4, 0x50, 0xda, 0x5d, 0xcc, 0x30, 0xd0, 0x75, + 0x28, 0xb8, 0x94, 0xb4, 0x85, 0x03, 0x1c, 0x17, 0x8b, 0x07, 0x89, 0x06, 0x25, 0x6d, 0xcc, 0x51, + 0xd4, 0xff, 0x28, 0x70, 0x3e, 0xac, 0x02, 0xc3, 0x21, 0x2d, 0x62, 0x51, 0x17, 0x1d, 0x40, 0xc9, + 0xf1, 0x54, 0xc2, 0x7c, 0x99, 0x9d, 0xf1, 0x8b, 0x59, 0xb4, 0x2f, 0xe5, 0x6b, 0x42, 0x9f, 0xee, + 0xaa, 0x45, 0x9d, 0x83, 0xfa, 0xe3, 0xe2, 0xbc, 0x4b, 0x92, 0xfc, 0xcd, 0xbf, 0x54, 0xc7, 0xde, + 0xe8, 0x68, 0xa6, 0xb1, 0x6d, 0x90, 0xe6, 0x4d, 0xad, 0x45, 0xb0, 0x3f, 0xdd, 0xf4, 0x1e, 0x8c, + 0x45, 0xa4, 0xd1, 0x04, 0xe4, 0xf7, 0xc8, 0x81, 0xe7, 0x10, 0x98, 0xfd, 0x89, 0x56, 0xe0, 0xcc, + 0x3e, 0xb3, 0x93, 0x93, 0x69, 0x14, 0x7b, 0xc2, 0x2f, 0xe6, 0x5e, 0x50, 0xd4, 0xb7, 0x61, 0x78, + 0xd9, 0xee, 0x58, 0x94, 0x38, 0xa8, 0x21, 0x41, 0x4f, 0x76, 0xe2, 0x63, 0x62, 0x8f, 0x67, 0xb8, + 0x05, 0x8b, 0x39, 0xd4, 0x7f, 0x28, 0x00, 0x62, 0x82, 0x06, 0xa1, 0x2c, 0x6f, 0x59, 0x5a, 0x8b, + 0x08, 0xe7, 0xf6, 0xf3, 0x16, 0xd7, 0x00, 0x1f, 0x41, 0x6f, 0x43, 0x49, 0xf7, 0xf8, 0xdd, 0x4a, + 0x8e, 0x2b, 0xfe, 0x4a, 0xaa, 0xe2, 0x7d, 0x5c, 0xf9, 0xa7, 0x50, 0xf7, 0x84, 0x54, 0xb7, 0x24, + 0x63, 0x1f, 0x73, 0xfa, 0x2d, 0x18, 0x8b, 0x30, 0x27, 0x68, 0xf7, 0xb9, 0xa8, 0x76, 0xab, 0x03, + 0xe6, 0x0f, 0xab, 0xf3, 0xdf, 0x25, 0x10, 0x09, 0x36, 0xc3, 0x56, 0x5d, 0x00, 0x8d, 0x52, 0xc7, + 0xd8, 0xea, 0x50, 0x22, 0x37, 0x3b, 0x20, 0x63, 0xd4, 0x96, 0x7c, 0x01, 0x6f, 0xab, 0x17, 0x65, + 0x7c, 0x0c, 0x06, 0xfa, 0x6d, 0x2b, 0x34, 0x0d, 0xda, 0x83, 0x92, 0x2e, 0x0c, 0x56, 0x04, 0xaf, + 0x4b, 0x03, 0xa6, 0x94, 0xf6, 0x1d, 0x33, 0x65, 0x49, 0x4e, 0x30, 0x65, 0x39, 0x01, 0xda, 0x87, + 0x09, 0xdd, 0xb6, 0xdc, 0x4e, 0x8b, 0xb8, 0x52, 0xe9, 0xa2, 0x76, 0xb8, 0x72, 0xf4, 0xa4, 0x82, + 0x7b, 0x99, 0x0b, 0xb7, 0x79, 0xf1, 0x50, 0x11, 
0x13, 0x4f, 0x2c, 0xc7, 0x10, 0x71, 0xdf, 0x1c, + 0x68, 0x0e, 0x4a, 0x2c, 0xcb, 0xb1, 0xd5, 0xf0, 0x54, 0x36, 0x52, 0x1f, 0x65, 0x4b, 0xbe, 0x29, + 0x68, 0xd8, 0x1f, 0xed, 0xcb, 0xab, 0xc5, 0xfb, 0x94, 0x57, 0xe7, 0xa0, 0xa4, 0x99, 0x26, 0x63, + 0x70, 0x79, 0x5d, 0x55, 0xf2, 0x56, 0xb0, 0x24, 0x68, 0xd8, 0x1f, 0x45, 0xd7, 0xa0, 0x48, 0x35, + 0xc3, 0xa2, 0x6e, 0xa5, 0xc4, 0x35, 0x73, 0xf1, 0x68, 0xcd, 0x6c, 0x30, 0xde, 0xa0, 0x9a, 0xe3, + 0x3f, 0x5d, 0x2c, 0x20, 0xd0, 0x02, 0x94, 0xb7, 0x0c, 0xab, 0xe9, 0x6e, 0xd8, 0x0c, 0xbc, 0x32, + 0xc2, 0x67, 0xe6, 0x95, 0x4c, 0x3d, 0x20, 0xe3, 0x30, 0x0f, 0x5a, 0x86, 0x49, 0xf6, 0xd3, 0xb0, + 0x76, 0x82, 0xaa, 0xac, 0x02, 0xb3, 0xf9, 0xb9, 0x91, 0xfa, 0x85, 0x5e, 0xb7, 0x3a, 0x59, 0x8f, + 0x0f, 0xe2, 0x7e, 0x7e, 0x74, 0x1b, 0x2a, 0x82, 0x78, 0x55, 0x33, 0xcc, 0x8e, 0x43, 0x42, 0x58, + 0x65, 0x8e, 0xf5, 0xff, 0xbd, 0x6e, 0xb5, 0x52, 0x4f, 0xe1, 0xc1, 0xa9, 0xd2, 0x0c, 0x99, 0x15, + 0x10, 0x77, 0x6e, 0x74, 0x4c, 0x6a, 0xb4, 0xcd, 0x50, 0xcd, 0xe4, 0x56, 0x46, 0xf9, 0xf6, 0x38, + 0xf2, 0x52, 0x0a, 0x0f, 0x4e, 0x95, 0x9e, 0xde, 0x86, 0xb3, 0x31, 0x6f, 0x4a, 0x88, 0x05, 0x5f, + 0x88, 0xc6, 0x82, 0xa7, 0x06, 0x14, 0x74, 0x12, 0x2f, 0x14, 0x13, 0xa6, 0x75, 0x18, 0x8b, 0xb8, + 0x50, 0xc2, 0x2c, 0x2f, 0x47, 0x67, 0x79, 0x72, 0x80, 0x73, 0xc8, 0x84, 0x13, 0x0a, 0x3c, 0xdf, + 0xce, 0xc1, 0x63, 0xf1, 0xa2, 0x72, 0xd9, 0xb6, 0xb6, 0x8d, 0x9d, 0x8e, 0xc3, 0x7f, 0xa0, 0x2f, + 0x41, 0xd1, 0x03, 0x12, 0x11, 0x69, 0x4e, 0x9a, 0x50, 0x83, 0x53, 0x0f, 0xbb, 0xd5, 0xa9, 0xb8, + 0xa8, 0x37, 0x82, 0x85, 0x1c, 0xb3, 0x69, 0x3f, 0x27, 0xe6, 0xf8, 0xa1, 0x8e, 0x86, 0x73, 0x5a, + 0x90, 0xc2, 0xd0, 0x37, 0xe0, 0x5c, 0x53, 0xf8, 0x71, 0x68, 0x09, 0x22, 0x67, 0x3f, 0x3b, 0xc8, + 0xf5, 0x43, 0x22, 0xf5, 0xff, 0x13, 0xab, 0x3c, 0x97, 0x30, 0x88, 0x93, 0x26, 0x51, 0xff, 0xa4, + 0xc0, 0x54, 0x72, 0x79, 0x8d, 0xde, 0x81, 0x61, 0x87, 0xff, 0x25, 0x73, 0xfa, 0x73, 0x47, 0x2f, + 0x45, 0xec, 0x2c, 0xbd, 0x4c, 0xf7, 0x7e, 0xbb, 0x58, 0xc2, 0xa2, 0xaf, 0x42, 0x51, 0xe7, 0xab, + 0x11, 0xe1, 0xfc, 0xb9, 0xac, 0x17, 0x80, 0xe8, 0xae, 0x7d, 0xf7, 0xf6, 0xc8, 0x58, 0x80, 0xaa, + 0x3f, 0x53, 0xe0, 0x6c, 0xcc, 0xd2, 0xd0, 0x0c, 0xe4, 0x0d, 0x8b, 0x72, 0xcb, 0xc9, 0x7b, 0x07, + 0xb2, 0x66, 0x51, 0x2f, 0x07, 0xb3, 0x01, 0xf4, 0x38, 0x14, 0xb6, 0xd8, 0x55, 0x31, 0xcf, 0x9d, + 0x65, 0xac, 0xd7, 0xad, 0x8e, 0xd4, 0x6d, 0xdb, 0xf4, 0x38, 0xf8, 0x10, 0x7a, 0x0a, 0x8a, 0x2e, + 0x75, 0x0c, 0x6b, 0x87, 0x17, 0x9a, 0x23, 0x5e, 0xc0, 0x68, 0x70, 0x8a, 0xc7, 0x26, 0x86, 0xd1, + 0x33, 0x30, 0xbc, 0x4f, 0x1c, 0x5e, 0x9e, 0x7b, 0x61, 0x95, 0x87, 0xc1, 0x4d, 0x8f, 0xe4, 0xb1, + 0x4a, 0x06, 0xf5, 0x63, 0x05, 0xc6, 0xa3, 0xf6, 0x7a, 0x2a, 0x15, 0x06, 0xda, 0x86, 0x31, 0x27, + 0x5c, 0xbc, 0x0a, 0x1f, 0xba, 0x7c, 0xac, 0x62, 0xb9, 0x3e, 0xd9, 0xeb, 0x56, 0xc7, 0xa2, 0x45, + 0x70, 0x14, 0x56, 0xfd, 0x71, 0x0e, 0xca, 0x62, 0x3f, 0xa6, 0x66, 0xb4, 0x50, 0xa3, 0xaf, 0x42, + 0x7c, 0x22, 0x93, 0x35, 0x05, 0xd5, 0x49, 0x82, 0xe3, 0x7c, 0x0d, 0xca, 0x2c, 0x99, 0x51, 0xc7, + 0xcb, 0x08, 0x9e, 0x11, 0xcd, 0x0d, 0x74, 0x18, 0x21, 0x10, 0xdc, 0x2b, 0x02, 0x9a, 0x8b, 0xc3, + 0x88, 0xe8, 0xb6, 0x6f, 0xa0, 0xf9, 0x4c, 0x79, 0x98, 0x6d, 0x35, 0x9b, 0x6d, 0x7e, 0xa8, 0x40, + 0x25, 0x4d, 0x28, 0x12, 0x3a, 0x94, 0x93, 0x84, 0x8e, 0xdc, 0x83, 0x08, 0x1d, 0xbf, 0x56, 0x42, + 0x47, 0xec, 0xba, 0xe8, 0x1d, 0x28, 0xb1, 0x3b, 0x2e, 0xef, 0x49, 0x78, 0x26, 0x7b, 0x25, 0xdb, + 0x8d, 0xf8, 0xf5, 0xad, 0xaf, 0x13, 0x9d, 0xde, 0x20, 0x54, 0x0b, 0x2e, 0xb0, 0x01, 0x0d, 0xfb, + 0xa8, 0x68, 0x0d, 0x0a, 0x6e, 0x9b, 0xe8, 0xd9, 0xb2, 0x0b, 0x5f, 0x54, 
0xa3, 0x4d, 0xf4, 0xa0, + 0x9a, 0x64, 0xbf, 0x30, 0x87, 0x50, 0xbf, 0x1f, 0xd6, 0xbf, 0xeb, 0x46, 0xf5, 0x9f, 0xa2, 0x55, + 0xe5, 0x41, 0x68, 0xf5, 0x03, 0x3f, 0x68, 0xf1, 0x85, 0x5d, 0x37, 0x5c, 0x8a, 0xde, 0xea, 0xd3, + 0x6c, 0x2d, 0x9b, 0x66, 0x99, 0x34, 0xd7, 0xab, 0xef, 0x45, 0x92, 0x12, 0xd2, 0xea, 0xab, 0x70, + 0xc6, 0xa0, 0xa4, 0x25, 0xfd, 0xe7, 0x62, 0x06, 0xb5, 0x06, 0xc1, 0x65, 0x8d, 0x49, 0x62, 0x0f, + 0x40, 0xfd, 0x6e, 0x2e, 0xb2, 0x76, 0xa6, 0x6e, 0xf4, 0x65, 0x18, 0x71, 0x45, 0x99, 0x27, 0x3d, + 0x7f, 0x40, 0xc2, 0xf6, 0xab, 0xc6, 0x49, 0x31, 0xc9, 0x88, 0xa4, 0xb8, 0x38, 0xc0, 0x0a, 0xf9, + 0x66, 0x2e, 0xa3, 0x6f, 0xc6, 0x8e, 0x39, 0xcd, 0x37, 0xd1, 0x75, 0x38, 0x4f, 0xee, 0x52, 0x62, + 0x35, 0x49, 0x13, 0x0b, 0x1c, 0x5e, 0x1b, 0x7b, 0xe1, 0xbe, 0xd2, 0xeb, 0x56, 0xcf, 0xaf, 0x26, + 0x8c, 0xe3, 0x44, 0x29, 0xd5, 0x84, 0xa4, 0xc3, 0x47, 0xb7, 0xa0, 0x68, 0xb7, 0xb5, 0x77, 0xfd, + 0xf0, 0xbe, 0x90, 0xb6, 0xfc, 0xd7, 0x39, 0x57, 0x92, 0x71, 0x01, 0x5b, 0xbb, 0x37, 0x8c, 0x05, + 0x98, 0xfa, 0x77, 0x05, 0x26, 0xe2, 0x81, 0xee, 0x18, 0xf1, 0x64, 0x1d, 0xc6, 0x5b, 0x1a, 0xd5, + 0x77, 0xfd, 0x84, 0x29, 0x7a, 0xa6, 0x73, 0xbd, 0x6e, 0x75, 0xfc, 0x46, 0x64, 0xe4, 0xb0, 0x5b, + 0x45, 0x57, 0x3b, 0xa6, 0x79, 0x10, 0xbd, 0xce, 0xc4, 0xe4, 0xd1, 0x9b, 0x30, 0xd9, 0x34, 0x5c, + 0x6a, 0x58, 0x3a, 0x0d, 0x40, 0xbd, 0x26, 0xeb, 0xb3, 0xac, 0x60, 0x5e, 0x89, 0x0f, 0xa6, 0xe0, + 0xf6, 0xa3, 0xa8, 0x3f, 0xca, 0xf9, 0x3e, 0xdc, 0x77, 0x01, 0x42, 0x8b, 0x00, 0xba, 0x7f, 0xe3, + 0x8d, 0xb7, 0xc7, 0x82, 0xbb, 0x30, 0x0e, 0x71, 0x21, 0xb3, 0xef, 0x36, 0xfd, 0xc5, 0xe3, 0x5e, + 0xbc, 0x1e, 0x9a, 0xbb, 0xf5, 0x3f, 0x15, 0x18, 0x8b, 0x64, 0xd2, 0x0c, 0x57, 0xec, 0x37, 0x60, + 0x98, 0xdc, 0xd5, 0x74, 0x6a, 0xca, 0xb2, 0xe0, 0x99, 0xb4, 0x09, 0x57, 0x19, 0x5b, 0x34, 0x51, + 0xf3, 0x06, 0xe0, 0xaa, 0x27, 0x8e, 0x25, 0x0e, 0xda, 0x85, 0xf1, 0x6d, 0xc3, 0x71, 0xe9, 0xd2, + 0xbe, 0x66, 0x98, 0xda, 0x96, 0x49, 0x44, 0x26, 0x1d, 0x90, 0xa5, 0x1b, 0x9d, 0x2d, 0x89, 0x3b, + 0x25, 0x16, 0x3a, 0x7e, 0x35, 0x82, 0x83, 0x63, 0xb8, 0xea, 0x1f, 0x8b, 0xb2, 0xa6, 0x4f, 0x29, + 0x44, 0xd1, 0xd3, 0xac, 0xa0, 0xe5, 0x43, 0x42, 0x07, 0xa1, 0xca, 0x94, 0x93, 0xb1, 0x1c, 0x0f, + 0x7d, 0x59, 0xc8, 0x65, 0xfa, 0xb2, 0x90, 0xcf, 0xf0, 0x65, 0xa1, 0x70, 0xe4, 0x97, 0x85, 0x05, + 0x28, 0x6b, 0xcd, 0x96, 0x61, 0x2d, 0xe9, 0x3a, 0x71, 0x5d, 0x5e, 0x30, 0x8a, 0xbb, 0xe8, 0x52, + 0x40, 0xc6, 0x61, 0x1e, 0x56, 0xfe, 0x50, 0xdb, 0x24, 0x8e, 0xb8, 0xdf, 0x15, 0xb3, 0x28, 0x76, + 0xc3, 0x17, 0x08, 0xca, 0x9f, 0x80, 0xe6, 0xe2, 0x30, 0x62, 0xf2, 0x65, 0x77, 0xf8, 0x3e, 0x5e, + 0x76, 0x4b, 0x9f, 0xe9, 0xb2, 0xfb, 0x5a, 0xf0, 0x31, 0x66, 0x84, 0xeb, 0xf6, 0x4a, 0xe8, 0x63, + 0xcc, 0x61, 0xb7, 0xfa, 0x78, 0xda, 0x07, 0x27, 0x7a, 0xd0, 0x26, 0x6e, 0xed, 0x56, 0xf8, 0x8b, + 0xcd, 0xfb, 0x8a, 0xdf, 0x7c, 0x69, 0xca, 0x9a, 0x97, 0xdf, 0xeb, 0xcb, 0x8b, 0xd7, 0x4e, 0x74, + 0xed, 0xa9, 0x2d, 0xc7, 0xd0, 0xbc, 0x80, 0xf0, 0x74, 0xac, 0x2f, 0xd3, 0x4c, 0x6f, 0x0c, 0xf5, + 0xad, 0x67, 0xda, 0x85, 0x0b, 0x89, 0xa8, 0xa7, 0xda, 0xf3, 0xdc, 0x94, 0x17, 0x13, 0xbf, 0x5b, + 0xb3, 0x02, 0x79, 0x9d, 0x98, 0x22, 0x6f, 0xa5, 0x7e, 0x23, 0xea, 0xfb, 0x62, 0xe1, 0xb5, 0xa6, + 0x97, 0x57, 0xaf, 0x63, 0x26, 0xae, 0x7e, 0xab, 0x20, 0x33, 0x55, 0xe0, 0xec, 0x19, 0x62, 0xd4, + 0x12, 0x9c, 0x6d, 0x06, 0x09, 0x9d, 0xe7, 0x65, 0xcf, 0x45, 0x1f, 0x11, 0xcc, 0xe1, 0x0a, 0x84, + 0xcb, 0xc5, 0xf9, 0xa3, 0x25, 0x49, 0xfe, 0x3e, 0x96, 0x24, 0x9b, 0x30, 0x1e, 0x7c, 0xbe, 0xb9, + 0x61, 0x37, 0xa5, 0xcf, 0xd7, 0x64, 0x08, 0x5b, 0x8a, 0x8c, 0x1e, 0x76, 0xab, 0xe7, 0xe3, 0x37, + 
0x5b, 0x46, 0xc7, 0x31, 0x14, 0x74, 0x11, 0xce, 0xf0, 0xac, 0xc1, 0xa3, 0x42, 0x3e, 0x28, 0xbe, + 0x78, 0xd8, 0xc7, 0xde, 0xd8, 0xe9, 0x47, 0x83, 0xcd, 0x50, 0x2f, 0x74, 0x98, 0x9f, 0xfd, 0xa5, + 0xe3, 0x34, 0xf9, 0xbd, 0x9a, 0xc3, 0x1f, 0xf1, 0xb1, 0xd4, 0x7f, 0xf9, 0xf7, 0x08, 0xde, 0x9e, + 0x43, 0x8f, 0x85, 0x8c, 0xb9, 0x5e, 0x16, 0xcb, 0xca, 0x5f, 0x23, 0x07, 0x9e, 0x65, 0x5f, 0x0c, + 0x5b, 0xf6, 0x48, 0xca, 0x35, 0xf7, 0x25, 0x28, 0x92, 0xed, 0x6d, 0xa2, 0x53, 0x11, 0x99, 0x65, + 0xe3, 0xb7, 0xb8, 0xca, 0xa9, 0x87, 0xac, 0xf0, 0x08, 0xa6, 0xf4, 0x88, 0x58, 0x88, 0x30, 0xfb, + 0xa0, 0x46, 0x8b, 0x2c, 0x35, 0x9b, 0xa4, 0x29, 0x3e, 0x26, 0x1d, 0xe7, 0xdb, 0x1e, 0x6f, 0x1a, + 0x6c, 0x48, 0x00, 0x1c, 0x60, 0xbd, 0x58, 0xfa, 0xc1, 0x4f, 0xaa, 0x43, 0xef, 0xfd, 0x79, 0x76, + 0x48, 0x7d, 0x3f, 0x27, 0x8d, 0x3f, 0x50, 0xf7, 0xa0, 0x8d, 0xbf, 0x0a, 0x25, 0xbb, 0xcd, 0x78, + 0x6d, 0x99, 0x95, 0x2e, 0xc9, 0xea, 0xe2, 0x75, 0x41, 0x3f, 0xec, 0x56, 0x2b, 0x71, 0x58, 0x39, + 0x86, 0x7d, 0xe9, 0x40, 0x85, 0xf9, 0x4c, 0x2a, 0x2c, 0x1c, 0x5f, 0x85, 0xcb, 0x30, 0x19, 0x98, + 0x4e, 0x83, 0xe8, 0xb6, 0xd5, 0x74, 0x85, 0xf5, 0xf2, 0xcc, 0xb1, 0x11, 0x1f, 0xc4, 0xfd, 0xfc, + 0xea, 0x0f, 0x0b, 0x80, 0xfa, 0x0b, 0x8d, 0xa4, 0x08, 0xa0, 0x7c, 0x96, 0x08, 0x90, 0x3b, 0xd5, + 0x08, 0x90, 0xbf, 0xbf, 0x11, 0xa0, 0x70, 0x44, 0x04, 0x78, 0x18, 0x4b, 0x88, 0xd3, 0x0a, 0x1a, + 0x3f, 0x57, 0x60, 0xb2, 0xef, 0x15, 0x02, 0x7a, 0x09, 0xc6, 0x0c, 0x56, 0x08, 0x6f, 0x6b, 0xe2, + 0xca, 0xe6, 0x19, 0xc6, 0x05, 0xb1, 0xcc, 0xb1, 0xb5, 0xf0, 0x20, 0x8e, 0xf2, 0xa2, 0x47, 0x21, + 0x6f, 0xb4, 0x65, 0xaf, 0x96, 0xe7, 0xaa, 0xb5, 0x75, 0x17, 0x33, 0x1a, 0x33, 0xb9, 0x5d, 0xcd, + 0x69, 0xde, 0xd1, 0x1c, 0xe6, 0xc9, 0x0e, 0xd3, 0x6e, 0x3e, 0x6a, 0x72, 0xaf, 0x46, 0x87, 0x71, + 0x9c, 0x5f, 0xfd, 0xa9, 0x02, 0x8f, 0xa6, 0x5e, 0xe5, 0x32, 0xbf, 0x64, 0xd1, 0x00, 0xda, 0x9a, + 0xa3, 0xb5, 0x88, 0xb8, 0xa3, 0x9c, 0xe0, 0xe5, 0x87, 0x7f, 0x09, 0x5a, 0xf7, 0x81, 0x70, 0x08, + 0x54, 0xfd, 0x5e, 0x0e, 0xc6, 0xe4, 0x05, 0xd6, 0xeb, 0xdd, 0x9d, 0x7e, 0x63, 0xe7, 0x5a, 0xa4, + 0xb1, 0x93, 0x5a, 0x52, 0x44, 0x96, 0x95, 0xd6, 0xda, 0x41, 0x0d, 0x28, 0xba, 0xfc, 0x7d, 0xd0, + 0xa0, 0x0e, 0x7a, 0x14, 0x8e, 0x8b, 0x04, 0x8a, 0xf7, 0x7e, 0x63, 0x01, 0xa5, 0xf6, 0x14, 0x98, + 0x89, 0xf0, 0x8b, 0x42, 0xcc, 0xc1, 0x64, 0x9b, 0x38, 0xc4, 0xd2, 0x09, 0xba, 0x04, 0x25, 0xad, + 0x6d, 0xbc, 0xe2, 0xd8, 0x9d, 0xb6, 0x38, 0x45, 0xff, 0xf6, 0xb7, 0xb4, 0xbe, 0xc6, 0xe9, 0xd8, + 0xe7, 0x60, 0xdc, 0x72, 0x2d, 0xc2, 0x96, 0x42, 0x9d, 0x4e, 0x8f, 0x8e, 0x7d, 0x0e, 0xbf, 0x2e, + 0x2a, 0xa4, 0xd6, 0x45, 0x75, 0xc8, 0x77, 0x8c, 0xa6, 0x68, 0x34, 0x5f, 0x91, 0xc9, 0xe3, 0x56, + 0xd6, 0x42, 0x98, 0x09, 0xab, 0xbf, 0x55, 0x60, 0x32, 0xb2, 0xc9, 0x07, 0xd0, 0x7d, 0x7a, 0x2d, + 0xda, 0x7d, 0x7a, 0x22, 0xd3, 0x61, 0xa5, 0xf4, 0x9f, 0xf4, 0xd8, 0xf2, 0x79, 0x03, 0xea, 0x66, + 0xfc, 0x99, 0xd1, 0xc5, 0x0c, 0x4d, 0xdc, 0xf4, 0xb7, 0x45, 0xea, 0xaf, 0x72, 0x70, 0x2e, 0xc1, + 0x72, 0xd0, 0x6d, 0x80, 0x20, 0x68, 0x8b, 0xa9, 0x52, 0x23, 0x69, 0xdf, 0x47, 0x12, 0xfe, 0xf2, + 0x24, 0x44, 0x0d, 0x61, 0xa1, 0x16, 0x94, 0x1d, 0xe2, 0x12, 0x67, 0x9f, 0x34, 0xaf, 0xf2, 0xdc, + 0xcf, 0x14, 0xf5, 0x7c, 0x26, 0x45, 0xf5, 0x59, 0x69, 0x10, 0xb2, 0x71, 0x00, 0x89, 0xc3, 0xf8, + 0xe8, 0x76, 0xa0, 0x30, 0xef, 0xeb, 0xf3, 0xe5, 0x01, 0xbb, 0x88, 0xbe, 0xca, 0x3b, 0x42, 0x75, + 0x7f, 0x50, 0xe0, 0x42, 0x64, 0x79, 0x1b, 0xa4, 0xd5, 0x36, 0x35, 0x4a, 0x1e, 0x40, 0x88, 0x69, + 0x44, 0x42, 0xcc, 0x42, 0x26, 0xed, 0xc9, 0xe5, 0xa5, 0x76, 0x91, 0x3f, 0x56, 0xe0, 0xd1, 0x44, + 0x89, 0x07, 0xe0, 0x38, 
0x38, 0xea, 0x38, 0x97, 0x8f, 0xb5, 0xa3, 0x14, 0x07, 0xfa, 0x7d, 0xda, + 0x7e, 0xb8, 0x27, 0xfd, 0x6f, 0xe5, 0x01, 0xf5, 0x17, 0x0a, 0x8c, 0x4a, 0xce, 0x75, 0xdb, 0x36, + 0x33, 0x5c, 0x2e, 0x17, 0x01, 0xc4, 0xeb, 0x53, 0xf9, 0x15, 0x25, 0x1f, 0xac, 0xf8, 0x15, 0x7f, + 0x04, 0x87, 0xb8, 0xd0, 0x6b, 0x80, 0xe4, 0xda, 0x1a, 0xa6, 0xec, 0x09, 0xf2, 0x90, 0x9e, 0xaf, + 0x4f, 0x0b, 0x59, 0x84, 0xfb, 0x38, 0x70, 0x82, 0x94, 0xfa, 0x3b, 0x25, 0xc8, 0xbd, 0x9c, 0xfc, + 0xf0, 0xe9, 0x9c, 0x2f, 0x2b, 0x55, 0xe7, 0xe1, 0x0c, 0xc2, 0x39, 0x1f, 0xc2, 0x0c, 0xc2, 0xd7, + 0x95, 0xe2, 0x00, 0xbf, 0x2c, 0xc4, 0xd6, 0xcf, 0x0d, 0x3f, 0x6b, 0x75, 0x76, 0x35, 0xf4, 0xce, + 0xb8, 0xbc, 0xf8, 0xb9, 0x41, 0x0b, 0x61, 0x46, 0x99, 0xd8, 0x33, 0x0c, 0x3f, 0xc8, 0xc9, 0x1f, + 0xeb, 0x41, 0x4e, 0xe1, 0x14, 0x1e, 0xe4, 0x9c, 0x39, 0xf2, 0x41, 0xce, 0x5a, 0x90, 0x2d, 0xbc, + 0xdb, 0xc3, 0xcc, 0xd1, 0xe9, 0xf5, 0x88, 0x57, 0xbb, 0x18, 0xa6, 0xda, 0xc4, 0xf1, 0xc8, 0xc1, + 0xda, 0x98, 0x27, 0x7a, 0x6f, 0x82, 0xa6, 0x7b, 0xdd, 0xea, 0xd4, 0x7a, 0x22, 0x07, 0x4e, 0x91, + 0x44, 0x5b, 0x30, 0xce, 0x5b, 0x7c, 0x4d, 0xff, 0x45, 0x95, 0xf7, 0x6e, 0x48, 0x1d, 0xfc, 0x4c, + 0x2e, 0xe8, 0x3c, 0x37, 0x22, 0x08, 0x38, 0x86, 0x58, 0x7f, 0xf9, 0xa3, 0x7b, 0x33, 0x43, 0x9f, + 0xdc, 0x9b, 0x19, 0xfa, 0xf4, 0xde, 0xcc, 0xd0, 0x7b, 0xbd, 0x19, 0xe5, 0xa3, 0xde, 0x8c, 0xf2, + 0x49, 0x6f, 0x46, 0xf9, 0xb4, 0x37, 0xa3, 0xfc, 0xb5, 0x37, 0xa3, 0x7c, 0xe7, 0x6f, 0x33, 0x43, + 0x5f, 0x99, 0x4a, 0xfe, 0x77, 0x81, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x0c, 0xec, 0x16, + 0x47, 0x30, 0x00, 0x00, +} + +func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ShareID != nil { + i -= len(*m.ShareID) + copy(dAtA[i:], *m.ShareID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID))) + i-- + dAtA[i] = 0x3a + } + if m.NetworkData != nil { + { + size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Data != nil { + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x1a + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllocationTimestamp != nil { + { + size, err := m.AllocationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CapacityRequestPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapacityRequestPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CapacityRequestPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValidRange != nil { + { + size, err := m.ValidRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ValidValues) > 0 { + for iNdEx := len(m.ValidValues) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CapacityRequestPolicyRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapacityRequestPolicyRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CapacityRequestPolicyRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Step != nil { + { + size, err := m.Step.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Max != nil { + { + size, err := m.Max.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Min != nil { + { + size, err := m.Min.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CapacityRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapacityRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CapacityRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[QualifiedName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Counter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CounterSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CounterSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Counters) > 0 { + keysForCounters := make([]string, 0, len(m.Counters)) + for k := range m.Counters { + keysForCounters = append(keysForCounters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Counters[string(keysForCounters[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x12 + i -= len(keysForCounters[iNdEx]) + copy(dAtA[i:], keysForCounters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllowMultipleAllocations != nil { + i-- + if *m.AllowMultipleAllocations { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if len(m.BindingFailureConditions) > 0 { + for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingFailureConditions[iNdEx]) + copy(dAtA[i:], m.BindingFailureConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.BindingConditions) > 0 { + for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingConditions[iNdEx]) + copy(dAtA[i:], m.BindingConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if m.BindsToNode != nil { + i-- + if *m.BindsToNode { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.AllNodes != nil { + i-- + if *m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x2a + } + if len(m.ConsumesCounters) > 0 { + for iNdEx := len(m.ConsumesCounters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConsumesCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VersionValue != nil { + i -= len(*m.VersionValue) + copy(dAtA[i:], *m.VersionValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) + i-- + dAtA[i] = 0x2a + } + if m.StringValue != nil { + i -= len(*m.StringValue) + copy(dAtA[i:], *m.StringValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) + i-- + dAtA[i] = 0x22 + } + if m.BoolValue != nil { + i-- + if *m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *DeviceCapacity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceCapacity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RequestPolicy != nil { + { + size, err := m.RequestPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExtendedResourceName != nil { + i -= len(*m.ExtendedResourceName) + copy(dAtA[i:], *m.ExtendedResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExtendedResourceName))) + i-- + dAtA[i] = 0x22 + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DistinctAttribute != nil { + i -= len(*m.DistinctAttribute) + copy(dAtA[i:], *m.DistinctAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DistinctAttribute))) + i-- + dAtA[i] = 0x1a + } + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceCounterConsumption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceCounterConsumption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Counters) > 0 { + 
keysForCounters := make([]string, 0, len(m.Counters)) + for k := range m.Counters { + keysForCounters = append(keysForCounters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Counters[string(keysForCounters[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCounters[iNdEx]) + copy(dAtA[i:], keysForCounters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CounterSet) + copy(dAtA[i:], m.CounterSet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CounterSet))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FirstAvailable) > 0 { + for iNdEx := len(m.FirstAvailable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FirstAvailable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Exactly != nil { + { + size, err := m.Exactly.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConsumedCapacity) > 0 { + keysForConsumedCapacity := make([]string, 0, len(m.ConsumedCapacity)) + for k := range m.ConsumedCapacity { + keysForConsumedCapacity = append(keysForConsumedCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity) + for iNdEx := len(keysForConsumedCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.ConsumedCapacity[QualifiedName(keysForConsumedCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForConsumedCapacity[iNdEx]) + copy(dAtA[i:], keysForConsumedCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForConsumedCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.ShareID != 
nil { + i -= len(*m.ShareID) + copy(dAtA[i:], *m.ShareID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID))) + i-- + dAtA[i] = 0x4a + } + if len(m.BindingFailureConditions) > 0 { + for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingFailureConditions[iNdEx]) + copy(dAtA[i:], m.BindingFailureConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.BindingConditions) > 0 { + for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingConditions[iNdEx]) + copy(dAtA[i:], m.BindingConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x22 + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x1a + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceSubRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSubRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSubRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Capacity != nil { + { + size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= 
len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceTaint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x1a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceToleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceToleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceToleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 + } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExactDeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExactDeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExactDeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Capacity != nil { + { + size, err 
:= m.Capacity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x20 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x1a + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.HardwareAddress) + copy(dAtA[i:], m.HardwareAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) + i-- + dAtA[i] = 0x1a + if len(m.IPs) > 0 { + for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPs[iNdEx]) + copy(dAtA[i:], m.IPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.InterfaceName) + copy(dAtA[i:], m.InterfaceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Allocation != nil { + { + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) 
{ + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourcePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceSpec) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SharedCounters) > 0 { + for iNdEx := len(m.SharedCounters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SharedCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.PerDeviceNodeSelection != nil { + i-- + if *m.PerDeviceNodeSelection { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.AllNodes != nil { + i-- + if *m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocatedDeviceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NetworkData != nil { + l = m.NetworkData.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ShareID != nil { + l = len(*m.ShareID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AllocationTimestamp != nil { + l = m.AllocationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CELDeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CapacityRequestPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Default != nil { + l = m.Default.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidValues) > 0 { + for _, 
e := range m.ValidValues { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ValidRange != nil { + l = m.ValidRange.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CapacityRequestPolicyRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Min != nil { + l = m.Min.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Max != nil { + l = m.Max.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Step != nil { + l = m.Step.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CapacityRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Counter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CounterSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Counters) > 0 { + for k, v := range m.Counters { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.ConsumesCounters) > 0 { + for _, e := range m.ConsumesCounters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AllNodes != nil { + n += 2 + } + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.BindsToNode != nil { + n += 2 + } + if len(m.BindingConditions) > 0 { + for _, s := range m.BindingConditions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.BindingFailureConditions) > 0 { + for _, s := range m.BindingFailureConditions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowMultipleAllocations != nil { + n += 2 + } + return n +} + +func (m *DeviceAllocationConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceCapacity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestPolicy != nil { + l = m.RequestPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ExtendedResourceName != nil { + l = len(*m.ExtendedResourceName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Opaque != nil { + l = m.Opaque.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DistinctAttribute != nil { + l = len(*m.DistinctAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n 
+} + +func (m *DeviceCounterConsumption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CounterSet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Counters) > 0 { + for k, v := range m.Counters { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *DeviceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Exactly != nil { + l = m.Exactly.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.FirstAvailable) > 0 { + for _, e := range m.FirstAvailable { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceRequestAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if m.AdminAccess != nil { + n += 2 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.BindingConditions) > 0 { + for _, s := range m.BindingConditions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.BindingFailureConditions) > 0 { + for _, s := range m.BindingFailureConditions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ShareID != nil { + l = len(*m.ShareID) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ConsumedCapacity) > 0 { + for k, v := range m.ConsumedCapacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *DeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CEL != nil { + l = m.CEL.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceSubRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Capacity != nil { + l = m.Capacity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceTaint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceToleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *ExactDeviceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + if m.AdminAccess != nil { + n += 2 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Capacity != nil { + l = m.Capacity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkDeviceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InterfaceName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.IPs) > 0 { + for _, s := range m.IPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.HardwareAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OpaqueDeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n 
+} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourcePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + return n +} + +func (m *ResourceSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceSliceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Pool.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AllNodes != nil { + n += 2 + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.PerDeviceNodeSelection != nil { + n += 2 + } + if len(m.SharedCounters) > 0 { + for _, e := range m.SharedCounters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocatedDeviceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&AllocatedDeviceStatus{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1) + `,`, + `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`, + `ShareID:` + valueToStringGenerated(this.ShareID) + `,`, + `}`, + }, "") + return s +} +func (this *AllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllocationResult{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `AllocationTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.AllocationTimestamp), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CELDeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CELDeviceSelector{`, + `Expression:` 
+ fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *CapacityRequestPolicy) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidValues := "[]Quantity{" + for _, f := range this.ValidValues { + repeatedStringForValidValues += fmt.Sprintf("%v", f) + "," + } + repeatedStringForValidValues += "}" + s := strings.Join([]string{`&CapacityRequestPolicy{`, + `Default:` + strings.Replace(fmt.Sprintf("%v", this.Default), "Quantity", "resource.Quantity", 1) + `,`, + `ValidValues:` + repeatedStringForValidValues + `,`, + `ValidRange:` + strings.Replace(this.ValidRange.String(), "CapacityRequestPolicyRange", "CapacityRequestPolicyRange", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CapacityRequestPolicyRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CapacityRequestPolicyRange{`, + `Min:` + strings.Replace(fmt.Sprintf("%v", this.Min), "Quantity", "resource.Quantity", 1) + `,`, + `Max:` + strings.Replace(fmt.Sprintf("%v", this.Max), "Quantity", "resource.Quantity", 1) + `,`, + `Step:` + strings.Replace(fmt.Sprintf("%v", this.Step), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CapacityRequirements) String() string { + if this == nil { + return "nil" + } + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "map[QualifiedName]resource.Quantity{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[QualifiedName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&CapacityRequirements{`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *Counter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Counter{`, + `Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CounterSet) String() string { + if this == nil { + return "nil" + } + keysForCounters := make([]string, 0, len(this.Counters)) + for k := range this.Counters { + keysForCounters = append(keysForCounters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + mapStringForCounters := "map[string]Counter{" + for _, k := range keysForCounters { + mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k]) + } + mapStringForCounters += "}" + s := strings.Join([]string{`&CounterSet{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Counters:` + mapStringForCounters + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + repeatedStringForConsumesCounters := "[]DeviceCounterConsumption{" + for _, f := range this.ConsumesCounters { + repeatedStringForConsumesCounters += strings.Replace(strings.Replace(f.String(), "DeviceCounterConsumption", "DeviceCounterConsumption", 1), `&`, ``, 1) + "," + } + repeatedStringForConsumesCounters += "}" + repeatedStringForTaints := "[]DeviceTaint{" + for _, f := range this.Taints { + repeatedStringForTaints += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTaints += "}" + keysForAttributes := make([]string, 0, len(this.Attributes)) + for k := range this.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + mapStringForAttributes := "map[QualifiedName]DeviceAttribute{" + for _, k := range keysForAttributes { + mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)]) + } + mapStringForAttributes += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "map[QualifiedName]DeviceCapacity{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&Device{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Attributes:` + mapStringForAttributes + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `ConsumesCounters:` + repeatedStringForConsumesCounters + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `BindsToNode:` + valueToStringGenerated(this.BindsToNode) + `,`, + `BindingConditions:` + fmt.Sprintf("%v", this.BindingConditions) + `,`, + `BindingFailureConditions:` + fmt.Sprintf("%v", this.BindingFailureConditions) + `,`, + `AllowMultipleAllocations:` + valueToStringGenerated(this.AllowMultipleAllocations) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAllocationConfiguration{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]DeviceRequestAllocationResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + "," + } + repeatedStringForResults += "}" + repeatedStringForConfig := "[]DeviceAllocationConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceAllocationResult{`, + `Results:` + repeatedStringForResults + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAttribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAttribute{`, + `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, + `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, + `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, + `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceCapacity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceCapacity{`, + `Value:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `RequestPolicy:` + strings.Replace(this.RequestPolicy.String(), "CapacityRequestPolicy", "CapacityRequestPolicy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaim) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]DeviceRequest{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForRequests += "}" + repeatedStringForConstraints := "[]DeviceConstraint{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForConfig := "[]DeviceClaimConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClaim{`, + `Requests:` + repeatedStringForRequests + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaimConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClaimConfiguration{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClassConfiguration{`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeviceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForConfig := 
"[]DeviceClassConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClassSpec{`, + `Selectors:` + repeatedStringForSelectors + `,`, + `Config:` + repeatedStringForConfig + `,`, + `ExtendedResourceName:` + valueToStringGenerated(this.ExtendedResourceName) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConfiguration{`, + `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConstraint{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`, + `DistinctAttribute:` + valueToStringGenerated(this.DistinctAttribute) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceCounterConsumption) String() string { + if this == nil { + return "nil" + } + keysForCounters := make([]string, 0, len(this.Counters)) + for k := range this.Counters { + keysForCounters = append(keysForCounters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + mapStringForCounters := "map[string]Counter{" + for _, k := range keysForCounters { + mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k]) + } + mapStringForCounters += "}" + s := strings.Join([]string{`&DeviceCounterConsumption{`, + `CounterSet:` + fmt.Sprintf("%v", this.CounterSet) + `,`, + `Counters:` + mapStringForCounters + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForFirstAvailable := "[]DeviceSubRequest{" + for _, f := range this.FirstAvailable { + repeatedStringForFirstAvailable += strings.Replace(strings.Replace(f.String(), "DeviceSubRequest", "DeviceSubRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForFirstAvailable += "}" + s := strings.Join([]string{`&DeviceRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Exactly:` + strings.Replace(this.Exactly.String(), "ExactDeviceRequest", "ExactDeviceRequest", 1) + `,`, + `FirstAvailable:` + repeatedStringForFirstAvailable + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequestAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]DeviceToleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + keysForConsumedCapacity := make([]string, 0, len(this.ConsumedCapacity)) + for k := range this.ConsumedCapacity { + keysForConsumedCapacity = append(keysForConsumedCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity) + mapStringForConsumedCapacity := "map[QualifiedName]resource.Quantity{" + for _, k := range keysForConsumedCapacity { + mapStringForConsumedCapacity += fmt.Sprintf("%v: %v,", k, this.ConsumedCapacity[QualifiedName(k)]) + } + mapStringForConsumedCapacity += "}" + s := strings.Join([]string{`&DeviceRequestAllocationResult{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + 
`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `BindingConditions:` + fmt.Sprintf("%v", this.BindingConditions) + `,`, + `BindingFailureConditions:` + fmt.Sprintf("%v", this.BindingFailureConditions) + `,`, + `ShareID:` + valueToStringGenerated(this.ShareID) + `,`, + `ConsumedCapacity:` + mapStringForConsumedCapacity + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceSelector{`, + `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSubRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForTolerations := "[]DeviceToleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + s := strings.Join([]string{`&DeviceSubRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `Capacity:` + strings.Replace(this.Capacity.String(), "CapacityRequirements", "CapacityRequirements", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceToleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceToleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ExactDeviceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForTolerations := "[]DeviceToleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + s := strings.Join([]string{`&ExactDeviceRequest{`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `Capacity:` + 
strings.Replace(this.Capacity.String(), "CapacityRequirements", "CapacityRequirements", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkDeviceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkDeviceData{`, + `InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`, + `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`, + `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`, + `}`, + }, "") + return s +} +func (this *OpaqueDeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimConsumerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimConsumerReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSpec{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" + for _, f := range this.ReservedFor { + repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForReservedFor += "}" + repeatedStringForDevices := "[]AllocatedDeviceStatus{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceClaimStatus{`, + `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", 
"AllocationResult", 1) + `,`, + `ReservedFor:` + repeatedStringForReservedFor + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaimTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePool{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSlice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDevices := "[]Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + repeatedStringForSharedCounters := "[]CounterSet{" + for _, f := range this.SharedCounters { + repeatedStringForSharedCounters += 
strings.Replace(strings.Replace(f.String(), "CounterSet", "CounterSet", 1), `&`, ``, 1) + "," + } + repeatedStringForSharedCounters += "}" + s := strings.Join([]string{`&ResourceSliceSpec{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `PerDeviceNodeSelection:` + valueToStringGenerated(this.PerDeviceNodeSelection) + `,`, + `SharedCounters:` + repeatedStringForSharedCounters + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = &runtime.RawExtension{} + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetworkData == nil { + m.NetworkData = &NetworkDeviceData{} + } + if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ShareID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocationTimestamp == nil { + m.AllocationTimestamp = &v1.Time{} + } + if err := m.AllocationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CapacityRequestPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CapacityRequestPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CapacityRequestPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Default == nil { + m.Default = &resource.Quantity{} + } + if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidValues = append(m.ValidValues, resource.Quantity{}) + if err := 
m.ValidValues[len(m.ValidValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidRange == nil { + m.ValidRange = &CapacityRequestPolicyRange{} + } + if err := m.ValidRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CapacityRequestPolicyRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CapacityRequestPolicyRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CapacityRequestPolicyRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Min == nil { + m.Min = &resource.Quantity{} + } + if err := m.Min.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Max == nil { + m.Max = &resource.Quantity{} + } + if err := m.Max.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Step == nil { + m.Step = &resource.Quantity{} + } + if err := m.Step.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CapacityRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CapacityRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CapacityRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requests == nil { + m.Requests = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Requests[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CounterSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CounterSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CounterSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counters == nil { + m.Counters = make(map[string]Counter) + } + var mapkey string + mapvalue := &Counter{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Counter{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Counters[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]DeviceCapacity) + } + var mapkey QualifiedName + mapvalue := &DeviceCapacity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceCapacity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ConsumesCounters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsumesCounters = append(m.ConsumesCounters, DeviceCounterConsumption{}) + if err := m.ConsumesCounters[len(m.ConsumesCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllNodes = &b + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, DeviceTaint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BindsToNode", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BindsToNode = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowMultipleAllocations", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowMultipleAllocations = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := 
m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceCapacity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceCapacity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceCapacity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestPolicy == nil { + m.RequestPolicy = &CapacityRequestPolicy{} + } + if err := m.RequestPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = 
append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ExtendedResourceName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConstraint: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinctAttribute", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.DistinctAttribute = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceCounterConsumption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceCounterConsumption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceCounterConsumption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterSet", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CounterSet = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Counters == nil {
+				m.Counters = make(map[string]Counter)
+			}
+			var mapkey string
+			mapvalue := &Counter{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &Counter{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Counters[mapkey] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exactly", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Exactly == nil {
+				m.Exactly = &ExactDeviceRequest{}
+			}
+			if err := m.Exactly.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FirstAvailable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FirstAvailable = append(m.FirstAvailable, DeviceSubRequest{})
+			if err := m.FirstAvailable[len(m.FirstAvailable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Request = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Pool = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Device = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AdminAccess = &b
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tolerations = append(m.Tolerations, DeviceToleration{})
+			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShareID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			m.ShareID = &s
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumedCapacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConsumedCapacity == nil {
+				m.ConsumedCapacity = make(map[QualifiedName]resource.Quantity)
+			}
+			var mapkey QualifiedName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.ConsumedCapacity[QualifiedName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CEL == nil {
+				m.CEL = &CELDeviceSelector{}
+			}
+			if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceSubRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceSubRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceSubRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DeviceClassName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Selectors = append(m.Selectors, DeviceSelector{})
+			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tolerations = append(m.Tolerations, DeviceToleration{})
+			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = &CapacityRequirements{}
+			}
+			if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceTaint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TimeAdded == nil {
+				m.TimeAdded = &v1.Time{}
+			}
+			if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceToleration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceToleration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceToleration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operator = DeviceTolerationOperator(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TolerationSeconds = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExactDeviceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExactDeviceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExactDeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DeviceClassName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Selectors = append(m.Selectors, DeviceSelector{})
+			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AdminAccess = &b
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tolerations = append(m.Tolerations, DeviceToleration{})
+			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = &CapacityRequirements{}
+			}
+			if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InterfaceName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HardwareAddress = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Allocation == nil {
+				m.Allocation = &AllocationResult{}
+			}
+			if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{})
+			if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Devices = append(m.Devices, AllocatedDeviceStatus{})
+			if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ResourceClaimTemplate{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourcePool) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+			}
+			m.Generation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Generation |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType)
+			}
+			m.ResourceSliceCount = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ResourceSliceCount |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ResourceSlice{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllNodes = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerDeviceNodeSelection", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PerDeviceNodeSelection = &b + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SharedCounters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SharedCounters = append(m.SharedCounters, CounterSet{}) + if err := m.SharedCounters[len(m.SharedCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/openshift/tools/vendor/k8s.io/api/resource/v1/generated.proto b/openshift/tools/vendor/k8s.io/api/resource/v1/generated.proto new file mode 100644 index 0000000000..816a430c26 --- /dev/null +++ b/openshift/tools/vendor/k8s.io/api/resource/v1/generated.proto @@ -0,0 +1,1589 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
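Every hand-rolled Unmarshal method above, as well as the skipGenerated helper, decodes the same protobuf framing: a varint key holding (field_number << 3) | wire_type, followed by a payload whose shape depends on the wire type. A minimal, self-contained sketch of that varint decode loop (illustrative only; the decodeVarint name and this program are not part of the vendored package):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the `value |= uint64(b&0x7F) << shift` loops in the
// generated code: each byte contributes its low 7 bits, and a clear high bit
// marks the final byte of the varint.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF in the generated code
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
	return 0, 0, errors.New("varint overflow") // ErrIntOverflowGenerated in the generated code
}

func main() {
	// 0x0A = (field 1 << 3) | wire type 2 (length-delimited): the key the
	// generated Unmarshal sees for a first string field such as `driver`.
	key, _, _ := decodeVarint([]byte{0x0A})
	fmt.Printf("field=%d wireType=%d\n", key>>3, key&0x7) // field=1 wireType=2
}
```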
+
+syntax = "proto2";
+
+package k8s.io.api.resource.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/resource/v1";
+
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+//
+// The combination of Driver, Pool, Device, and ShareID must match the corresponding key
+// in Status.Allocation.Devices.
+message AllocatedDeviceStatus {
+  // Driver specifies the name of the DRA driver whose kubelet
+  // plugin should be invoked to process the allocation once the claim is
+  // needed on a node.
+  //
+  // Must be a DNS subdomain and should end with a DNS domain owned by the
+  // vendor of the driver.
+  //
+  // +required
+  optional string driver = 1;
+
+  // This name together with the driver name and the device name field
+  // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+  //
+  // Must not be longer than 253 characters and may contain one or more
+  // DNS sub-domains separated by slashes.
+  //
+  // +required
+  optional string pool = 2;
+
+  // Device references one device instance via its name in the driver's
+  // resource pool. It must be a DNS label.
+  //
+  // +required
+  optional string device = 3;
+
+  // ShareID uniquely identifies an individual allocation share of the device.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional string shareID = 7;
+
+  // Conditions contains the latest observation of the device's state.
+  // If the device has been configured according to the class and claim
+  // config references, the `Ready` condition should be True.
+  //
+  // Must not contain more than 8 entries.
+  //
+  // +optional
+  // +listType=map
+  // +listMapKey=type
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4;
+
+  // Data contains arbitrary driver-specific data.
+  //
+  // The length of the raw data must be smaller or equal to 10 Ki.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5;
+
+  // NetworkData contains network-related information specific to the device.
+  //
+  // +optional
+  optional NetworkDeviceData networkData = 6;
+}
+
+// AllocationResult contains attributes of an allocated resource.
+message AllocationResult {
+  // Devices is the result of allocating devices.
+  //
+  // +optional
+  optional DeviceAllocationResult devices = 1;
+
+  // NodeSelector defines where the allocated resources are available. If
+  // unset, they are available everywhere.
+  //
+  // +optional
+  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
+
+  // AllocationTimestamp stores the time when the resources were allocated.
+  // This field is not guaranteed to be set, in which case that time is unknown.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gate.
+  //
+  // +optional
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time allocationTimestamp = 5;
+}
+
+// CELDeviceSelector contains a CEL expression for selecting a device.
+message CELDeviceSelector {
+  // Expression is a CEL expression which evaluates a single device.
It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device + // (v1.34+ with the DRAConsumableCapacity feature enabled). + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + optional string expression = 1; +} + +// CapacityRequestPolicy defines how requests consume device capacity. +// +// Must not set more than one ValidRequestValues. +message CapacityRequestPolicy { + // Default specifies how much of this capacity is consumed by a request + // that does not contain an entry for it in DeviceRequest's Capacity. + // + // +optional + optional .k8s.io.apimachinery.pkg.api.resource.Quantity default = 1; + + // ValidValues defines a set of acceptable quantity values in consuming requests. + // + // Must not contain more than 10 entries. + // Must be sorted in ascending order. + // + // If this field is set, + // Default must be defined and it must be included in ValidValues list. + // + // If the requested amount does not match any valid value but smaller than some valid values, + // the scheduler calculates the smallest valid value that is greater than or equal to the request. 
+  // That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues).
+  //
+  // If the requested amount exceeds all valid values, the request violates the policy,
+  // and this device cannot be allocated.
+  //
+  // +optional
+  // +listType=atomic
+  // +oneOf=ValidRequestValues
+  repeated .k8s.io.apimachinery.pkg.api.resource.Quantity validValues = 3;
+
+  // ValidRange defines an acceptable quantity value range in consuming requests.
+  //
+  // If this field is set,
+  // Default must be defined and it must fall within the defined ValidRange.
+  //
+  // If the requested amount does not fall within the defined range, the request violates the policy,
+  // and this device cannot be allocated.
+  //
+  // If the request doesn't contain this capacity entry, Default value is used.
+  //
+  // +optional
+  // +oneOf=ValidRequestValues
+  optional CapacityRequestPolicyRange validRange = 4;
+}
+
+// CapacityRequestPolicyRange defines a valid range for consumable capacity values.
+//
+// - If the requested amount is less than Min, it is rounded up to the Min value.
+// - If Step is set and the requested amount is between Min and Max but not aligned with Step,
+//   it will be rounded up to the next value equal to Min + (n * Step).
+// - If Step is not set, the requested amount is used as-is if it falls within the range Min to Max (if set).
+// - If the requested or rounded amount exceeds Max (if set), the request does not satisfy the policy,
+//   and the device cannot be allocated.
+message CapacityRequestPolicyRange {
+  // Min specifies the minimum capacity allowed for a consumption request.
+  //
+  // Min must be greater than or equal to zero,
+  // and less than or equal to the capacity value.
+  // requestPolicy.default must be more than or equal to the minimum.
+  //
+  // +required
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity min = 1;
+
+  // Max defines the upper limit for capacity that can be requested.
+  //
+  // Max must be less than or equal to the capacity value.
+  // Min and requestPolicy.default must be less than or equal to the maximum.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity max = 2;
+
+  // Step defines the step size between valid capacity amounts within the range.
+  //
+  // Max (if set) and requestPolicy.default must be a multiple of Step.
+  // Min + Step must be less than or equal to the capacity value.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity step = 3;
+}
+
+// CapacityRequirements defines the capacity requirements for a specific device request.
+message CapacityRequirements {
+  // Requests represent individual device resource requests for distinct resources,
+  // all of which must be provided by the device.
+  //
+  // This value is used as an additional filtering condition against the available capacity on the device.
+  // This is semantically equivalent to a CEL selector with
+  // `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`.
+  // For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0.
+  //
+  // When a requestPolicy is defined, the requested amount is adjusted upward
+  // to the nearest valid value based on the policy.
+  // If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows—
+  // the device is considered ineligible for allocation.
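+  //
+  // For example (hypothetical values, given purely for illustration): with a
+  // requestPolicy of validRange {min: 2Gi, step: 2Gi}, a request for 5Gi is
+  // adjusted up to 6Gi, and with validValues [1, 2, 4], a request for 3 is
+  // adjusted up to 4.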
+  //
+  // For any capacity that is not explicitly requested:
+  // - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity
+  //   (i.e., the whole device is claimed).
+  // - If a requestPolicy is set, the default consumed capacity is determined according to that policy.
+  //
+  // If the device allows multiple allocation,
+  // the aggregated amount across all requests must not exceed the capacity value.
+  // The consumed capacity, which may be adjusted based on the requestPolicy if defined,
+  // is recorded in the resource claim’s status.devices[*].consumedCapacity field.
+  //
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 1;
+}
+
+// Counter describes a quantity associated with a device.
+message Counter {
+  // Value defines how much of a certain device counter is available.
+  //
+  // +required
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+}
+
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourceSlice.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
+message CounterSet {
+  // Name defines the name of the counter set.
+  // It must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // Counters defines the set of counters for this CounterSet
+  // The name of each counter must be unique in that set and must be a DNS label.
+  //
+  // The maximum number of counters in all sets is 32.
+  //
+  // +required
+  map<string, Counter> counters = 2;
+}
+
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
+message Device {
+  // Name is unique identifier among all devices managed by
+  // the driver in the pool. It must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // Attributes defines the set of attributes for this device.
+  // The name of each attribute must be unique in that set.
+  //
+  // The maximum number of attributes and capacities combined is 32.
+  //
+  // +optional
+  map<string, DeviceAttribute> attributes = 2;
+
+  // Capacity defines the set of capacities for this device.
+  // The name of each capacity must be unique in that set.
+  //
+  // The maximum number of attributes and capacities combined is 32.
+  //
+  // +optional
+  map<string, DeviceCapacity> capacity = 3;
+
+  // ConsumesCounters defines a list of references to sharedCounters
+  // and the set of counters that the device will
+  // consume from those counter sets.
+  //
+  // There can only be a single entry per counterSet.
+  //
+  // The total number of device counter consumption entries
+  // must be <= 32. In addition, the total number in the
+  // entire ResourceSlice must be <= 1024 (for example,
+  // 64 devices with 16 counters each).
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRAPartitionableDevices
+  repeated DeviceCounterConsumption consumesCounters = 4;
+
+  // NodeName identifies the node where the device is available.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+  // At most one of NodeName, NodeSelector and AllNodes can be set.
+  //
+  // +optional
+  // +oneOf=DeviceNodeSelection
+  // +featureGate=DRAPartitionableDevices
+  optional string nodeName = 5;
+
+  // NodeSelector defines the nodes where the device is available.
+  //
+  // Must use exactly one term.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set. + // + // +optional + // +oneOf=DeviceNodeSelection + // +featureGate=DRAPartitionableDevices + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 6; + + // AllNodes indicates that all nodes have access to the device. + // + // Must only be set if Spec.PerDeviceNodeSelection is set to true. + // At most one of NodeName, NodeSelector and AllNodes can be set. + // + // +optional + // +oneOf=DeviceNodeSelection + // +featureGate=DRAPartitionableDevices + optional bool allNodes = 7; + + // If specified, these are the driver-defined taints. + // + // The maximum number of taints is 4. + // + // This is an alpha field and requires enabling the DRADeviceTaints + // feature gate. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceTaints + repeated DeviceTaint taints = 8; + + // BindsToNode indicates if the usage of an allocation involving this device + // has to be limited to exactly the node that was chosen when allocating the claim. + // If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector + // to match the node where the allocation was made. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. + // + // +optional + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + optional bool bindsToNode = 9; + + // BindingConditions defines the conditions for proceeding with binding. + // All of these conditions must be set in the per-device status + // conditions with a value of True to proceed with binding the pod to the node + // while scheduling the pod. + // + // The maximum number of binding conditions is 4. + // + // The conditions must be a valid condition type string. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + repeated string bindingConditions = 10; + + // BindingFailureConditions defines the conditions for binding failure. + // They may be set in the per-device status conditions. + // If any is set to "True", a binding failure occurred. + // + // The maximum number of binding failure conditions is 4. + // + // The conditions must be a valid condition type string. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + repeated string bindingFailureConditions = 11; + + // AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests. + // + // If AllowMultipleAllocations is set to true, the device can be allocated more than once, + // and all of its capacity is consumable, regardless of whether the requestPolicy is defined or not. + // + // +optional + // +featureGate=DRAConsumableCapacity + optional bool allowMultipleAllocations = 12; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. 
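+  // In the typed Go API the valid values are the AllocationConfigSource
+  // constants "class" and "claim".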
+  //
+  // +required
+  optional string source = 1;
+
+  // Requests lists the names of requests where the configuration applies.
+  // If empty, it applies to all requests.
+  //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the configuration applies to all subrequests.
+  //
+  // +optional
+  // +listType=atomic
+  repeated string requests = 2;
+
+  optional DeviceConfiguration deviceConfiguration = 3;
+}
+
+// DeviceAllocationResult is the result of allocating devices.
+message DeviceAllocationResult {
+  // Results lists all allocated devices.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceRequestAllocationResult results = 1;
+
+  // This field is a combination of all the claim and class configuration parameters.
+  // Drivers can distinguish between those based on a flag.
+  //
+  // This includes configuration parameters for drivers which have no allocated
+  // devices in the result because it is up to the drivers which configuration
+  // parameters they support. They can silently ignore unknown configuration
+  // parameters.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceAllocationConfiguration config = 2;
+}
+
+// DeviceAttribute must have exactly one field set.
+message DeviceAttribute {
+  // IntValue is a number.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional int64 int = 2;
+
+  // BoolValue is a true/false value.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional bool bool = 3;
+
+  // StringValue is a string. Must not be longer than 64 characters.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional string string = 4;
+
+  // VersionValue is a semantic version according to semver.org spec 2.0.0.
+  // Must not be longer than 64 characters.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional string version = 5;
+}
+
+// DeviceCapacity describes a quantity associated with a device.
+message DeviceCapacity {
+  // Value defines how much of a certain capacity that device has.
+  //
+  // This field reflects the fixed total capacity and does not change.
+  // The consumed amount is tracked separately by scheduler
+  // and does not affect this value.
+  //
+  // +required
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+
+  // RequestPolicy defines how this DeviceCapacity must be consumed
+  // when the device is allowed to be shared by multiple allocations.
+  //
+  // The Device must have allowMultipleAllocations set to true in order to set a requestPolicy.
+  //
+  // If unset, capacity requests are unconstrained:
+  // requests can consume any amount of capacity, as long as the total consumed
+  // across all allocations does not exceed the device's defined capacity.
+  // If request is also unset, default is the full capacity value.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional CapacityRequestPolicy requestPolicy = 2;
+}
+
+// DeviceClaim defines how to request devices with a ResourceClaim.
+message DeviceClaim {
+  // Requests represent individual requests for distinct devices which
+  // must all be satisfied. If empty, nothing needs to be allocated.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceRequest requests = 1;
+
+  // These constraints must be satisfied by the set of devices that get
+  // allocated for the claim.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceConstraint constraints = 2;
+
+  // This field holds configuration for multiple potential drivers which
+  // could satisfy requests in this claim. It is ignored while allocating
+  // the claim.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceClaimConfiguration config = 3;
+}
+
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
+message DeviceClaimConfiguration {
+  // Requests lists the names of requests where the configuration applies.
+  // If empty, it applies to all requests.
+  //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the configuration applies to all subrequests.
+  //
+  // +optional
+  // +listType=atomic
+  repeated string requests = 1;
+
+  optional DeviceConfiguration deviceConfiguration = 2;
+}
+
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+message DeviceClass {
+  // Standard object metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines what can be allocated and how to configure it.
+  //
+  // This is mutable. Consumers have to be prepared for classes changing
+  // at any time, either because they get updated or replaced. Claim
+  // allocations are done once based on whatever was set in classes at
+  // the time of allocation.
+  //
+  // Changing the spec automatically increments the metadata.generation number.
+  optional DeviceClassSpec spec = 2;
+}
+
+// DeviceClassConfiguration is used in DeviceClass.
+message DeviceClassConfiguration {
+  optional DeviceConfiguration deviceConfiguration = 1;
+}
+
+// DeviceClassList is a collection of classes.
+message DeviceClassList {
+  // Standard list metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of resource classes.
+  repeated DeviceClass items = 2;
+}
+
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
+message DeviceClassSpec {
+  // Each selector must be satisfied by a device which is claimed via this class.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceSelector selectors = 1;
+
+  // Config defines configuration parameters that apply to each device that is claimed via this class.
+  // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+  // configuration applies to exactly one driver.
+  //
+  // They are passed to the driver, but are not considered while allocating the claim.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceClassConfiguration config = 2;
+
+  // ExtendedResourceName is the extended resource name for the devices of this class.
+  // The devices of this class can be used to satisfy a pod's extended resource requests.
+  // It has the same format as the name of a pod's extended resource.
+  // It should be unique among all the device classes in a cluster.
+  // If two device classes have the same name, then the class created later
+  // is picked to satisfy a pod's extended resource requests.
+  // If two classes are created at the same time, then the name of the class
+  // lexicographically sorted first is picked.
+  //
+  // This is an alpha field.
+  // +optional
+  // +featureGate=DRAExtendedResource
+  optional string extendedResourceName = 4;
+}
+
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
+message DeviceConfiguration {
+  // Opaque provides driver-specific configuration parameters.
+  //
+  // +optional
+  // +oneOf=ConfigurationType
+  optional OpaqueDeviceConfiguration opaque = 1;
+}
+
+// DeviceConstraint must have exactly one field set besides Requests.
+message DeviceConstraint {
+  // Requests is a list of the one or more requests in this claim which
+  // must co-satisfy this constraint. If a request is fulfilled by
+  // multiple devices, then all of the devices must satisfy the
+  // constraint. If this is not specified, this constraint applies to all
+  // requests in this claim.
+  //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the constraint applies to all subrequests.
+  //
+  // +optional
+  // +listType=atomic
+  repeated string requests = 1;
+
+  // MatchAttribute requires that all devices in question have this
+  // attribute and that its type and value are the same across those
+  // devices.
+  //
+  // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+  // then only devices in the same NUMA node will be chosen. A device which
+  // does not have that attribute will not be chosen. All devices should
+  // use a value of the same type for this attribute because that is part of
+  // its specification, but if one device doesn't, then it also will not be
+  // chosen.
+  //
+  // Must include the domain qualifier.
+  //
+  // +optional
+  // +oneOf=ConstraintType
+  optional string matchAttribute = 2;
+
+  // DistinctAttribute requires that all devices in question have this
+  // attribute and that its type and value are unique across those devices.
+  //
+  // This acts as the inverse of MatchAttribute.
+  //
+  // This constraint is used to avoid allocating multiple requests to the same device
+  // by ensuring attribute-level differentiation.
+  //
+  // This is useful for scenarios where resource requests must be fulfilled by separate physical devices.
+  // For example, a container requests two network interfaces that must be allocated from two different physical NICs.
+  //
+  // +optional
+  // +oneOf=ConstraintType
+  // +featureGate=DRAConsumableCapacity
+  optional string distinctAttribute = 3;
+}
+
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
+message DeviceCounterConsumption {
+  // CounterSet is the name of the set from which the
+  // counters defined will be consumed.
+  //
+  // +required
+  optional string counterSet = 1;
+
+  // Counters defines the counters that will be consumed by the device.
+  //
+  // The maximum number of counters in a device is 32.
+  // In addition, the maximum number of all counters
+  // in all devices is 1024 (for example, 64 devices with
+  // 16 counters each).
+  //
+  // +required
+  map<string, Counter> counters = 2;
+}
+
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices. With FirstAvailable it is also
+// possible to provide a prioritized list of requests.
+message DeviceRequest {
+  // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+  // entry and in a constraint of the claim.
+  //
+  // References using the name in the DeviceRequest will uniquely
+  // identify a request when the Exactly field is set. When the
+  // FirstAvailable field is set, a reference to the name of the
+  // DeviceRequest will match whatever subrequest is chosen by the
+  // scheduler.
+  //
+  // Must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // Exactly specifies the details for a single request that must
+  // be met exactly for the request to be satisfied.
+  //
+  // One of Exactly or FirstAvailable must be set.
+  //
+  // +optional
+  // +oneOf=deviceRequestType
+  optional ExactDeviceRequest exactly = 2;
+
+  // FirstAvailable contains subrequests, of which exactly one will be
+  // selected by the scheduler. It tries to
+  // satisfy them in the order in which they are listed here. So if
+  // there are two entries in the list, the scheduler will only check
+  // the second one if it determines that the first one can not be used.
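+  //
+  // For instance (hypothetical device classes, given purely for illustration):
+  // a claim could list a subrequest for a "large-gpu" DeviceClass first,
+  // followed by a "small-gpu" subrequest that is used only when no large
+  // device is available.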
+  //
+  // DRA does not yet implement scoring, so the scheduler will
+  // select the first set of devices that satisfies all the
+  // requests in the claim. And if the requirements can
+  // be satisfied on more than one node, other scheduling features
+  // will determine which node is chosen. This means that the set of
+  // devices allocated to a claim might not be the optimal set
+  // available to the cluster. Scoring will be implemented later.
+  //
+  // +optional
+  // +oneOf=deviceRequestType
+  // +listType=atomic
+  // +featureGate=DRAPrioritizedList
+  repeated DeviceSubRequest firstAvailable = 3;
+}
+
+// DeviceRequestAllocationResult contains the allocation result for one request.
+message DeviceRequestAllocationResult {
+  // Request is the name of the request in the claim which caused this
+  // device to be allocated. If it references a subrequest in the
+  // firstAvailable list on a DeviceRequest, this field must
+  // include both the name of the main request and the subrequest
+  // using the format <main request>/<subrequest>.
+  //
+  // Multiple devices may have been allocated per request.
+  //
+  // +required
+  optional string request = 1;
+
+  // Driver specifies the name of the DRA driver whose kubelet
+  // plugin should be invoked to process the allocation once the claim is
+  // needed on a node.
+  //
+  // Must be a DNS subdomain and should end with a DNS domain owned by the
+  // vendor of the driver.
+  //
+  // +required
+  optional string driver = 2;
+
+  // This name together with the driver name and the device name field
+  // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+  //
+  // Must not be longer than 253 characters and may contain one or more
+  // DNS sub-domains separated by slashes.
+  //
+  // +required
+  optional string pool = 3;
+
+  // Device references one device instance via its name in the driver's
+  // resource pool. It must be a DNS label.
+  //
+  // +required
+  optional string device = 4;
+
+  // AdminAccess indicates that this device was allocated for
+  // administrative access. See the corresponding request field
+  // for a definition of mode.
+  //
+  // This is an alpha field and requires enabling the DRAAdminAccess
+  // feature gate. Admin access is disabled if this field is unset or
+  // set to false, otherwise it is enabled.
+  //
+  // +optional
+  // +featureGate=DRAAdminAccess
+  optional bool adminAccess = 5;
+
+  // A copy of all tolerations specified in the request at the time
+  // when the device got allocated.
+  //
+  // The maximum number of tolerations is 16.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceToleration tolerations = 6;
+
+  // BindingConditions contains a copy of the BindingConditions
+  // from the corresponding ResourceSlice at the time of allocation.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gates.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  repeated string bindingConditions = 7;
+
+  // BindingFailureConditions contains a copy of the BindingFailureConditions
+  // from the corresponding ResourceSlice at the time of allocation.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gates.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  repeated string bindingFailureConditions = 8;
+
+  // ShareID uniquely identifies an individual allocation share of the device,
+  // used when the device supports multiple simultaneous allocations.
+  // It serves as an additional map key to differentiate concurrent shares
+  // of the same device.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional string shareID = 9;
+
+  // ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
+  // The consumed amount may differ from the requested amount: it is rounded up to the nearest valid
+  // value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount).
+  //
+  // The total consumed capacity for each device must not exceed the DeviceCapacity's Value.
+  //
+  // This field is populated only for devices that allow multiple allocations.
+  // All capacity entries are included, even if the consumed amount is zero.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> consumedCapacity = 10;
+}
+
+// DeviceSelector must have exactly one field set.
+message DeviceSelector {
+  // CEL contains a CEL expression for selecting a device.
+  //
+  // +optional
+  // +oneOf=SelectorType
+  optional CELDeviceSelector cel = 1;
+}
+
+// DeviceSubRequest describes a request for device provided in the
+// claim.spec.devices.requests[].firstAvailable array. Each
+// is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// DeviceSubRequest is similar to ExactDeviceRequest, but doesn't expose the
+// AdminAccess field as that one is only supported when requesting a
+// specific device.
+message DeviceSubRequest {
+  // Name can be used to reference this subrequest in the list of constraints
+  // or the list of configurations for the claim. References must use the
+  // format <main request>/<subrequest>.
+  //
+  // Must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // DeviceClassName references a specific DeviceClass, which can define
+  // additional configuration and selectors to be inherited by this
+  // subrequest.
+  //
+  // A class is required. Which classes are available depends on the cluster.
+  //
+  // Administrators may use this to restrict which devices may get
+  // requested by only installing classes with selectors for permitted
+  // devices. If users are free to request anything without restrictions,
+  // then administrators can create an empty DeviceClass for users
+  // to reference.
+  //
+  // +required
+  optional string deviceClassName = 2;
+
+  // Selectors define criteria which must be satisfied by a specific
+  // device in order for that device to be considered for this
+  // subrequest. All selectors must be satisfied for a device to be
+  // considered.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceSelector selectors = 3;
+
+  // AllocationMode and its related fields define how devices are allocated
+  // to satisfy this subrequest. Supported values are:
+  //
+  // - ExactCount: This request is for a specific number of devices.
+  //   This is the default. The exact number is provided in the
+  //   count field.
+  //
+  // - All: This subrequest is for all of the matching devices in a pool.
+  //   Allocation will fail if some devices are already allocated,
+  //   unless adminAccess is requested.
+  //
+  // If AllocationMode is not specified, the default mode is ExactCount. If
+  // the mode is ExactCount and count is not specified, the default count is
+  // one. Any other subrequests must specify this field.
+  //
+  // More modes may get added in the future. Clients must refuse to handle
+  // requests with unknown modes.
+  //
+  // +optional
+  optional string allocationMode = 4;
+
+  // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+  // If AllocationMode is ExactCount and this field is not specified, the default is one.
+  //
+  // +optional
+  // +oneOf=AllocationMode
+  optional int64 count = 5;
+
+  // If specified, the request's tolerations.
+  //
+  // Tolerations for NoSchedule are required to allocate a
+  // device which has a taint with that effect. The same applies
+  // to NoExecute.
+  //
+  // In addition, should any of the allocated devices get tainted
+  // with NoExecute after allocation and that effect is not tolerated,
+  // then all pods consuming the ResourceClaim get deleted to evict
+  // them. The scheduler will not let new pods reserve the claim while
+  // it has these tainted devices. Once all pods are evicted, the
+  // claim will get deallocated.
+  //
+  // The maximum number of tolerations is 16.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceToleration tolerations = 6;
+
+  // Capacity defines resource requirements against each capacity.
+  //
+  // If this field is unset and the device supports multiple allocations,
+  // the default value will be applied to each capacity according to requestPolicy.
+  // For the capacity that has no requestPolicy, default is the full capacity value.
+  //
+  // Applies to each device allocation.
+  // If Count > 1,
+  // the request fails if there aren't enough devices that meet the requirements.
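+  // (Hypothetical illustration: a capacity request of {"memory": "4Gi"} filters
+  // out devices that cannot provide at least 4Gi of memory per allocation.)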
+  // If AllocationMode is set to All,
+  // the request fails if there are devices that otherwise match the request,
+  // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional CapacityRequirements capacity = 7;
+}
+
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message DeviceTaint {
+  // The taint key to be applied to a device.
+  // Must be a label name.
+  //
+  // +required
+  optional string key = 1;
+
+  // The taint value corresponding to the taint key.
+  // Must be a label value.
+  //
+  // +optional
+  optional string value = 2;
+
+  // The effect of the taint on claims that do not tolerate the taint
+  // and through such claims on the pods using them.
+  // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
+  // nodes is not valid here.
+  //
+  // +required
+  optional string effect = 3;
+
+  // TimeAdded represents the time at which the taint was added.
+  // Added automatically during create or update if not set.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message DeviceToleration {
+  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+  // Must be a label name.
+  //
+  // +optional
+  optional string key = 1;
+
+  // Operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a ResourceClaim can
+  // tolerate all taints of a particular category.
+  //
+  // +optional
+  // +default="Equal"
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value must be empty, otherwise just a regular string.
+  // Must be a label value.
+  //
+  // +optional
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule and NoExecute.
+  //
+  // +optional
+  optional string effect = 4;
+
+  // TolerationSeconds represents the period of time the toleration (which must be
+  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+  // it is not set, which means tolerate the taint forever (do not evict). Zero and
+  // negative values will be treated as 0 (evict immediately) by the system.
+  // If larger than zero, the time when the pod needs to be evicted is calculated as